diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..dba8746781b7db27a4e348896fbd7f27bc819087 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +/es-git-go +/.GOPATH +/bin +/tmp +/vendor/github.com/stretchr/testify +/vendor/github.com/wadey/gocovmerge +/vendor/golang.org/x/tools +/vendor/manifest diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..7aa5dcccea93c746add7723cd809e1d9c7307f97 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,22 @@ +.test: &test + services: + - elasticsearch:5.1 + variables: + V: "1" + ELASTIC_CONNECTION_INFO: '{"url":["http://elasticsearch:9200"]}' + stage: test + script: + - apt-get update && apt-get -yy install libicu-dev + - make setup + - make format + - make cover + - make + - make test + +test 1.7: + <<: *test + image: golang:1.7 + +test 1.8: + <<: *test + image: golang:1.8 diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d95544105a8f2969e2b935aecaee9645b3ab4b88 --- /dev/null +++ b/Makefile @@ -0,0 +1,144 @@ +# The import path is where your repository can be found. +# To import subpackages, always prepend the full import path. +# If you change this, run `make clean`. Read more: https://git.io/vM7zV +IMPORT_PATH := gitlab.com/gitlab-org/es-git-go + +GO15VENDOREXPERIMENT := 1 + +# V := 1 # When V is set, print commands and build progress. + +# Space separated patterns of packages to skip in list, test, format. +IGNORED_PACKAGES := /vendor/ + +.PHONY: all +all: build + +.PHONY: build +build: .GOPATH/.ok + $Q go install $(if $V,-v) $(VERSION_FLAGS) $(IMPORT_PATH) + +### Code not in the repository root? Another binary? Add to the path like this. +# .PHONY: otherbin +# otherbin: .GOPATH/.ok +# $Q go install $(if $V,-v) $(VERSION_FLAGS) $(IMPORT_PATH)/cmd/otherbin + +##### ^^^^^^ EDIT ABOVE ^^^^^^ ##### + +##### =====> Utility targets <===== ##### + +.PHONY: clean test list cover format + +clean: + $Q rm -rf bin .GOPATH tmp + +test: .GOPATH/.ok + $Q go test $(if $V,-v) -i -race $(allpackages) # install -race libs to speed up next run +ifndef CI + $Q go vet $(allpackages) + $Q GODEBUG=cgocheck=2 go test $(if $V,-v) -race $(allpackages) +else + $Q ( go vet $(allpackages); echo $$? ) | \ + tee .GOPATH/test/vet.txt | sed '$$ d'; exit $$(tail -1 .GOPATH/test/vet.txt) + $Q ( GODEBUG=cgocheck=2 go test -v -race $(allpackages); echo $$? ) | \ + tee .GOPATH/test/output.txt | sed '$$ d'; exit $$(tail -1 .GOPATH/test/output.txt) +endif + +list: .GOPATH/.ok + @echo $(allpackages) + +cover: bin/gocovmerge .GOPATH/.ok + @echo "NOTE: make cover does not exit 1 on failure, don't use it to check for tests success!" + $Q rm -f .GOPATH/cover/*.out .GOPATH/cover/all.merged + $(if $V,@echo "-- go test -coverpkg=./... -coverprofile=.GOPATH/cover/... 
./...") + @for MOD in $(allpackages); do \ + go test -coverpkg=`echo $(allpackages)|tr " " ","` \ + -coverprofile=.GOPATH/cover/unit-`echo $$MOD|tr "/" "_"`.out \ + $$MOD 2>&1 | grep -v "no packages being tested depend on"; \ + done + $Q ./bin/gocovmerge .GOPATH/cover/*.out > .GOPATH/cover/all.merged +ifndef CI + $Q go tool cover -html .GOPATH/cover/all.merged +else + $Q go tool cover -html .GOPATH/cover/all.merged -o .GOPATH/cover/all.html +endif + @echo "" + @echo "=====> Total test coverage: <=====" + @echo "" + $Q go tool cover -func .GOPATH/cover/all.merged + +format: bin/goimports .GOPATH/.ok + $Q find .GOPATH/src/$(IMPORT_PATH)/ -iname \*.go | grep -v \ + -e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES)) | xargs ./bin/goimports -w + +##### =====> Internals <===== ##### + +.PHONY: setup +setup: clean .GOPATH/.ok + @if ! grep "/.GOPATH" .gitignore > /dev/null 2>&1; then \ + echo "/.GOPATH" >> .gitignore; \ + echo "/bin" >> .gitignore; \ + fi + go get -u github.com/FiloSottile/gvt + - ./bin/gvt fetch golang.org/x/tools/cmd/goimports + - ./bin/gvt fetch github.com/wadey/gocovmerge + - ./bin/gvt fetch github.com/stretchr/testify/assert + - mkdir tmp + - git clone --bare https://gitlab.com/gitlab-org/gitlab-test.git tmp/gitlab-test.git + +VERSION := $(shell git describe --tags --always --dirty="-dev") +DATE := $(shell date -u '+%Y-%m-%d-%H%M UTC') +VERSION_FLAGS := -ldflags='-X "main.Version=$(VERSION)" -X "main.BuildTime=$(DATE)"' + +# cd into the GOPATH to workaround ./... not following symlinks +_allpackages = $(shell ( cd $(CURDIR)/.GOPATH/src/$(IMPORT_PATH) && \ + GOPATH=$(CURDIR)/.GOPATH go list ./... 2>&1 1>&3 | \ + grep -v -e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES)) 1>&2 ) 3>&1 | \ + grep -v -e "^$$" $(addprefix -e ,$(IGNORED_PACKAGES))) + +# memoize allpackages, so that it's executed only once and only if used +allpackages = $(if $(__allpackages),,$(eval __allpackages := $$(_allpackages)))$(__allpackages) + +export GOPATH := $(CURDIR)/.GOPATH +unexport GOBIN + +Q := $(if $V,,@) + +.GOPATH/.ok: + $Q mkdir -p "$(dir .GOPATH/src/$(IMPORT_PATH))" + $Q ln -s ../../../.. ".GOPATH/src/$(IMPORT_PATH)" + $Q mkdir -p .GOPATH/test .GOPATH/cover + $Q mkdir -p bin + $Q ln -s ../bin .GOPATH/bin + $Q touch $@ + +.PHONY: bin/gocovmerge bin/goimports +bin/gocovmerge: .GOPATH/.ok + @test -d ./vendor/github.com/wadey/gocovmerge || \ + { echo "Vendored gocovmerge not found, try running 'make setup'..."; exit 1; } + $Q go install $(IMPORT_PATH)/vendor/github.com/wadey/gocovmerge +bin/goimports: .GOPATH/.ok + @test -d ./vendor/golang.org/x/tools/cmd/goimports || \ + { echo "Vendored goimports not found, try running 'make setup'..."; exit 1; } + $Q go install $(IMPORT_PATH)/vendor/golang.org/x/tools/cmd/goimports + +# Based on https://github.com/cloudflare/hellogopher - v1.1 - MIT License +# +# Copyright (c) 2017 Cloudflare +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6989b41ac50b47976131d34d1980677ef8cdba62 --- /dev/null +++ b/README.md @@ -0,0 +1,23 @@ +# GitLab Elasticsearch Indexer + +This project indexes Git repositories into Elasticsearch for GitLab. See the +[homepage](https://gitlab.com/gitlab-org/es-git-go) for more information. + +## Building + +This project relies on [ICU](http://site.icu-project.org/) for text encoding; +ensure the development packages for your platform are installed before running +`make`: + +### Debian / Ubuntu + +``` +# apt install libicu-dev +``` + +### Mac OSX + +``` +$ brew install icu4c +$ export PKG_CONFIG_PATH="/usr/local/opt/icu4c/lib/pkgconfig:$PKG_CONFIG_PATH" +``` diff --git a/elastic/client.go b/elastic/client.go new file mode 100644 index 0000000000000000000000000000000000000000..979cbc49b6f6924e0cfeda5a8d171a1dce3984d9 --- /dev/null +++ b/elastic/client.go @@ -0,0 +1,153 @@ +package elastic + +import ( + "context" + "fmt" + "net/http" + "os" + "strings" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/deoxxa/aws_signing_client" + "gopkg.in/olivere/elastic.v5" +) + +var ( + timeoutError = fmt.Errorf("Timeout") +) + +const ( + // TODO: make this configurable / detectable. + // Limiting to 10MiB lets us work on small AWS clusters, but unnecessarily + // increases round trips in larger or non-AWS clusters + MaxBulkSize = 10 * 1024 * 1024 + BulkWorkers = 10 +) + +type Client struct { + IndexName string + ProjectID string + Client *elastic.Client + bulk *elastic.BulkProcessor +} + +// FromEnv creates an Elasticsearch client from the `ELASTIC_CONNECTION_INFO` +// environment variable +func FromEnv(projectID string) (*Client, error) { + data := strings.NewReader(os.Getenv("ELASTIC_CONNECTION_INFO")) + + config, err := ReadConfig(data) + if err != nil { + return nil, fmt.Errorf("Couldn't parse ELASTIC_CONNECTION_INFO: %s", err) + } + + railsEnv := os.Getenv("RAILS_ENV") + indexName := "gitlab" + if railsEnv != "" { + indexName = indexName + "-" + railsEnv + } + + config.IndexName = indexName + config.ProjectID = projectID + + return NewClient(config) +} + +func NewClient(config *Config) (*Client, error) { + var opts []elastic.ClientOptionFunc + + // AWS settings have to come first or they override custom URL, etc + if config.AWS { + credentials := credentials.NewStaticCredentials(config.AccessKey, config.SecretKey, "") + signer := v4.NewSigner(credentials) + awsClient, err := aws_signing_client.New(signer, &http.Client{}, "es", config.Region) + if err != nil { + return nil, err + } + + opts = append(opts, elastic.SetHttpClient(awsClient)) + } + + // Sniffer should look for HTTPS URLs if at-least-one initial URL is HTTPS + for _, url := range config.URL { + if strings.HasPrefix(url, "https:") { + opts = append(opts, elastic.SetScheme("https")) + break + } + } + + opts = append(opts, elastic.SetURL(config.URL...), elastic.SetSniff(false)) + + client, err := elastic.NewClient(opts...) 
+ if err != nil { + return nil, err + } + + bulk, err := client.BulkProcessor(). + Workers(BulkWorkers). + BulkSize(MaxBulkSize). + Do(context.Background()) + + if err != nil { + return nil, err + } + + return &Client{ + IndexName: config.IndexName, + ProjectID: config.ProjectID, + Client: client, + bulk: bulk, + }, nil +} + +func (c *Client) ParentID() string { + return c.ProjectID +} + +func (c *Client) Flush() error { + return c.bulk.Flush() +} + +func (c *Client) Close() { + c.Client.Stop() +} + +func (c *Client) Index(id string, thing interface{}) { + req := elastic.NewBulkIndexRequest(). + Index(c.IndexName). + Type("repository"). + Parent(c.ProjectID). + Id(id). + Doc(thing) + + c.bulk.Add(req) +} + +// We only really use this for tests +func (c *Client) Get(id string) (*elastic.GetResult, error) { + return c.Client.Get(). + Index(c.IndexName). + Type("repository"). + Id(id). + Routing(c.ProjectID). + Do(context.TODO()) +} + +func (c *Client) GetCommit(id string) (*elastic.GetResult, error) { + return c.Get(c.ProjectID + "_" + id) +} + +func (c *Client) GetBlob(path string) (*elastic.GetResult, error) { + return c.Get(c.ProjectID + "_" + path) +} + +func (c *Client) Remove(id string) { + req := elastic.NewBulkDeleteRequest(). + Index(c.IndexName). + Type("repository"). + Parent(c.ProjectID). + Id(id) + + c.bulk.Add(req) +} diff --git a/elastic/client_test.go b/elastic/client_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0b412d131930c25bca8b6929ea621adc84d72969 --- /dev/null +++ b/elastic/client_test.go @@ -0,0 +1,102 @@ +package elastic_test + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/http/httptest" + "os" + "regexp" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "gitlab.com/gitlab-org/es-git-go/elastic" +) + +const ( + projectID = "667" +) + +func TestAWSConfiguration(t *testing.T) { + var req *http.Request + + // httptest certificate is unsigned + transport := http.DefaultTransport + defer func() { http.DefaultTransport = transport }() + http.DefaultTransport = &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} + + f := func(w http.ResponseWriter, r *http.Request) { + req = r + + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{}`)) + } + + srv := httptest.NewTLSServer(http.HandlerFunc(f)) + defer srv.Close() + + config, err := elastic.ReadConfig(strings.NewReader( + `{ + "url":["` + srv.URL + `"], + "aws":true, + "aws_region": "us-east-1", + "aws_access_key": "0", + "aws_secret_access_key": "0" + }`, + )) + assert.NoError(t, err) + + client, err := elastic.NewClient(config) + assert.NoError(t, err) + defer client.Close() + + if assert.NotNil(t, req) { + authRE := regexp.MustCompile(`\AAWS4-HMAC-SHA256 Credential=0/\d{8}/us-east-1/es/aws4_request, SignedHeaders=accept;date;host;x-amz-date, Signature=[a-f0-9]{64}\z`) + assert.Regexp(t, authRE, req.Header.Get("Authorization")) + assert.NotEqual(t, "", req.Header.Get("X-Amz-Date")) + } +} + +func TestElasticClientIndexAndRetrieval(t *testing.T) { + config := os.Getenv("ELASTIC_CONNECTION_INFO") + if config == "" { + t.Log("ELASTIC_CONNECTION_INFO not set") + t.SkipNow() + } + + os.Setenv("RAILS_ENV", fmt.Sprintf("test-elastic-%d", time.Now().Unix())) + + client, err := elastic.FromEnv(projectID) + assert.NoError(t, err) + + assert.Equal(t, projectID, client.ParentID()) + + assert.NoError(t, client.CreateIndex()) + + blobDoc := map[string]interface{}{} + client.Index(projectID+"_foo", blobDoc) + + commitDoc := 
map[string]interface{}{} + client.Index(projectID+"_0000", commitDoc) + + assert.NoError(t, client.Flush()) + + blob, err := client.GetBlob("foo") + assert.NoError(t, err) + assert.Equal(t, true, blob.Found) + + commit, err := client.GetCommit("0000") + assert.NoError(t, err) + assert.Equal(t, true, commit.Found) + + client.Remove(projectID + "_foo") + assert.NoError(t, client.Flush()) + + _, err = client.GetBlob("foo") + assert.Error(t, err) + + assert.NoError(t, client.DeleteIndex()) +} diff --git a/elastic/elastic.go b/elastic/elastic.go new file mode 100644 index 0000000000000000000000000000000000000000..bd28a5b97e47249e94f8d509034524756c5966c6 --- /dev/null +++ b/elastic/elastic.go @@ -0,0 +1,26 @@ +package elastic + +import ( + "encoding/json" + "io" +) + +type Config struct { + IndexName string `json:"-"` + ProjectID string `json:"-"` + URL []string `json:"url"` + AWS bool `json:"aws"` + Region string `json:"aws_region"` + AccessKey string `json:"aws_access_key"` + SecretKey string `json:"aws_secret_access_key"` +} + +func ReadConfig(r io.Reader) (*Config, error) { + var out Config + + if err := json.NewDecoder(r).Decode(&out); err != nil { + return nil, err + } + + return &out, nil +} diff --git a/elastic/index.go b/elastic/index.go new file mode 100644 index 0000000000000000000000000000000000000000..b77ee485b219c692d4c0a98eb0a7b017daad0057 --- /dev/null +++ b/elastic/index.go @@ -0,0 +1,673 @@ +package elastic + +import ( + "context" +) + +var indexMapping = ` +{ + "settings": { + "analysis": { + "filter": { + "my_stemmer": { + "name": "light_english", + "type": "stemmer" + }, + "code": { + "type": "pattern_capture", + "preserve_original": "1", + "patterns": [ + "(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)", + "(\\d+)" + ] + } + }, + "char_filter": { + "code_mapping": { + "type": "mapping", + "mappings": [ + ". 
=> ' '" + ] + } + }, + "analyzer": { + "default": { + "filter": [ + "standard", + "lowercase", + "my_stemmer" + ], + "tokenizer": "standard" + }, + "code_search_analyzer": { + "filter": [ + "lowercase", + "asciifolding" + ], + "char_filter": [ + "code_mapping" + ], + "type": "custom", + "tokenizer": "standard" + }, + "path_analyzer": { + "filter": [ + "lowercase", + "asciifolding" + ], + "type": "custom", + "tokenizer": "path_tokenizer" + }, + "sha_analyzer": { + "filter": [ + "lowercase", + "asciifolding" + ], + "type": "custom", + "tokenizer": "sha_tokenizer" + }, + "code_analyzer": { + "filter": [ + "code", + "lowercase", + "asciifolding" + ], + "char_filter": [ + "code_mapping" + ], + "type": "custom", + "tokenizer": "standard" + }, + "my_ngram_analyzer": { + "filter": [ + "lowercase" + ], + "tokenizer": "my_ngram_tokenizer" + } + }, + "tokenizer": { + "my_ngram_tokenizer": { + "token_chars": [ + "letter", + "digit" + ], + "min_gram": "2", + "type": "nGram", + "max_gram": "3" + }, + "sha_tokenizer": { + "token_chars": [ + "letter", + "digit" + ], + "min_gram": "5", + "type": "edgeNGram", + "max_gram": "40" + }, + "path_tokenizer": { + "reverse": "true", + "type": "path_hierarchy" + } + } + } + }, + "mappings": { + "milestone": { + "_parent": { + "type": "project" + }, + "_routing": { + "required": true + }, + "properties": { + "created_at": { + "type": "date" + }, + "description": { + "type": "text", + "index_options": "offsets" + }, + "id": { + "type": "integer" + }, + "project_id": { + "type": "integer" + }, + "title": { + "type": "text", + "index_options": "offsets" + }, + "updated_at": { + "type": "date" + } + } + }, + "note": { + "_parent": { + "type": "project" + }, + "_routing": { + "required": true + }, + "properties": { + "created_at": { + "type": "date" + }, + "id": { + "type": "integer" + }, + "issue": { + "properties": { + "assignee_id": { + "type": "integer" + }, + "author_id": { + "type": "integer" + }, + "confidential": { + "type": "boolean" + } + } + }, + "note": { + "type": "text", + "index_options": "offsets" + }, + "noteable_id": { + "type": "integer" + }, + "noteable_type": { + "type": "keyword" + }, + "project_id": { + "type": "integer" + }, + "updated_at": { + "type": "date" + } + } + }, + "project_wiki": { + "_parent": { + "type": "project" + }, + "_routing": { + "required": true + }, + "properties": { + "blob": { + "properties": { + "commit_sha": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "content": { + "type": "text", + "index_options": "offsets", + "analyzer": "code_analyzer", + "search_analyzer": "code_search_analyzer" + }, + "file_name": { + "type": "text", + "analyzer": "code_analyzer", + "search_analyzer": "code_search_analyzer" + }, + "id": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "language": { + "type": "keyword" + }, + "oid": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "path": { + "type": "text", + "analyzer": "path_analyzer" + }, + "rid": { + "type": "keyword" + } + } + }, + "commit": { + "properties": { + "author": { + "properties": { + "email": { + "type": "text", + "index_options": "offsets" + }, + "name": { + "type": "text", + "index_options": "offsets" + }, + "time": { + "type": "date", + "format": "basic_date_time_no_millis" + } + } + }, + "commiter": { + "properties": { + "email": { + "type": "text", + "index_options": "offsets" + }, + "name": { + "type": "text", + "index_options": "offsets" + }, + "time": { + "type": 
"date", + "format": "basic_date_time_no_millis" + } + } + }, + "id": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "message": { + "type": "text", + "index_options": "offsets" + }, + "rid": { + "type": "keyword" + }, + "sha": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + } + } + } + } + }, + "issue": { + "_parent": { + "type": "project" + }, + "_routing": { + "required": true + }, + "properties": { + "assignee_id": { + "type": "integer" + }, + "author_id": { + "type": "integer" + }, + "confidential": { + "type": "boolean" + }, + "created_at": { + "type": "date" + }, + "description": { + "type": "text", + "index_options": "offsets" + }, + "id": { + "type": "integer" + }, + "iid": { + "type": "integer" + }, + "project_id": { + "type": "integer" + }, + "state": { + "type": "text" + }, + "title": { + "type": "text", + "index_options": "offsets" + }, + "updated_at": { + "type": "date" + } + } + }, + "merge_request": { + "_parent": { + "type": "project" + }, + "_routing": { + "required": true + }, + "properties": { + "author_id": { + "type": "integer" + }, + "created_at": { + "type": "date" + }, + "description": { + "type": "text", + "index_options": "offsets" + }, + "id": { + "type": "integer" + }, + "iid": { + "type": "integer" + }, + "merge_status": { + "type": "text" + }, + "source_branch": { + "type": "text", + "index_options": "offsets" + }, + "source_project_id": { + "type": "integer" + }, + "state": { + "type": "text" + }, + "target_branch": { + "type": "text", + "index_options": "offsets" + }, + "target_project_id": { + "type": "integer" + }, + "title": { + "type": "text", + "index_options": "offsets" + }, + "updated_at": { + "type": "date" + } + } + }, + "repository": { + "_parent": { + "type": "project" + }, + "_routing": { + "required": true + }, + "properties": { + "blob": { + "properties": { + "commit_sha": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "content": { + "type": "text", + "index_options": "offsets", + "analyzer": "code_analyzer", + "search_analyzer": "code_search_analyzer" + }, + "file_name": { + "type": "text", + "analyzer": "code_analyzer", + "search_analyzer": "code_search_analyzer" + }, + "id": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "language": { + "type": "keyword" + }, + "oid": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "path": { + "type": "text", + "analyzer": "path_analyzer" + }, + "rid": { + "type": "keyword" + }, + "type": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "commit": { + "properties": { + "author": { + "properties": { + "email": { + "type": "text", + "index_options": "offsets" + }, + "name": { + "type": "text", + "index_options": "offsets" + }, + "time": { + "type": "date", + "format": "basic_date_time_no_millis" + } + } + }, + "commiter": { + "properties": { + "email": { + "type": "text", + "index_options": "offsets" + }, + "name": { + "type": "text", + "index_options": "offsets" + }, + "time": { + "type": "date", + "format": "basic_date_time_no_millis" + } + } + }, + "committer": { + "properties": { + "email": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "name": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + }, + "time": { + "type": "text", + "fields": { + 
"keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + }, + "id": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "message": { + "type": "text", + "index_options": "offsets" + }, + "rid": { + "type": "keyword" + }, + "sha": { + "type": "text", + "index_options": "offsets", + "analyzer": "sha_analyzer" + }, + "type": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 + } + } + } + } + } + } + }, + "project": { + "properties": { + "archived": { + "type": "boolean" + }, + "created_at": { + "type": "date" + }, + "description": { + "type": "text", + "index_options": "offsets" + }, + "id": { + "type": "integer" + }, + "issues_access_level": { + "type": "integer" + }, + "last_activity_at": { + "type": "date" + }, + "last_pushed_at": { + "type": "date" + }, + "merge_requests_access_level": { + "type": "integer" + }, + "name": { + "type": "text", + "index_options": "offsets" + }, + "name_with_namespace": { + "type": "text", + "index_options": "offsets", + "analyzer": "my_ngram_analyzer" + }, + "namespace_id": { + "type": "integer" + }, + "path": { + "type": "text", + "index_options": "offsets" + }, + "path_with_namespace": { + "type": "text", + "index_options": "offsets" + }, + "repository_access_level": { + "type": "integer" + }, + "snippets_access_level": { + "type": "integer" + }, + "updated_at": { + "type": "date" + }, + "visibility_level": { + "type": "integer" + }, + "wiki_access_level": { + "type": "integer" + } + } + }, + "snippet": { + "properties": { + "author_id": { + "type": "integer" + }, + "content": { + "type": "text", + "index_options": "offsets" + }, + "created_at": { + "type": "date" + }, + "file_name": { + "type": "text", + "index_options": "offsets" + }, + "id": { + "type": "integer" + }, + "project_id": { + "type": "integer" + }, + "state": { + "type": "text" + }, + "title": { + "type": "text", + "index_options": "offsets" + }, + "updated_at": { + "type": "date" + }, + "visibility_level": { + "type": "integer" + } + } + } + } +} +` + +// CreateIndex creates an index matching that created by gitlab-elasticsearch-git v1.1.1 +func (c *Client) CreateIndex() error { + createIndex, err := c.Client.CreateIndex(c.IndexName).BodyString(indexMapping).Do(context.Background()) + if err != nil { + return err + } + + if !createIndex.Acknowledged { + return timeoutError + } + + return nil +} + +func (c *Client) DeleteIndex() error { + deleteIndex, err := c.Client.DeleteIndex(c.IndexName).Do(context.Background()) + if err != nil { + return err + } + + if !deleteIndex.Acknowledged { + return timeoutError + } + + return nil +} diff --git a/git/go-git.go b/git/go-git.go new file mode 100644 index 0000000000000000000000000000000000000000..d35289d9549a49e49106583208f24f9c5962516f --- /dev/null +++ b/git/go-git.go @@ -0,0 +1,179 @@ +package git + +import ( + "fmt" + + "gopkg.in/src-d/go-git.v4" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/filemode" + "gopkg.in/src-d/go-git.v4/plumbing/object" + "gopkg.in/src-d/go-git.v4/utils/merkletrie" +) + +var ( + endError = fmt.Errorf("Finished") // not really an error +) + +type goGitRepository struct { + *git.Repository + + FromHash plumbing.Hash + ToHash plumbing.Hash + + FromCommit *object.Commit + ToCommit *object.Commit +} + +func NewGoGitRepository(projectPath string, fromSHA string, toSHA string) (*goGitRepository, error) { + out := &goGitRepository{} + + repo, err := git.PlainOpen(projectPath) + if err != nil { + return nil, 
err + } + out.Repository = repo + + if fromSHA == "" { + out.FromHash = plumbing.ZeroHash + } else { + out.FromHash = plumbing.NewHash(fromSHA) + + commit, err := repo.CommitObject(out.FromHash) + if err != nil { + return nil, fmt.Errorf("Bad from SHA (%s): %s", out.FromHash, err) + } + + out.FromCommit = commit + } + + if toSHA == "" { + ref, err := out.Repository.Head() + if err != nil { + return nil, err + } + + out.ToHash = ref.Hash() + } else { + out.ToHash = plumbing.NewHash(toSHA) + } + + commit, err := out.Repository.CommitObject(out.ToHash) + if err != nil { + return nil, fmt.Errorf("Bad to SHA (%s): %s", out.ToHash, err) + } + + out.ToCommit = commit + + return out, nil +} + +func (r *goGitRepository) diff() (object.Changes, error) { + var fromTree, toTree *object.Tree + + if r.FromCommit != nil { + tree, err := r.FromCommit.Tree() + if err != nil { + return nil, err + } + + fromTree = tree + } + + toTree, err := r.ToCommit.Tree() + if err != nil { + return nil, err + } + + return object.DiffTree(fromTree, toTree) +} + +func goGitBuildSignature(sig object.Signature) Signature { + return Signature{ + Name: sig.Name, + Email: sig.Email, + When: sig.When, + } +} + +func goGitBuildFile(change object.ChangeEntry, file *object.File) *File { + return &File{ + Path: change.Name, + Oid: file.ID().String(), + Blob: file.Blob.Reader, + Size: file.Size, + } +} + +func (r *goGitRepository) EachFileChange(ins, mod, del FileFunc) error { + changes, err := r.diff() + if err != nil { + return err + } + + fromCommitStr := r.FromHash.String() + toCommitStr := r.ToHash.String() + + for _, change := range changes { + // FIXME(nick): submodules may need better support + // https://github.com/src-d/go-git/issues/317 + if change.From.TreeEntry.Mode == filemode.Submodule || change.To.TreeEntry.Mode == filemode.Submodule { + continue + } + + fromF, toF, err := change.Files() + if err != nil { + return err + } + + action, err := change.Action() + if err != nil { + return err + } + + switch action { + case merkletrie.Insert: + err = ins(goGitBuildFile(change.To, toF), fromCommitStr, toCommitStr) + case merkletrie.Modify: + err = mod(goGitBuildFile(change.To, toF), fromCommitStr, toCommitStr) + case merkletrie.Delete: + err = del(goGitBuildFile(change.From, fromF), fromCommitStr, toCommitStr) + default: + err = fmt.Errorf("Unrecognised change calculating diff: %+v", change) + } + + if err != nil { + return err + } + } + + return nil +} + +// EachCommit runs `f` for each commit within `fromSHA`..`toSHA` +// go-git doesn't directly support ranges of revisions, so we emulate this by +// walking the commit history between from and to +func (r *goGitRepository) EachCommit(f CommitFunc) error { + err := object.WalkCommitHistoryPost(r.ToCommit, func(c *object.Commit) error { + if r.FromCommit != nil && c.ID() == r.FromCommit.ID() { + return endError + } + + commit := &Commit{ + Message: c.Message, + Hash: c.Hash.String(), + Author: goGitBuildSignature(c.Author), + Committer: goGitBuildSignature(c.Committer), + } + if err := f(commit); err != nil { + return err + } + + return nil + }) + + if err != nil && err != endError { + return fmt.Errorf("WalkCommitHistory: %s", err) + } + + return nil +} diff --git a/git/repository.go b/git/repository.go new file mode 100644 index 0000000000000000000000000000000000000000..8d0ce9ae04df6bbc068f5197a06f4f3f98c00cc2 --- /dev/null +++ b/git/repository.go @@ -0,0 +1,34 @@ +package git + +import ( + "io" + "time" +) + +type File struct { + Path string + Blob func() (io.ReadCloser, 
error) + Oid string + Size int64 +} + +type Signature struct { + Name string + Email string + When time.Time +} + +type Commit struct { + Author Signature + Committer Signature + Message string + Hash string +} + +type Repository interface { + EachFileChange(ins, mod, del FileFunc) error + EachCommit(f CommitFunc) error +} + +type FileFunc func(file *File, fromCommit, toCommit string) error +type CommitFunc func(commit *Commit) error diff --git a/git/repository_test.go b/git/repository_test.go new file mode 100644 index 0000000000000000000000000000000000000000..55440d7fcb69e610479c6fceaeb439670b35962c --- /dev/null +++ b/git/repository_test.go @@ -0,0 +1,272 @@ +package git_test + +import ( + "flag" + "io/ioutil" + "os" + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "gitlab.com/gitlab-org/es-git-go/git" +) + +var ( + testRepo = flag.String("test-repo", "../tmp/gitlab-test.git", "Path to `gitlab-test` repository for integration tests") +) + +const ( + initialSHA = "1a0b36b3cdad1d2ee32457c102a8c0b7056fa863" + headSHA = "b83d6e391c22777fca1ed3012fce84f633d7fed0" +) + +func checkDeps(t *testing.T) { + if _, err := os.Stat(*testRepo); err != nil { + t.Log("No test repo found at ", *testRepo) + t.SkipNow() + } +} + +func runEachCommit(repo git.Repository) (map[string]*git.Commit, []string, error) { + commits := make(map[string]*git.Commit) + commitHashes := []string{} + + err := repo.EachCommit(func(commit *git.Commit) error { + commits[commit.Hash] = commit + commitHashes = append(commitHashes, commit.Hash) + return nil + }) + + return commits, commitHashes, err +} + +func TestEachCommit(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, "", headSHA) + assert.NoError(t, err) + + commits, commitHashes, err := runEachCommit(repo) + assert.NoError(t, err) + + expectedCommits := []string{ + "b83d6e391c22777fca1ed3012fce84f633d7fed0", + "498214de67004b1da3d820901307bed2a68a8ef6", + "1b12f15a11fc6e62177bef08f47bc7b5ce50b141", + "38008cb17ce1466d8fec2dfa6f6ab8dcfe5cf49e", + "6907208d755b60ebeacb2e9dfea74c92c3449a1f", + "c347ca2e140aa667b968e51ed0ffe055501fe4f4", + "d59c60028b053793cecfb4022de34602e1a9218e", + "281d3a76f31c812dbf48abce82ccf6860adedd81", + "a5391128b0ef5d21df5dd23d98557f4ef12fae20", + "54fcc214b94e78d7a41a9a8fe6d87a5e59500e51", + "be93687618e4b132087f430a4d8fc3a609c9b77c", + "048721d90c449b244b7b4c53a9186b04330174ec", + "5f923865dde3436854e9ceb9cdb7815618d4e849", + "d2d430676773caa88cdaf7c55944073b2fd5561a", + "2ea1f3dec713d940208fb5ce4a38765ecb5d3f73", + "59e29889be61e6e0e5e223bfa9ac2721d31605b8", + "66eceea0db202bb39c4e445e8ca28689645366c5", + "08f22f255f082689c0d7d39d19205085311542bc", + "19e2e9b4ef76b422ce1154af39a91323ccc57434", + "c642fe9b8b9f28f9225d7ea953fe14e74748d53b", + "9a944d90955aaf45f6d0c88f30e27f8d2c41cec0", + "c7fbe50c7c7419d9701eebe64b1fdacc3df5b9dd", + "e56497bb5f03a90a51293fc6d516788730953899", + "4cd80ccab63c82b4bad16faa5193fbd2aa06df40", + "5937ac0a7beb003549fc5fd26fc247adbce4a52e", + "570e7b2abdd848b95f2f578043fc23bd6f6fd24d", + "6f6d7e7ed97bb5f0054f2b1df789b39ca89b6ff9", + "d14d6c0abdd253381df51a723d58691b2ee1ab08", + "c1acaa58bbcbc3eafe538cb8274ba387047b69f8", + "ae73cb07c9eeaf35924a10f713b364d32b2dd34f", + "874797c3a73b60d2187ed6e2fcabd289ff75171e", + "2f63565e7aac07bcdadb654e253078b727143ec4", + "33f3729a45c02fc67d00adb1b8bca394b0e761d9", + "913c66a37b4a45b9769037c55c2d238bd0942d2e", + "cfe32cf61b73a0d5e9f13e774abde7ff789b1660", + "6d394385cf567f80a8fd85055db1ab4c5295806f", + 
"1a0b36b3cdad1d2ee32457c102a8c0b7056fa863", + } + + // We don't mind the order these are given in + sort.Strings(expectedCommits) + sort.Strings(commitHashes) + + assert.Equal(t, expectedCommits, commitHashes) + + // Now choose one commit and check it in detail + + commit := commits[initialSHA] + date, err := time.Parse("Mon Jan 02 15:04:05 2006 -0700", "Thu Feb 27 00:03:18 2014 -0800") + assert.NoError(t, err) + + dmitriy := git.Signature{ + Name: "Dmitriy Zaporozhets", + Email: "dmitriy.zaporozhets@gmail.com", + When: date, + } + + assert.Equal(t, initialSHA, commit.Hash) + assert.Equal(t, "Initial commit\n", commit.Message) + assert.Equal(t, dmitriy, commit.Author) + assert.Equal(t, dmitriy, commit.Author) +} + +func TestEachCommitGivenRangeOf3Commits(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, "1b12f15a11fc6e62177bef08f47bc7b5ce50b141", headSHA) + assert.NoError(t, err) + + _, commitHashes, err := runEachCommit(repo) + assert.NoError(t, err) + + expected := []string{"498214de67004b1da3d820901307bed2a68a8ef6", headSHA} + sort.Strings(expected) + sort.Strings(commitHashes) + + assert.Equal(t, expected, commitHashes) +} + +func TestEachCommitGivenRangeOf2Commits(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, "498214de67004b1da3d820901307bed2a68a8ef6", headSHA) + assert.NoError(t, err) + + _, commitHashes, err := runEachCommit(repo) + assert.NoError(t, err) + + assert.Equal(t, []string{headSHA}, commitHashes) +} + +func TestEachCommitGivenRangeOf1Commit(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, headSHA, headSHA) + assert.NoError(t, err) + + _, commitHashes, err := runEachCommit(repo) + assert.NoError(t, err) + assert.Equal(t, []string{}, commitHashes) +} + +func TestEmptyToSHADefaultsToHeadSHA(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, "498214de67004b1da3d820901307bed2a68a8ef6", "") + assert.NoError(t, err) + + _, commitHashes, err := runEachCommit(repo) + assert.NoError(t, err) + assert.Equal(t, []string{headSHA}, commitHashes) +} + +// TODO: store the ins/mod/del status of each change +func runEachFileChange(repo git.Repository) (map[string]*git.File, []string, error) { + files := make(map[string]*git.File) + filePaths := []string{} + + store := func(f *git.File, _, _ string) error { + files[f.Path] = f + filePaths = append(filePaths, f.Path) + return nil + } + + err := repo.EachFileChange(store, store, store) + return files, filePaths, err +} + +func TestEachFileChangeMod(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, "", headSHA) + assert.NoError(t, err) + + files, filePaths, err := runEachFileChange(repo) + assert.NoError(t, err) + + expectedFiles := []string{ + ".gitattributes", + ".gitignore", + ".gitmodules", + "CHANGELOG", + "CONTRIBUTING.md", + "Gemfile.zip", + "LICENSE", + "MAINTENANCE.md", + "PROCESS.md", + "README", + "README.md", + "VERSION", + "bar/branch-test.txt", + "custom-highlighting/test.gitlab-custom", + "encoding/feature-1.txt", + "encoding/feature-2.txt", + "encoding/hotfix-1.txt", + "encoding/hotfix-2.txt", + "encoding/iso8859.txt", + "encoding/russian.rb", + "encoding/test.txt", + "encoding/テスト.txt", + "encoding/テスト.xls", + "files/html/500.html", + "files/images/6049019_460s.jpg", + "files/images/logo-black.png", + "files/images/logo-white.png", + "files/images/wm.svg", + "files/js/application.js", + "files/js/commit.coffee", + "files/lfs/lfs_object.iso", + 
"files/markdown/ruby-style-guide.md", + "files/ruby/popen.rb", + "files/ruby/regex.rb", + "files/ruby/version_info.rb", + "files/whitespace", + "foo/bar/.gitkeep", + "with space/README.md", + } + + // We don't mind the order these are given in + sort.Strings(expectedFiles) + sort.Strings(filePaths) + + assert.Equal(t, expectedFiles, filePaths) + + // Now choose one file and check it in detail + file := files["VERSION"] + blob, err := file.Blob() + assert.NoError(t, err) + data, err := ioutil.ReadAll(blob) + assert.NoError(t, err) + + assert.Equal(t, "VERSION", file.Path) + assert.Equal(t, "998707b421c89bd9a3063333f9f728ef3e43d101", file.Oid) + assert.Equal(t, int64(10), file.Size) + assert.Equal(t, "6.7.0.pre\n", string(data)) +} + +func TestEachFileChangeGivenRangeOfThreeCommits(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, "1b12f15a11fc6e62177bef08f47bc7b5ce50b141", headSHA) + assert.NoError(t, err) + + _, filePaths, err := runEachFileChange(repo) + + assert.Equal(t, []string{"bar/branch-test.txt"}, filePaths) +} + +func TestEachFileChangeGivenRangeOfTwoCommits(t *testing.T) { + checkDeps(t) + + repo, err := git.NewGoGitRepository(*testRepo, "498214de67004b1da3d820901307bed2a68a8ef6", headSHA) + assert.NoError(t, err) + + _, filePaths, err := runEachFileChange(repo) + + assert.Equal(t, []string{}, filePaths) +} diff --git a/indexer/blob.go b/indexer/blob.go new file mode 100644 index 0000000000000000000000000000000000000000..4656728ebbcbc9b1c8a86d3c71c1f616c99aaafc --- /dev/null +++ b/indexer/blob.go @@ -0,0 +1,123 @@ +package indexer + +import ( + "bytes" + "fmt" + "io/ioutil" + + "gitlab.com/gitlab-org/es-git-go/git" + "gitlab.com/gitlab-org/es-git-go/linguist" +) + +var ( + SkipTooLargeBlob = fmt.Errorf("Blob should be skipped: Too large") + SkipBinaryBlob = fmt.Errorf("Blob should be skipped: binary") +) + +const ( + binarySearchLimit = 8 * 1024 // 8 KiB, Same as git + maxBlobSize = 1024 * 1024 // 1MiB, same as gitlab-elasticsearch-git +) + +func isSkipBlobErr(err error) bool { + switch err { + case SkipTooLargeBlob: + return true + case SkipBinaryBlob: + return true + } + + return false +} + +type Blob struct { + Type string `json:"type"` + ID string `json:"-"` + OID string `json:"oid"` + RepoID string `json:"rid"` + CommitSHA string `json:"commit_sha"` + Content string `json:"content"` + Path string `json:"path"` + + // Message copied from gitlab-elasticsearch-git: + // + // We're duplicating file_name parameter here because we need another + // analyzer for it. + // + //Ideally this should be done with copy_to: 'blob.file_name' option + //but it does not work in ES v2.3.*. We're doing it so to not make users + //install newest versions + // + //https://github.com/elastic/elasticsearch-mapper-attachments/issues/124 + Filename string `json:"file_name"` + + Language string `json:"language"` +} + +func GenerateBlobID(parentID, filename string) string { + return fmt.Sprintf("%s_%s", parentID, filename) +} + +func BuildBlob(file *git.File, parentID, commitSHA string) (*Blob, error) { + if file.Size > maxBlobSize { + return nil, SkipTooLargeBlob + } + + reader, err := file.Blob() + if err != nil { + return nil, err + } + + defer reader.Close() + + // FIXME(nick): This doesn't look cheap. Check the RAM & CPU pressure, esp. 
+ // for large blobs + b, err := ioutil.ReadAll(reader) + if err != nil { + return nil, err + } + + if DetectBinary(b) { + return nil, SkipBinaryBlob + } + + content := tryEncodeBytes(b) + filename := tryEncodeString(file.Path) + + return &Blob{ + Type: "blob", + ID: GenerateBlobID(parentID, filename), + OID: file.Oid, + RepoID: parentID, + CommitSHA: commitSHA, + Content: content, + Path: filename, + Filename: filename, + Language: DetectLanguage(filename, b), + }, nil +} + +// DetectLanguage returns a string describing the language of the file. This is +// programming language, rather than natural language. +// +// If no language is detected, "Text" is returned. +func DetectLanguage(filename string, data []byte) string { + lang := linguist.DetectLanguage(filename, data) + if lang != nil { + return lang.Name + } + + return "Text" +} + +// DetectBinary checks whether the passed-in data contains a NUL byte. Only scan +// the start of large blobs. This is the same test performed by git to check +// text/binary +func DetectBinary(data []byte) bool { + searchLimit := binarySearchLimit + if len(data) < searchLimit { + searchLimit = len(data) + } + + return bytes.Contains(data[:searchLimit], []byte{0}) +} diff --git a/indexer/blob_test.go b/indexer/blob_test.go new file mode 100644 index 0000000000000000000000000000000000000000..e9cde92fa8f6ce92ffd3687dd8057b3c4bf8b1f9 --- /dev/null +++ b/indexer/blob_test.go @@ -0,0 +1,72 @@ +package indexer_test + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + + "gitlab.com/gitlab-org/es-git-go/indexer" +) + +func TestBuildBlob(t *testing.T) { + file := gitFile("foo/bar", "foo") + expected := validBlob(file, "foo", "Text") + + actual, err := indexer.BuildBlob(file, expected.RepoID, expected.CommitSHA) + assert.NoError(t, err) + + assert.Equal(t, expected, actual) + + expectedJSON := `{ + "commit_sha" : "` + expected.CommitSHA + `", + "content" : "` + expected.Content + `", + "file_name" : "` + expected.Filename + `", + "language" : "` + expected.Language + `", + "oid" : "` + expected.OID + `", + "path" : "` + expected.Path + `", + "rid" : "` + expected.RepoID + `", + "type" : "blob" + }` + + actualJSON, err := json.Marshal(actual) + assert.NoError(t, err) + assert.JSONEq(t, expectedJSON, string(actualJSON)) +} + +func TestBuildBlobSkipsLargeBlobs(t *testing.T) { + file := gitFile("foo/bar", "foo") + file.Size = 1024*1024 + 1 + + blob, err := indexer.BuildBlob(file, parentID, sha) + assert.Error(t, err, indexer.SkipTooLargeBlob) + assert.Nil(t, blob) +} + +func TestBuildBlobSkipsBinaryBlobs(t *testing.T) { + file := gitFile("foo/bar", "foo\x00") + + blob, err := indexer.BuildBlob(file, parentID, sha) + assert.Equal(t, err, indexer.SkipBinaryBlob) + assert.Nil(t, blob) +} + +func TestBuildBlobDetectsLanguageByFilename(t *testing.T) { + file := gitFile("Makefile.am", "foo") + blob, err := indexer.BuildBlob(file, parentID, sha) + + assert.NoError(t, err) + assert.Equal(t, "Makefile", blob.Language) +} + +func TestBuildBlobDetectsLanguageByExtension(t *testing.T) { + file := gitFile("foo.rb", "foo") + blob, err := indexer.BuildBlob(file, parentID, sha) + + assert.NoError(t, err) + assert.Equal(t, "Ruby", blob.Language) +} + +func TestGenerateBlobID(t *testing.T) { + assert.Equal(t, "projectID_path", indexer.GenerateBlobID("projectID", "path")) +} diff --git a/indexer/commit.go b/indexer/commit.go new file mode 100644 index 0000000000000000000000000000000000000000..c9bef4d6fcfbaddd89a4c8d7ba63ea96105b6daf --- /dev/null +++ 
b/indexer/commit.go @@ -0,0 +1,35 @@ +package indexer + +import ( + "fmt" + + "gitlab.com/gitlab-org/es-git-go/git" +) + +type Commit struct { + Type string `json:"type"` + ID string `json:"-"` + Author *Person `json:"author"` + Committer *Person `json:"committer"` + RepoID string `json:"rid"` + Message string `json:"message"` + SHA string `json:"sha"` +} + +func GenerateCommitID(parentID, commitSHA string) string { + return fmt.Sprintf("%s_%s", parentID, commitSHA) +} + +func BuildCommit(c *git.Commit, parentID string) *Commit { + sha := c.Hash + + return &Commit{ + Type: "commit", + Author: BuildPerson(c.Author), + Committer: BuildPerson(c.Committer), + ID: GenerateCommitID(parentID, sha), + RepoID: parentID, + Message: tryEncodeString(c.Message), + SHA: sha, + } +} diff --git a/indexer/commit_test.go b/indexer/commit_test.go new file mode 100644 index 0000000000000000000000000000000000000000..6806390fba4f7652fdaddb33f94c3d309444bb83 --- /dev/null +++ b/indexer/commit_test.go @@ -0,0 +1,44 @@ +package indexer_test + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + + "gitlab.com/gitlab-org/es-git-go/indexer" +) + +func TestBuildCommit(t *testing.T) { + gitCommit := gitCommit("Initial commit") + + expected := validCommit(gitCommit) + actual := indexer.BuildCommit(gitCommit, expected.RepoID) + + assert.Equal(t, expected, actual) + + expectedJSON := `{ + "sha" : "` + expected.SHA + `", + "message" : "` + expected.Message + `", + "author" : { + "name": "` + expected.Author.Name + `", + "email": "` + expected.Author.Email + `", + "time": "` + indexer.GenerateDate(gitCommit.Author.When) + `" + }, + "committer" : { + "name": "` + expected.Committer.Name + `", + "email": "` + expected.Committer.Email + `", + "time": "` + indexer.GenerateDate(gitCommit.Committer.When) + `" + }, + "rid" : "` + expected.RepoID + `", + "type" : "commit" + }` + + actualJSON, err := json.Marshal(actual) + assert.NoError(t, err) + assert.JSONEq(t, expectedJSON, string(actualJSON)) +} + +func TestGenerateCommitID(t *testing.T) { + assert.Equal(t, "projectID_sha", indexer.GenerateCommitID("projectID", "sha")) +} diff --git a/indexer/encoding.go b/indexer/encoding.go new file mode 100644 index 0000000000000000000000000000000000000000..b0b544779520572749c6cba3676585225c2b5619 --- /dev/null +++ b/indexer/encoding.go @@ -0,0 +1,68 @@ +package indexer + +import ( + "fmt" + "log" + + "github.com/lupine/icu" +) + +var ( + detector *icu.CharsetDetector + converter = icu.NewCharsetConverter(maxBlobSize) +) + +func init() { + var err error + detector, err = icu.NewCharsetDetector() + if err != nil { + panic(err) + } +} + +func tryEncodeString(s string) string { + encoded, err := encodeString(s) + if err != nil { + log.Println(err) + return s // TODO: Run it through the UTF-8 replacement encoder + } + + return encoded +} + +func tryEncodeBytes(b []byte) string { + encoded, err := encodeBytes(b) + if err != nil { + log.Println(err) + s := string(b) + return s // TODO: Run it through the UTF-8 replacement encoder + } + + return encoded +} + +func encodeString(s string) (string, error) { + return encodeBytes([]byte(s)) +} + +// encodeString converts a string from an arbitrary encoding to UTF-8 +func encodeBytes(b []byte) (string, error) { + if len(b) == 0 { + return "", nil + } + + matches, err := detector.GuessCharset(b) + if err != nil { + return "", fmt.Errorf("Couldn't guess charset: %s", err) + } + + // Try encoding for each match, returning the first that succeeds + for _, match := range matches 
{ + utf8, err := converter.ConvertToUtf8(b, match.Charset) + if err == nil { + return string(utf8), nil + } + } + + return "", fmt.Errorf("Failed to convert from %s to UTF-8", matches[0].Charset) +} diff --git a/indexer/indexer.go b/indexer/indexer.go new file mode 100644 index 0000000000000000000000000000000000000000..12a017b8ececf7d0dc4c8444ac4fcb72f8818ae2 --- /dev/null +++ b/indexer/indexer.go @@ -0,0 +1,76 @@ +package indexer + +import ( + "fmt" + "log" + + "gitlab.com/gitlab-org/es-git-go/git" +) + +type Submitter interface { + ParentID() string + + Index(id string, thing interface{}) + Remove(id string) + + Flush() error +} + +type Indexer struct { + git.Repository + Submitter +} + +func (i *Indexer) SubmitCommit(c *git.Commit) error { + commit := BuildCommit(c, i.Submitter.ParentID()) + + i.Submitter.Index(commit.ID, map[string]interface{}{"commit": commit}) + return nil +} + +func (i *Indexer) SubmitBlob(f *git.File, _, toCommit string) error { + blob, err := BuildBlob(f, i.Submitter.ParentID(), toCommit) + if err != nil { + if isSkipBlobErr(err) { + return nil + } + + return fmt.Errorf("Blob %s: %s", f.Path, err) + } + + i.Submitter.Index(blob.ID, map[string]interface{}{"blob": blob}) + return nil +} + +func (i *Indexer) RemoveBlob(file *git.File, _, _ string) error { + blobID := GenerateBlobID(i.Submitter.ParentID(), file.Path) + + i.Submitter.Remove(blobID) + return nil +} + +func (i *Indexer) IndexCommits() error { + return i.Repository.EachCommit(i.SubmitCommit) +} + +func (i *Indexer) IndexBlobs() error { + return i.Repository.EachFileChange(i.SubmitBlob, i.SubmitBlob, i.RemoveBlob) +} + +func (i *Indexer) Index() error { + if err := i.IndexBlobs(); err != nil { + log.Print("Error while indexing blobs: ", err) + return err + } + + if err := i.IndexCommits(); err != nil { + log.Print("Error while indexing commits: ", err) + return err + } + + if err := i.Submitter.Flush(); err != nil { + log.Print("Error while flushing requests: ", err) + } + + return nil +} diff --git a/indexer/indexer_test.go b/indexer/indexer_test.go new file mode 100644 index 0000000000000000000000000000000000000000..1b51b7912945e34fb7bb5e6a855efd67a47928e2 --- /dev/null +++ b/indexer/indexer_test.go @@ -0,0 +1,220 @@ +package indexer_test + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "gitlab.com/gitlab-org/es-git-go/git" + "gitlab.com/gitlab-org/es-git-go/indexer" +) + +const ( + sha = "9876543210987654321098765432109876543210" + oid = "0123456789012345678901234567890123456789" + parentID = "667" +) + +type fakeSubmitter struct { + flushed int + + indexed int + indexedID []string + indexedThing []interface{} + + removed int + removedID []string +} + +type fakeRepository struct { + commits []*git.Commit + + added []*git.File + modified []*git.File + removed []*git.File +} + +func (f *fakeSubmitter) ParentID() string { + return parentID +} + +func (f *fakeSubmitter) Index(id string, thing interface{}) { + f.indexed++ + f.indexedID = append(f.indexedID, id) + f.indexedThing = append(f.indexedThing, thing) +} + +func (f *fakeSubmitter) Remove(id string) { + f.removed++ + f.removedID = append(f.removedID, id) +} + +func (f *fakeSubmitter) Flush() error { + f.flushed++ + return nil +} + +func (r *fakeRepository) EachFileChange(ins, mod, del git.FileFunc) error { + for _, file := range r.added { + if err := ins(file, sha, sha); err != nil { + return err + } + } + + for _, file := range r.modified { + if err := mod(file, sha, sha); err != 
nil { + return err + } + } + + for _, file := range r.removed { + if err := del(file, sha, sha); err != nil { + return err + } + } + + return nil +} + +func (r *fakeRepository) EachCommit(f git.CommitFunc) error { + for _, commit := range r.commits { + if err := f(commit); err != nil { + return err + } + } + + return nil +} + +func setupIndexer() (*indexer.Indexer, *fakeRepository, *fakeSubmitter) { + repo := &fakeRepository{} + submitter := &fakeSubmitter{} + + return &indexer.Indexer{ + Repository: repo, + Submitter: submitter, + }, repo, submitter +} + +func readerFunc(data string, err error) func() (io.ReadCloser, error) { + return func() (io.ReadCloser, error) { + return ioutil.NopCloser(strings.NewReader(data)), err + } +} + +func gitFile(path, content string) *git.File { + return &git.File{ + Path: path, + Blob: readerFunc(content, nil), + Size: int64(len(content)), + Oid: oid, + } +} + +func gitCommit(message string) *git.Commit { + return &git.Commit{ + Author: git.Signature{ + Email: "job@gitlab.com", + Name: "Job van der Voort", + When: time.Date(2016, time.September, 27, 14, 37, 46, 0, time.UTC), + }, + Committer: git.Signature{ + Email: "nick@gitlab.com", + Name: "Nick Thomas", + When: time.Date(2017, time.October, 28, 15, 38, 47, 1, time.UTC), + }, + Message: message, + Hash: sha, + } +} + +func validBlob(file *git.File, content, language string) *indexer.Blob { + return &indexer.Blob{ + Type: "blob", + ID: indexer.GenerateBlobID(parentID, file.Path), + OID: oid, + RepoID: parentID, + CommitSHA: sha, + Content: content, + Path: file.Path, + Filename: file.Path, + Language: language, + } +} + +func validCommit(gitCommit *git.Commit) *indexer.Commit { + return &indexer.Commit{ + Type: "commit", + ID: indexer.GenerateCommitID(parentID, gitCommit.Hash), + Author: indexer.BuildPerson(gitCommit.Author), + Committer: indexer.BuildPerson(gitCommit.Committer), + RepoID: parentID, + Message: gitCommit.Message, + SHA: sha, + } +} + +func TestIndex(t *testing.T) { + idx, repo, submit := setupIndexer() + + gitCommit := gitCommit("Initial commit") + gitAdded := gitFile("foo/bar", "added file") + gitModified := gitFile("foo/baz", "modified file") + gitRemoved := gitFile("foo/qux", "removed file") + + gitTooBig := gitFile("invalid/too-big", "") + gitTooBig.Size = int64(1024*1024 + 1) + + gitBinary := gitFile("invalid/binary", "foo\x00") + + commit := validCommit(gitCommit) + added := validBlob(gitAdded, "added file", "Text") + modified := validBlob(gitModified, "modified file", "Text") + removed := validBlob(gitRemoved, "removed file", "Text") + + repo.commits = append(repo.commits, gitCommit) + repo.added = append(repo.added, gitAdded, gitTooBig, gitBinary) + repo.modified = append(repo.modified, gitModified) + repo.removed = append(repo.removed, gitRemoved) + + idx.Index() + + assert.Equal(t, submit.indexed, 3) + assert.Equal(t, submit.removed, 1) + + assert.Equal(t, parentID+"_"+added.Path, submit.indexedID[0]) + assert.Equal(t, map[string]interface{}{"blob": added}, submit.indexedThing[0]) + + assert.Equal(t, parentID+"_"+modified.Path, submit.indexedID[1]) + assert.Equal(t, map[string]interface{}{"blob": modified}, submit.indexedThing[1]) + + assert.Equal(t, parentID+"_"+commit.SHA, submit.indexedID[2]) + assert.Equal(t, map[string]interface{}{"commit": commit}, submit.indexedThing[2]) + + assert.Equal(t, parentID+"_"+removed.Path, submit.removedID[0]) + + assert.Equal(t, submit.flushed, 1) +} + +func TestErrorIndexingSkipsRemainder(t *testing.T) { + idx, repo, submit := setupIndexer() 
+ + gitOKFile := gitFile("ok", "") + + gitBreakingFile := gitFile("broken", "") + gitBreakingFile.Blob = readerFunc("", fmt.Errorf("Error")) + + repo.added = append(repo.added, gitBreakingFile, gitOKFile) + + err := idx.Index() + + assert.Error(t, err) + assert.Equal(t, submit.indexed, 0) + assert.Equal(t, submit.removed, 0) + assert.Equal(t, submit.flushed, 0) +} diff --git a/indexer/person.go b/indexer/person.go new file mode 100644 index 0000000000000000000000000000000000000000..eb75a836fd19ff4142f382fb14c5a794b84c1151 --- /dev/null +++ b/indexer/person.go @@ -0,0 +1,29 @@ +package indexer + +import ( + "time" + + "gitlab.com/gitlab-org/es-git-go/git" +) + +const ( + elasticTimeFormat = "20060102T150405-0700" +) + +type Person struct { + Name string `json:"name"` + Email string `json:"email"` + Time string `json:"time"` // %Y%m%dT%H%M%S%z +} + +func GenerateDate(t time.Time) string { + return t.Format(elasticTimeFormat) +} + +func BuildPerson(p git.Signature) *Person { + return &Person{ + Name: tryEncodeString(p.Name), + Email: tryEncodeString(p.Email), + Time: GenerateDate(p.When), + } +} diff --git a/integration_test.go b/integration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..d48fdee1fccbfcf316e9b4bbcc1dd66572834913 --- /dev/null +++ b/integration_test.go @@ -0,0 +1,206 @@ +package main_test + +import ( + "encoding/json" + "flag" + "fmt" + "os" + "os/exec" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "gitlab.com/gitlab-org/es-git-go/elastic" + "gitlab.com/gitlab-org/es-git-go/indexer" +) + +var ( + binary = flag.String("binary", "./bin/es-git-go", "Path to `es-git-go` binary for integration tests") + testRepo = flag.String("test-repo", "./tmp/gitlab-test.git", "Path to `gitlab-test` repository for integration tests") +) + +const ( + projectID = "667" + headSHA = "b83d6e391c22777fca1ed3012fce84f633d7fed0" +) + +func checkDeps(t *testing.T) { + if os.Getenv("ELASTIC_CONNECTION_INFO") == "" { + t.Log("ELASTIC_CONNECTION_INFO not set") + t.Skip() + } + + if testing.Short() { + t.Log("Test run with -short, skipping integration test") + t.Skip() + } + + if _, err := os.Stat(*binary); err != nil { + t.Log("No binary found at ", *binary) + t.Skip() + } + + if _, err := os.Stat(*testRepo); err != nil { + t.Log("No test repo found at ", *testRepo) + t.Skip() + } +} + +func buildIndex(t *testing.T) (*elastic.Client, func()) { + railsEnv := fmt.Sprintf("test-integration-%d", time.Now().Unix()) + os.Setenv("RAILS_ENV", railsEnv) + + client, err := elastic.FromEnv(projectID) + assert.NoError(t, err) + + assert.NoError(t, client.CreateIndex()) + + return client, func() { + client.DeleteIndex() + } +} + +func run(from, to string) error { + cmd := exec.Command(*binary, projectID, *testRepo) + cmd.Env = os.Environ() + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if from != "" { + cmd.Env = append(cmd.Env, "FROM_SHA="+from) + } + + if to != "" { + cmd.Env = append(cmd.Env, "TO_SHA="+to) + } + + return cmd.Run() +} + +func TestIndexingRemovesFiles(t *testing.T) { + checkDeps(t) + c, td := buildIndex(t) + defer td() + + // The commit before files/empty is removed - so it should be indexed + assert.NoError(t, run("", "19e2e9b4ef76b422ce1154af39a91323ccc57434")) + _, err := c.GetBlob("files/empty") + assert.NoError(t, err) + + // Now we expect it to have been removed + assert.NoError(t, run("19e2e9b4ef76b422ce1154af39a91323ccc57434", "08f22f255f082689c0d7d39d19205085311542bc")) + _, err = c.GetBlob("files/empty") + assert.Error(t, err) +} + +// 
Go source is defined to be UTF-8 encoded, so literals here are UTF-8 +func TestIndexingTranscodesToUTF8(t *testing.T) { + checkDeps(t) + c, td := buildIndex(t) + defer td() + + assert.NoError(t, run("", headSHA)) + + for _, tc := range []struct{ + path string + expected string + } { + {"encoding/iso8859.txt", "狞\n"}, // GB18030 + {"encoding/test.txt", "これはテストです。\nこれもマージして下さい。\n\nAdd excel file.\nDelete excel file."}, // SHIFT_JIS + } { + + blob, err := c.GetBlob(tc.path) + assert.NoError(t, err) + + blobDoc := make(map[string]*indexer.Blob) + assert.NoError(t, json.Unmarshal(*blob.Source, &blobDoc)) + + assert.Equal(t, tc.expected, blobDoc["blob"].Content) + } +} + +func TestIndexingGitlabTest(t *testing.T) { + checkDeps(t) + c, td := buildIndex(t) + defer td() + + assert.NoError(t, run("", headSHA)) + + // Check the indexing of a commit + commit, err := c.GetCommit(headSHA) + assert.NoError(t, err) + assert.True(t, commit.Found) + assert.Equal(t, "repository", commit.Type) + assert.Equal(t, projectID+"_"+headSHA, commit.Id) + assert.Equal(t, projectID, commit.Routing) + assert.Equal(t, projectID, commit.Parent) + + doc := make(map[string]map[string]interface{}) + assert.NoError(t, json.Unmarshal(*commit.Source, &doc)) + + commitDoc, ok := doc["commit"] + assert.True(t, ok) + assert.Equal( + t, + map[string]interface{}{ + "type": "commit", + "sha": headSHA, + "author": map[string]interface{}{ + "email": "job@gitlab.com", + "name": "Job van der Voort", + "time": "20160927T143746+0000", + }, + "committer": map[string]interface{}{ + "email": "job@gitlab.com", + "name": "Job van der Voort", + "time": "20160927T143746+0000", + }, + "rid": projectID, + "message": "Merge branch 'branch-merged' into 'master'\r\n\r\nadds bar folder and branch-test text file to check Repository merged_to_root_ref method\r\n\r\n\r\n\r\nSee merge request !12", + }, + commitDoc, + ) + + // Check the indexing of a text blob + blob, err := c.GetBlob("README.md") + assert.NoError(t, err) + assert.True(t, blob.Found) + assert.Equal(t, "repository", blob.Type) + assert.Equal(t, projectID+"_README.md", blob.Id) + assert.Equal(t, projectID, blob.Routing) + assert.Equal(t, projectID, blob.Parent) + + doc = make(map[string]map[string]interface{}) + assert.NoError(t, json.Unmarshal(*blob.Source, &doc)) + + blobDoc, ok := doc["blob"] + assert.True(t, ok) + assert.Equal( + t, + map[string]interface{}{ + "type": "blob", + "language": "Markdown", + "path": "README.md", + "file_name": "README.md", + "oid": "faaf198af3a36dbf41961466703cc1d47c61d051", + "rid": projectID, + "commit_sha": headSHA, + "content": "testme\n======\n\nSample repo for testing gitlab features\n", + }, + blobDoc, + ) + + // Check that a binary blob isn't indexed + _, err = c.GetBlob("Gemfile.zip") + assert.Error(t, err) + + // Test that timezones are preserved + commit, err = c.GetCommit("498214de67004b1da3d820901307bed2a68a8ef6") + assert.NoError(t, err) + + cDoc := make(map[string]*indexer.Commit) + assert.NoError(t, json.Unmarshal(*commit.Source, &cDoc)) + assert.Equal(t, "20160921T161326+0100", cDoc["commit"].Author.Time) + assert.Equal(t, "20160921T161326+0100", cDoc["commit"].Committer.Time) +} diff --git a/linguist/LICENSE.md b/linguist/LICENSE.md new file mode 100644 index 0000000000000000000000000000000000000000..01446ee32e148d6f54dd4950ed2a8f4c5560d2d1 --- /dev/null +++ b/linguist/LICENSE.md @@ -0,0 +1,33 @@ +# Linguist + +The Ruby [github-linguist](https://github.com/github/linguist) Gem is a useful +collection of helpers for
language detection. This directory contains +reimplementations of parts of the functionality of that gem. + +The file `languages.go` is programmatically generated from +[this file](https://github.com/github/linguist/blob/master/lib/linguist/languages.yml). + +The following license applies: + +Copyright (c) 2017 GitHub, Inc. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. diff --git a/linguist/generate_languages.go.rb b/linguist/generate_languages.go.rb new file mode 100644 index 0000000000000000000000000000000000000000..dea498ccaaa03474674c83a8c954b32e911576a3 --- /dev/null +++ b/linguist/generate_languages.go.rb @@ -0,0 +1,52 @@ +require 'net/http' +require 'yaml' + +def array(ary) + "[]string{#{ary.map(&:inspect).join(", ")}}" +end + +def bool(value, default) + value = default if value.nil? 
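+ # Coerce to a strict boolean and render it as a Go literal ("true" / "false")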
+ (!!value).inspect +end + +def build_language(name, details) + out = [] + + out << ["Name", name.inspect] + out << ["Type", details['type'].inspect ] if details['type'] + out << ["Group", details['group'].inspect ] if details['group'] + out << ["Color", details['color'].inspect ] if details['color'] + out << ["Aliases", array(details['aliases']) ] if details['aliases'] + out << ["Extensions", array(details['extensions']) ] if details['extensions'] + out << ["Filenames", array(details['filenames']) ] if details['filenames'] + out << ["Interpreters", array(details['interpreters'])] if details['interpreters'] + out << ["TmScope", details['tm_scope'].inspect ] if details['tm_scope'] + out << ["AceMode", details['type'].inspect ] if details['ace_mode'] + out << ["LanguageID", details['language_id'] ] if details['language_id'] + + # Two strange booleans + out << ["Wrap", bool(details['wrap'], false) ] + out << ["Searchable", bool(details['searchable'], true) ] + + max_key = out.map {|k,v| k.size }.max + out = out.map do |k, v| + "\t\t\t#{k}:#{" " * (max_key - k.size)} #{v}," + end + + "\t\t#{name.inspect}: &Language{\n#{out.join("\n")}\n\t\t},\n" +end + +LANGUAGES_YML = URI.parse("https://raw.githubusercontent.com/github/linguist/v4.7.6/lib/linguist/languages.yml") + +languages = YAML.load(Net::HTTP.get(LANGUAGES_YML)) + +f = File.open("languages.go", "w") +f.puts "package linguist" +f.puts "" +f.puts "var (" +f.puts "\tLanguages = map[string]*Language{" +languages.each {|name, details| f.puts build_language(name, details) } +f.puts "\t}" +f.puts ")" +f.close diff --git a/linguist/language.go b/linguist/language.go new file mode 100644 index 0000000000000000000000000000000000000000..963c7de9b015c45786db3a3fdbbdf038eb8f80c2 --- /dev/null +++ b/linguist/language.go @@ -0,0 +1,95 @@ +package linguist + +import ( + "path" +) + +// There's no YAML support in the Go stdlib, so use ruby instead. +// This will create a file `languages.go`, containing the Languages variable +//go:generate ruby generate_languages.go.rb + +type Language struct { + Name string + Type string + Group string + Color string + Aliases []string + Extensions []string + Filenames []string + Interpreters []string + TmScope string + AceMode string + Wrap bool + Searchable bool +} + +var ( + languagesByExtension map[string][]*Language + languagesByFilename map[string][]*Language +) + +func init() { + languagesByExtension = make(map[string][]*Language) + for _, lang := range Languages { + for _, ext := range lang.Extensions { + languagesByExtension[ext] = append(languagesByExtension[ext], lang) + } + } + + languagesByFilename = make(map[string][]*Language) + for _, lang := range Languages { + for _, filename := range lang.Filenames { + languagesByFilename[filename] = append(languagesByFilename[filename], lang) + } + } +} + +// and returns only the languages present in both A and B +func and(a, b []*Language) []*Language { + var out []*Language + + for _, langA := range a { + for _, langB := range b { + if langA == langB { + out = append(out, langA) + } + } + } + + return out +} + +func DetectLanguageByFilename(filename string) []*Language { + return languagesByFilename[path.Base(filename)] +} + +func DetectLanguageByExtension(filename string) []*Language { + return languagesByExtension[path.Ext(filename)] +} + +func DetectLanguage(filename string, blob []byte) *Language { + // TODO: github-linguist uses a range of strategies not replicated here.
+ // It does the following: + // + // * modelines + // * shebangs + // * filename / extension (we have these) + // * heuristics + // * classifier + + byFilename := DetectLanguageByFilename(filename) + if len(byFilename) == 1 { + return byFilename[0] + } + + byExtension := DetectLanguageByExtension(filename) + if len(byFilename) > 1 { + byExtension = and(byFilename, byExtension) + } + + if len(byExtension) > 0 { + return byExtension[0] + } + + return nil +} diff --git a/linguist/language_test.go b/linguist/language_test.go new file mode 100644 index 0000000000000000000000000000000000000000..cd0b9104d2c1d5d562f9646525669f3c3c8b722a --- /dev/null +++ b/linguist/language_test.go @@ -0,0 +1,86 @@ +package linguist_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "gitlab.com/gitlab-org/es-git-go/linguist" +) + +func TestCommonLanguagesAreDetectedByExtension(t *testing.T) { + type tc struct { + filename string + name string + } + + for _, tc := range []struct { + file string + lang string + }{ + {"foo.go", "Go"}, + {".go", "Go"}, + {"foo.go.rb", "Ruby"}, + {"foo.rb", "Ruby"}, + {"foo.c", "C"}, + {"foo.cpp", "C++"}, + {"/bar/foo.ini", "INI"}, + {"bar/foo.ini", "INI"}, + {"c:/foo.ini", "INI"}, + {`c:\foo.ini`, "INI"}, + {"foo.md", "Markdown"}, // Multiple possible languages + } { + langs := linguist.DetectLanguageByExtension(tc.file) + assert.Equal(t, 1, len(langs)) + assert.Equal(t, tc.lang, langs[0].Name) + + lang := linguist.DetectLanguage(tc.file, []byte{}) + assert.NotNil(t, lang) + assert.Equal(t, tc.lang, lang.Name) + + } +} + +func TestImaginaryLanguageIsntRecognised(t *testing.T) { + lang := linguist.DetectLanguageByFilename("foo.absolutely-nobody-will-make-this-extension") + assert.Nil(t, lang) +} + +// This test checks the content of languages.go against expectations chosen to +// validate the go:generate script +func TestAttributesAreCopiedCorrectly(t *testing.T) { + ada := linguist.Languages["Ada"] + assert.NotNil(t, ada) + + cmake := linguist.Languages["CMake"] + assert.NotNil(t, cmake) + + gettext := linguist.Languages["Gettext Catalog"] + assert.NotNil(t, gettext) + + golang := linguist.Languages["Go"] + assert.NotNil(t, golang) + + json := linguist.Languages["JSON"] + assert.NotNil(t, json) + + markdown := linguist.Languages["Markdown"] + assert.NotNil(t, markdown) + + ruby := linguist.Languages["Ruby"] + assert.NotNil(t, ruby) + + assert.Equal(t, "Go", golang.Name) + assert.Equal(t, "programming", golang.Type) + assert.Equal(t, "JavaScript", json.Group) + assert.Equal(t, "#375eab", golang.Color) + assert.Equal(t, []string{"ada95", "ada2005"}, ada.Aliases) + assert.Equal(t, []string{".go"}, golang.Extensions) + assert.Equal(t, []string{"CMakeLists.txt"}, cmake.Filenames) + assert.Equal(t, []string{"ruby", "macruby", "rake", "jruby", "rbx"}, ruby.Interpreters) + assert.Equal(t, "source.gfm", markdown.TmScope) + assert.Equal(t, "programming", golang.AceMode) + assert.Equal(t, false, gettext.Searchable) + assert.Equal(t, true, markdown.Wrap) + +} diff --git a/linguist/languages.go b/linguist/languages.go new file mode 100644 index 0000000000000000000000000000000000000000..4799f974e57051d099557d54705c9de4994f17cb --- /dev/null +++ b/linguist/languages.go @@ -0,0 +1,3868 @@ +package linguist + +var ( + Languages = map[string]*Language{ + "ABAP": &Language{ + Name: "ABAP", + Type: "programming", + Color: "#E8274B", + Extensions: []string{".abap"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "AGS Script": &Language{ + Name: "AGS Script", + 
Type: "programming", + Color: "#B9D9FF", + Aliases: []string{"ags"}, + Extensions: []string{".asc", ".ash"}, + TmScope: "source.c++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "AMPL": &Language{ + Name: "AMPL", + Type: "programming", + Color: "#E6EFBB", + Extensions: []string{".ampl", ".mod"}, + TmScope: "source.ampl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ANTLR": &Language{ + Name: "ANTLR", + Type: "programming", + Color: "#9DC3FF", + Extensions: []string{".g4"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "API Blueprint": &Language{ + Name: "API Blueprint", + Type: "markup", + Color: "#2ACCA8", + Extensions: []string{".apib"}, + TmScope: "text.html.markdown.source.gfm.apib", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "APL": &Language{ + Name: "APL", + Type: "programming", + Color: "#5A8164", + Extensions: []string{".apl", ".dyalog"}, + TmScope: "source.apl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ASP": &Language{ + Name: "ASP", + Type: "programming", + Color: "#6a40fd", + Aliases: []string{"aspx", "aspx-vb"}, + Extensions: []string{".asp", ".asax", ".ascx", ".ashx", ".asmx", ".aspx", ".axd"}, + TmScope: "text.html.asp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ATS": &Language{ + Name: "ATS", + Type: "programming", + Color: "#1ac620", + Aliases: []string{"ats2"}, + Extensions: []string{".dats", ".hats", ".sats"}, + TmScope: "source.ats", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ActionScript": &Language{ + Name: "ActionScript", + Type: "programming", + Color: "#882B0F", + Aliases: []string{"actionscript 3", "actionscript3", "as3"}, + Extensions: []string{".as"}, + TmScope: "source.actionscript.3", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ada": &Language{ + Name: "Ada", + Type: "programming", + Color: "#02f88c", + Aliases: []string{"ada95", "ada2005"}, + Extensions: []string{".adb", ".ada", ".ads"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Agda": &Language{ + Name: "Agda", + Type: "programming", + Color: "#315665", + Extensions: []string{".agda"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Alloy": &Language{ + Name: "Alloy", + Type: "programming", + Color: "#64C800", + Extensions: []string{".als"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ant Build System": &Language{ + Name: "Ant Build System", + Type: "data", + Filenames: []string{"ant.xml", "build.xml"}, + TmScope: "text.xml.ant", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "ApacheConf": &Language{ + Name: "ApacheConf", + Type: "markup", + Aliases: []string{"aconf", "apache"}, + Extensions: []string{".apacheconf", ".vhost"}, + TmScope: "source.apache-config", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Apex": &Language{ + Name: "Apex", + Type: "programming", + Extensions: []string{".cls"}, + TmScope: "source.java", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "AppleScript": &Language{ + Name: "AppleScript", + Type: "programming", + Color: "#101F1F", + Aliases: []string{"osascript"}, + Extensions: []string{".applescript", ".scpt"}, + Interpreters: []string{"osascript"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Arc": &Language{ + Name: "Arc", + Type: "programming", + Color: "#aa2afe", + Extensions: []string{".arc"}, + TmScope: "none", + AceMode: "programming", + 
Wrap: false, + Searchable: true, + }, + "Arduino": &Language{ + Name: "Arduino", + Type: "programming", + Color: "#bd79d1", + Extensions: []string{".ino"}, + TmScope: "source.c++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "AsciiDoc": &Language{ + Name: "AsciiDoc", + Type: "prose", + Extensions: []string{".asciidoc", ".adoc", ".asc"}, + TmScope: "text.html.asciidoc", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "AspectJ": &Language{ + Name: "AspectJ", + Type: "programming", + Color: "#a957b0", + Extensions: []string{".aj"}, + TmScope: "source.aspectj", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Assembly": &Language{ + Name: "Assembly", + Type: "programming", + Color: "#6E4C13", + Aliases: []string{"nasm"}, + Extensions: []string{".asm", ".a51", ".inc", ".nasm"}, + TmScope: "source.asm.x86", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Augeas": &Language{ + Name: "Augeas", + Type: "programming", + Extensions: []string{".aug"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "AutoHotkey": &Language{ + Name: "AutoHotkey", + Type: "programming", + Color: "#6594b9", + Aliases: []string{"ahk"}, + Extensions: []string{".ahk", ".ahkl"}, + TmScope: "source.ahk", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "AutoIt": &Language{ + Name: "AutoIt", + Type: "programming", + Color: "#1C3552", + Aliases: []string{"au3", "AutoIt3", "AutoItScript"}, + Extensions: []string{".au3"}, + TmScope: "source.autoit.3", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Awk": &Language{ + Name: "Awk", + Type: "programming", + Extensions: []string{".awk", ".auk", ".gawk", ".mawk", ".nawk"}, + Interpreters: []string{"awk", "gawk", "mawk", "nawk"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Batchfile": &Language{ + Name: "Batchfile", + Type: "programming", + Color: "#C1F12E", + Aliases: []string{"bat", "batch", "dosbatch", "winbatch"}, + Extensions: []string{".bat", ".cmd"}, + TmScope: "source.dosbatch", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Befunge": &Language{ + Name: "Befunge", + Type: "programming", + Extensions: []string{".befunge"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Bison": &Language{ + Name: "Bison", + Type: "programming", + Group: "Yacc", + Color: "#6A463F", + Extensions: []string{".bison"}, + TmScope: "source.bison", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "BitBake": &Language{ + Name: "BitBake", + Type: "programming", + Extensions: []string{".bb"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "BlitzBasic": &Language{ + Name: "BlitzBasic", + Type: "programming", + Aliases: []string{"b3d", "blitz3d", "blitzplus", "bplus"}, + Extensions: []string{".bb", ".decls"}, + TmScope: "source.blitzmax", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "BlitzMax": &Language{ + Name: "BlitzMax", + Type: "programming", + Color: "#cd6400", + Aliases: []string{"bmax"}, + Extensions: []string{".bmx"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Bluespec": &Language{ + Name: "Bluespec", + Type: "programming", + Extensions: []string{".bsv"}, + TmScope: "source.bsv", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Boo": &Language{ + Name: "Boo", + Type: "programming", + Color: "#d4bec1", + Extensions: []string{".boo"}, + AceMode: 
"programming", + Wrap: false, + Searchable: true, + }, + "Brainfuck": &Language{ + Name: "Brainfuck", + Type: "programming", + Color: "#2F2530", + Extensions: []string{".b", ".bf"}, + TmScope: "source.bf", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Brightscript": &Language{ + Name: "Brightscript", + Type: "programming", + Extensions: []string{".brs"}, + TmScope: "source.brightscript", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Bro": &Language{ + Name: "Bro", + Type: "programming", + Extensions: []string{".bro"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "C": &Language{ + Name: "C", + Type: "programming", + Color: "#555555", + Extensions: []string{".c", ".cats", ".h", ".idc", ".w"}, + Interpreters: []string{"tcc"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "C#": &Language{ + Name: "C#", + Type: "programming", + Color: "#178600", + Aliases: []string{"csharp"}, + Extensions: []string{".cs", ".cake", ".cshtml", ".csx"}, + TmScope: "source.cs", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "C++": &Language{ + Name: "C++", + Type: "programming", + Color: "#f34b7d", + Aliases: []string{"cpp"}, + Extensions: []string{".cpp", ".c++", ".cc", ".cp", ".cxx", ".h", ".h++", ".hh", ".hpp", ".hxx", ".inc", ".inl", ".ipp", ".tcc", ".tpp"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "C-ObjDump": &Language{ + Name: "C-ObjDump", + Type: "data", + Extensions: []string{".c-objdump"}, + TmScope: "objdump.x86asm", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "C2hs Haskell": &Language{ + Name: "C2hs Haskell", + Type: "programming", + Group: "Haskell", + Aliases: []string{"c2hs"}, + Extensions: []string{".chs"}, + TmScope: "source.haskell", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "CLIPS": &Language{ + Name: "CLIPS", + Type: "programming", + Extensions: []string{".clp"}, + TmScope: "source.clips", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "CMake": &Language{ + Name: "CMake", + Type: "programming", + Extensions: []string{".cmake", ".cmake.in"}, + Filenames: []string{"CMakeLists.txt"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "COBOL": &Language{ + Name: "COBOL", + Type: "programming", + Extensions: []string{".cob", ".cbl", ".ccp", ".cobol", ".cpy"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "CSS": &Language{ + Name: "CSS", + Type: "markup", + Color: "#563d7c", + Extensions: []string{".css"}, + TmScope: "source.css", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "CSV": &Language{ + Name: "CSV", + Type: "data", + Extensions: []string{".csv"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Cap'n Proto": &Language{ + Name: "Cap'n Proto", + Type: "programming", + Extensions: []string{".capnp"}, + TmScope: "source.capnp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "CartoCSS": &Language{ + Name: "CartoCSS", + Type: "programming", + Aliases: []string{"Carto"}, + Extensions: []string{".mss"}, + TmScope: "source.css.mss", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ceylon": &Language{ + Name: "Ceylon", + Type: "programming", + Extensions: []string{".ceylon"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Chapel": &Language{ + Name: "Chapel", + Type: "programming", + Color: "#8dc63f", + Aliases: []string{"chpl"}, + Extensions: 
[]string{".chpl"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Charity": &Language{ + Name: "Charity", + Type: "programming", + Extensions: []string{".ch"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ChucK": &Language{ + Name: "ChucK", + Type: "programming", + Extensions: []string{".ck"}, + TmScope: "source.java", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Cirru": &Language{ + Name: "Cirru", + Type: "programming", + Color: "#ccccff", + Extensions: []string{".cirru"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Clarion": &Language{ + Name: "Clarion", + Type: "programming", + Color: "#db901e", + Extensions: []string{".clw"}, + TmScope: "source.clarion", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Clean": &Language{ + Name: "Clean", + Type: "programming", + Color: "#3F85AF", + Extensions: []string{".icl", ".dcl"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Click": &Language{ + Name: "Click", + Type: "programming", + Color: "#E4E6F3", + Extensions: []string{".click"}, + TmScope: "source.click", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Clojure": &Language{ + Name: "Clojure", + Type: "programming", + Color: "#db5855", + Extensions: []string{".clj", ".boot", ".cl2", ".cljc", ".cljs", ".cljs.hl", ".cljscm", ".cljx", ".hic"}, + Filenames: []string{"riemann.config"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "CoffeeScript": &Language{ + Name: "CoffeeScript", + Type: "programming", + Color: "#244776", + Aliases: []string{"coffee", "coffee-script"}, + Extensions: []string{".coffee", "._coffee", ".cake", ".cjsx", ".cson", ".iced"}, + Filenames: []string{"Cakefile"}, + Interpreters: []string{"coffee"}, + TmScope: "source.coffee", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ColdFusion": &Language{ + Name: "ColdFusion", + Type: "programming", + Group: "ColdFusion", + Color: "#ed2cd6", + Aliases: []string{"cfm", "cfml", "coldfusion html"}, + Extensions: []string{".cfm", ".cfml"}, + TmScope: "text.html.cfm", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ColdFusion CFC": &Language{ + Name: "ColdFusion CFC", + Type: "programming", + Group: "ColdFusion", + Color: "#ed2cd6", + Aliases: []string{"cfc"}, + Extensions: []string{".cfc"}, + TmScope: "source.cfscript", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Common Lisp": &Language{ + Name: "Common Lisp", + Type: "programming", + Color: "#3fb68b", + Aliases: []string{"lisp"}, + Extensions: []string{".lisp", ".asd", ".cl", ".l", ".lsp", ".ny", ".podsl", ".sexp"}, + Interpreters: []string{"lisp", "sbcl", "ccl", "clisp", "ecl"}, + TmScope: "source.lisp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Component Pascal": &Language{ + Name: "Component Pascal", + Type: "programming", + Color: "#B0CE4E", + Aliases: []string{"delphi", "objectpascal"}, + Extensions: []string{".cp", ".cps"}, + TmScope: "source.pascal", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Cool": &Language{ + Name: "Cool", + Type: "programming", + Extensions: []string{".cl"}, + TmScope: "source.cool", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Coq": &Language{ + Name: "Coq", + Type: "programming", + Extensions: []string{".coq", ".v"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Cpp-ObjDump": 
&Language{ + Name: "Cpp-ObjDump", + Type: "data", + Aliases: []string{"c++-objdumb"}, + Extensions: []string{".cppobjdump", ".c++-objdump", ".c++objdump", ".cpp-objdump", ".cxx-objdump"}, + TmScope: "objdump.x86asm", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Creole": &Language{ + Name: "Creole", + Type: "prose", + Extensions: []string{".creole"}, + TmScope: "text.html.creole", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "Crystal": &Language{ + Name: "Crystal", + Type: "programming", + Color: "#776791", + Extensions: []string{".cr"}, + Interpreters: []string{"crystal"}, + TmScope: "source.crystal", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Cucumber": &Language{ + Name: "Cucumber", + Type: "programming", + Color: "#5B2063", + Aliases: []string{"gherkin"}, + Extensions: []string{".feature"}, + TmScope: "text.gherkin.feature", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Cuda": &Language{ + Name: "Cuda", + Type: "programming", + Color: "#3A4E3A", + Extensions: []string{".cu", ".cuh"}, + TmScope: "source.cuda-c++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Cycript": &Language{ + Name: "Cycript", + Type: "programming", + Extensions: []string{".cy"}, + TmScope: "source.js", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Cython": &Language{ + Name: "Cython", + Type: "programming", + Group: "Python", + Aliases: []string{"pyrex"}, + Extensions: []string{".pyx", ".pxd", ".pxi"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "D": &Language{ + Name: "D", + Type: "programming", + Color: "#ba595e", + Extensions: []string{".d", ".di"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "D-ObjDump": &Language{ + Name: "D-ObjDump", + Type: "data", + Extensions: []string{".d-objdump"}, + TmScope: "objdump.x86asm", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "DIGITAL Command Language": &Language{ + Name: "DIGITAL Command Language", + Type: "programming", + Aliases: []string{"dcl"}, + Extensions: []string{".com"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "DM": &Language{ + Name: "DM", + Type: "programming", + Color: "#447265", + Aliases: []string{"byond"}, + Extensions: []string{".dm"}, + TmScope: "source.c++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "DNS Zone": &Language{ + Name: "DNS Zone", + Type: "data", + Extensions: []string{".zone", ".arpa"}, + TmScope: "text.zone_file", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "DTrace": &Language{ + Name: "DTrace", + Type: "programming", + Aliases: []string{"dtrace-script"}, + Extensions: []string{".d"}, + Interpreters: []string{"dtrace"}, + TmScope: "source.c", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Darcs Patch": &Language{ + Name: "Darcs Patch", + Type: "data", + Aliases: []string{"dpatch"}, + Extensions: []string{".darcspatch", ".dpatch"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Dart": &Language{ + Name: "Dart", + Type: "programming", + Color: "#00B4AB", + Extensions: []string{".dart"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Diff": &Language{ + Name: "Diff", + Type: "data", + Aliases: []string{"udiff"}, + Extensions: []string{".diff", ".patch"}, + TmScope: "source.diff", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Dockerfile": &Language{ + Name: "Dockerfile", + Type: 
"data", + Extensions: []string{".dockerfile"}, + Filenames: []string{"Dockerfile"}, + TmScope: "source.dockerfile", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Dogescript": &Language{ + Name: "Dogescript", + Type: "programming", + Color: "#cca760", + Extensions: []string{".djs"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Dylan": &Language{ + Name: "Dylan", + Type: "programming", + Color: "#6c616e", + Extensions: []string{".dylan", ".dyl", ".intr", ".lid"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "E": &Language{ + Name: "E", + Type: "programming", + Color: "#ccce35", + Extensions: []string{".E"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ECL": &Language{ + Name: "ECL", + Type: "programming", + Color: "#8a1267", + Extensions: []string{".ecl", ".eclxml"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ECLiPSe": &Language{ + Name: "ECLiPSe", + Type: "programming", + Group: "prolog", + Extensions: []string{".ecl"}, + TmScope: "source.prolog.eclipse", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Eagle": &Language{ + Name: "Eagle", + Type: "markup", + Color: "#814C05", + Extensions: []string{".sch", ".brd"}, + TmScope: "text.xml", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Ecere Projects": &Language{ + Name: "Ecere Projects", + Type: "data", + Group: "JavaScript", + Extensions: []string{".epj"}, + TmScope: "source.json", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Eiffel": &Language{ + Name: "Eiffel", + Type: "programming", + Color: "#946d57", + Extensions: []string{".e"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Elixir": &Language{ + Name: "Elixir", + Type: "programming", + Color: "#6e4a7e", + Extensions: []string{".ex", ".exs"}, + Filenames: []string{"mix.lock"}, + Interpreters: []string{"elixir"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Elm": &Language{ + Name: "Elm", + Type: "programming", + Color: "#60B5CC", + Extensions: []string{".elm"}, + TmScope: "source.elm", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Emacs Lisp": &Language{ + Name: "Emacs Lisp", + Type: "programming", + Color: "#c065db", + Aliases: []string{"elisp", "emacs"}, + Extensions: []string{".el", ".emacs", ".emacs.desktop"}, + Filenames: []string{".emacs", ".emacs.desktop"}, + TmScope: "source.lisp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "EmberScript": &Language{ + Name: "EmberScript", + Type: "programming", + Color: "#FFF4F3", + Extensions: []string{".em", ".emberscript"}, + TmScope: "source.coffee", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Erlang": &Language{ + Name: "Erlang", + Type: "programming", + Color: "#B83998", + Extensions: []string{".erl", ".es", ".escript", ".hrl", ".xrl", ".yrl"}, + Filenames: []string{"rebar.config", "rebar.config.lock", "rebar.lock"}, + Interpreters: []string{"escript"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "F#": &Language{ + Name: "F#", + Type: "programming", + Color: "#b845fc", + Aliases: []string{"fsharp"}, + Extensions: []string{".fs", ".fsi", ".fsx"}, + TmScope: "source.fsharp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "FLUX": &Language{ + Name: "FLUX", + Type: "programming", + Color: "#88ccff", + Extensions: []string{".fx", ".flux"}, + TmScope: "none", + 
AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "FORTRAN": &Language{ + Name: "FORTRAN", + Type: "programming", + Color: "#4d41b1", + Extensions: []string{".f90", ".f", ".f03", ".f08", ".f77", ".f95", ".for", ".fpp"}, + TmScope: "source.fortran.modern", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Factor": &Language{ + Name: "Factor", + Type: "programming", + Color: "#636746", + Extensions: []string{".factor"}, + Filenames: []string{".factor-boot-rc", ".factor-rc"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Fancy": &Language{ + Name: "Fancy", + Type: "programming", + Color: "#7b9db4", + Extensions: []string{".fy", ".fancypack"}, + Filenames: []string{"Fakefile"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Fantom": &Language{ + Name: "Fantom", + Type: "programming", + Color: "#dbded5", + Extensions: []string{".fan"}, + TmScope: "source.fan", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Filterscript": &Language{ + Name: "Filterscript", + Type: "programming", + Group: "RenderScript", + Extensions: []string{".fs"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Formatted": &Language{ + Name: "Formatted", + Type: "data", + Extensions: []string{".for", ".eam.fs"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Forth": &Language{ + Name: "Forth", + Type: "programming", + Color: "#341708", + Extensions: []string{".fth", ".4th", ".f", ".for", ".forth", ".fr", ".frt", ".fs"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "FreeMarker": &Language{ + Name: "FreeMarker", + Type: "programming", + Color: "#0050b2", + Aliases: []string{"ftl"}, + Extensions: []string{".ftl"}, + TmScope: "text.html.ftl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Frege": &Language{ + Name: "Frege", + Type: "programming", + Color: "#00cafe", + Extensions: []string{".fr"}, + TmScope: "source.haskell", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "G-code": &Language{ + Name: "G-code", + Type: "data", + Extensions: []string{".g", ".gco", ".gcode"}, + TmScope: "source.gcode", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "GAMS": &Language{ + Name: "GAMS", + Type: "programming", + Extensions: []string{".gms"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "GAP": &Language{ + Name: "GAP", + Type: "programming", + Extensions: []string{".g", ".gap", ".gd", ".gi", ".tst"}, + TmScope: "source.gap", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "GAS": &Language{ + Name: "GAS", + Type: "programming", + Group: "Assembly", + Extensions: []string{".s", ".ms"}, + TmScope: "source.asm.x86", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "GDScript": &Language{ + Name: "GDScript", + Type: "programming", + Extensions: []string{".gd"}, + TmScope: "source.gdscript", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "GLSL": &Language{ + Name: "GLSL", + Type: "programming", + Extensions: []string{".glsl", ".fp", ".frag", ".frg", ".fs", ".fshader", ".geo", ".geom", ".glslv", ".gshader", ".shader", ".vert", ".vrx", ".vshader"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Game Maker Language": &Language{ + Name: "Game Maker Language", + Type: "programming", + Color: "#8fb200", + Extensions: []string{".gml"}, + TmScope: "source.c++", + AceMode: 
"programming", + Wrap: false, + Searchable: true, + }, + "Genshi": &Language{ + Name: "Genshi", + Type: "programming", + Aliases: []string{"xml+genshi", "xml+kid"}, + Extensions: []string{".kid"}, + TmScope: "text.xml.genshi", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Gentoo Ebuild": &Language{ + Name: "Gentoo Ebuild", + Type: "programming", + Group: "Shell", + Extensions: []string{".ebuild"}, + TmScope: "source.shell", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Gentoo Eclass": &Language{ + Name: "Gentoo Eclass", + Type: "programming", + Group: "Shell", + Extensions: []string{".eclass"}, + TmScope: "source.shell", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Gettext Catalog": &Language{ + Name: "Gettext Catalog", + Type: "prose", + Aliases: []string{"pot"}, + Extensions: []string{".po", ".pot"}, + TmScope: "source.po", + AceMode: "prose", + Wrap: false, + Searchable: false, + }, + "Glyph": &Language{ + Name: "Glyph", + Type: "programming", + Color: "#e4cc98", + Extensions: []string{".glf"}, + TmScope: "source.tcl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Gnuplot": &Language{ + Name: "Gnuplot", + Type: "programming", + Color: "#f0a9f0", + Extensions: []string{".gp", ".gnu", ".gnuplot", ".plot", ".plt"}, + Interpreters: []string{"gnuplot"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Go": &Language{ + Name: "Go", + Type: "programming", + Color: "#375eab", + Extensions: []string{".go"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Golo": &Language{ + Name: "Golo", + Type: "programming", + Color: "#88562A", + Extensions: []string{".golo"}, + TmScope: "source.golo", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Gosu": &Language{ + Name: "Gosu", + Type: "programming", + Color: "#82937f", + Extensions: []string{".gs", ".gst", ".gsx", ".vark"}, + TmScope: "source.gosu.2", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Grace": &Language{ + Name: "Grace", + Type: "programming", + Extensions: []string{".grace"}, + TmScope: "source.grace", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Gradle": &Language{ + Name: "Gradle", + Type: "data", + Extensions: []string{".gradle"}, + TmScope: "source.groovy.gradle", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Grammatical Framework": &Language{ + Name: "Grammatical Framework", + Type: "programming", + Color: "#79aa7a", + Aliases: []string{"gf"}, + Extensions: []string{".gf"}, + TmScope: "source.haskell", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Graph Modeling Language": &Language{ + Name: "Graph Modeling Language", + Type: "data", + Extensions: []string{".gml"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Graphviz (DOT)": &Language{ + Name: "Graphviz (DOT)", + Type: "data", + Extensions: []string{".dot", ".gv"}, + TmScope: "source.dot", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Groff": &Language{ + Name: "Groff", + Type: "markup", + Aliases: []string{"nroff"}, + Extensions: []string{".man", ".1", ".1in", ".1m", ".1x", ".2", ".3", ".3in", ".3m", ".3qt", ".3x", ".4", ".5", ".6", ".7", ".8", ".9", ".l", ".ms", ".n", ".rno", ".roff"}, + TmScope: "text.groff", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Groovy": &Language{ + Name: "Groovy", + Type: "programming", + Color: "#e69f56", + Extensions: []string{".groovy", ".grt", 
".gtpl", ".gvy"}, + Interpreters: []string{"groovy"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Groovy Server Pages": &Language{ + Name: "Groovy Server Pages", + Type: "programming", + Group: "Groovy", + Aliases: []string{"gsp", "java server page"}, + Extensions: []string{".gsp"}, + TmScope: "text.html.jsp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "HCL": &Language{ + Name: "HCL", + Type: "programming", + Extensions: []string{".hcl", ".tf"}, + TmScope: "source.ruby", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "HTML": &Language{ + Name: "HTML", + Type: "markup", + Color: "#e44b23", + Aliases: []string{"xhtml"}, + Extensions: []string{".html", ".htm", ".html.hl", ".inc", ".st", ".xht", ".xhtml"}, + TmScope: "text.html.basic", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "HTML+Django": &Language{ + Name: "HTML+Django", + Type: "markup", + Group: "HTML", + Aliases: []string{"django", "html+django/jinja", "html+jinja", "htmldjango"}, + Extensions: []string{".mustache", ".jinja"}, + TmScope: "text.html.django", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "HTML+EEX": &Language{ + Name: "HTML+EEX", + Type: "markup", + Group: "HTML", + Aliases: []string{"eex"}, + Extensions: []string{".eex"}, + TmScope: "text.html.elixir", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "HTML+ERB": &Language{ + Name: "HTML+ERB", + Type: "markup", + Group: "HTML", + Aliases: []string{"erb"}, + Extensions: []string{".erb", ".erb.deface"}, + TmScope: "text.html.erb", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "HTML+PHP": &Language{ + Name: "HTML+PHP", + Type: "markup", + Group: "HTML", + Extensions: []string{".phtml"}, + TmScope: "text.html.php", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "HTTP": &Language{ + Name: "HTTP", + Type: "data", + Extensions: []string{".http"}, + TmScope: "source.httpspec", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Hack": &Language{ + Name: "Hack", + Type: "programming", + Color: "#878787", + Extensions: []string{".hh", ".php"}, + TmScope: "text.html.php", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Haml": &Language{ + Name: "Haml", + Type: "markup", + Group: "HTML", + Color: "#ECE2A9", + Extensions: []string{".haml", ".haml.deface"}, + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Handlebars": &Language{ + Name: "Handlebars", + Type: "markup", + Group: "HTML", + Color: "#01a9d6", + Aliases: []string{"hbs", "htmlbars"}, + Extensions: []string{".handlebars", ".hbs"}, + TmScope: "text.html.handlebars", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Harbour": &Language{ + Name: "Harbour", + Type: "programming", + Color: "#0e60e3", + Extensions: []string{".hb"}, + TmScope: "source.harbour", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Haskell": &Language{ + Name: "Haskell", + Type: "programming", + Color: "#29b544", + Extensions: []string{".hs", ".hsc"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Haxe": &Language{ + Name: "Haxe", + Type: "programming", + Color: "#df7900", + Extensions: []string{".hx", ".hxsl"}, + TmScope: "source.haxe.2", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Hy": &Language{ + Name: "Hy", + Type: "programming", + Color: "#7790B2", + Aliases: []string{"hylang"}, + Extensions: []string{".hy"}, + TmScope: "source.hy", + AceMode: "programming", + Wrap: false, 
+ Searchable: true, + }, + "HyPhy": &Language{ + Name: "HyPhy", + Type: "programming", + Extensions: []string{".bf"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "IDL": &Language{ + Name: "IDL", + Type: "programming", + Color: "#a3522f", + Extensions: []string{".pro", ".dlm"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "IGOR Pro": &Language{ + Name: "IGOR Pro", + Type: "programming", + Aliases: []string{"igor", "igorpro"}, + Extensions: []string{".ipf"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "INI": &Language{ + Name: "INI", + Type: "data", + Aliases: []string{"dosini"}, + Extensions: []string{".ini", ".cfg", ".prefs", ".pro", ".properties"}, + TmScope: "source.ini", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "IRC log": &Language{ + Name: "IRC log", + Type: "data", + Aliases: []string{"irc", "irc logs"}, + Extensions: []string{".irclog", ".weechatlog"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Idris": &Language{ + Name: "Idris", + Type: "programming", + Extensions: []string{".idr", ".lidr"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Inform 7": &Language{ + Name: "Inform 7", + Type: "programming", + Aliases: []string{"i7", "inform7"}, + Extensions: []string{".ni", ".i7x"}, + TmScope: "source.inform7", + AceMode: "programming", + Wrap: true, + Searchable: true, + }, + "Inno Setup": &Language{ + Name: "Inno Setup", + Type: "programming", + Extensions: []string{".iss"}, + TmScope: "source.inno", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Io": &Language{ + Name: "Io", + Type: "programming", + Color: "#a9188d", + Extensions: []string{".io"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ioke": &Language{ + Name: "Ioke", + Type: "programming", + Color: "#078193", + Extensions: []string{".ik"}, + Interpreters: []string{"ioke"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Isabelle": &Language{ + Name: "Isabelle", + Type: "programming", + Color: "#FEFE00", + Extensions: []string{".thy"}, + TmScope: "source.isabelle.theory", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Isabelle ROOT": &Language{ + Name: "Isabelle ROOT", + Type: "programming", + Group: "Isabelle", + Filenames: []string{"ROOT"}, + TmScope: "source.isabelle.root", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "J": &Language{ + Name: "J", + Type: "programming", + Color: "#9EEDFF", + Extensions: []string{".ijs"}, + TmScope: "source.j", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "JFlex": &Language{ + Name: "JFlex", + Type: "programming", + Group: "Lex", + Color: "#DBCA00", + Extensions: []string{".flex", ".jflex"}, + TmScope: "source.jflex", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "JSON": &Language{ + Name: "JSON", + Type: "data", + Group: "JavaScript", + Extensions: []string{".json", ".geojson", ".lock", ".topojson"}, + Filenames: []string{".jshintrc", "composer.lock"}, + TmScope: "source.json", + AceMode: "data", + Wrap: false, + Searchable: false, + }, + "JSON5": &Language{ + Name: "JSON5", + Type: "data", + Extensions: []string{".json5"}, + TmScope: "source.js", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "JSONLD": &Language{ + Name: "JSONLD", + Type: "data", + Group: "JavaScript", + Extensions: []string{".jsonld"}, + TmScope: "source.js", + AceMode: 
"data", + Wrap: false, + Searchable: true, + }, + "JSONiq": &Language{ + Name: "JSONiq", + Type: "programming", + Color: "#40d47e", + Extensions: []string{".jq"}, + TmScope: "source.jq", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "JSX": &Language{ + Name: "JSX", + Type: "programming", + Group: "JavaScript", + Extensions: []string{".jsx"}, + TmScope: "source.js.jsx", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Jade": &Language{ + Name: "Jade", + Type: "markup", + Group: "HTML", + Extensions: []string{".jade"}, + TmScope: "text.jade", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Jasmin": &Language{ + Name: "Jasmin", + Type: "programming", + Extensions: []string{".j"}, + TmScope: "source.jasmin", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Java": &Language{ + Name: "Java", + Type: "programming", + Color: "#b07219", + Extensions: []string{".java"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Java Server Pages": &Language{ + Name: "Java Server Pages", + Type: "programming", + Group: "Java", + Aliases: []string{"jsp"}, + Extensions: []string{".jsp"}, + TmScope: "text.html.jsp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "JavaScript": &Language{ + Name: "JavaScript", + Type: "programming", + Color: "#f1e05a", + Aliases: []string{"js", "node"}, + Extensions: []string{".js", "._js", ".bones", ".es6", ".frag", ".gs", ".jake", ".jsb", ".jscad", ".jsfl", ".jsm", ".jss", ".njs", ".pac", ".sjs", ".ssjs", ".sublime-build", ".sublime-commands", ".sublime-completions", ".sublime-keymap", ".sublime-macro", ".sublime-menu", ".sublime-mousemap", ".sublime-project", ".sublime-settings", ".sublime-theme", ".sublime-workspace", ".sublime_metrics", ".sublime_session", ".xsjs", ".xsjslib"}, + Filenames: []string{"Jakefile"}, + Interpreters: []string{"node"}, + TmScope: "source.js", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Julia": &Language{ + Name: "Julia", + Type: "programming", + Color: "#a270ba", + Extensions: []string{".jl"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Jupyter Notebook": &Language{ + Name: "Jupyter Notebook", + Type: "markup", + Color: "#DA5B0B", + Aliases: []string{"IPython Notebook"}, + Extensions: []string{".ipynb"}, + Filenames: []string{"Notebook"}, + TmScope: "source.json", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "KRL": &Language{ + Name: "KRL", + Type: "programming", + Color: "#28431f", + Extensions: []string{".krl"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "KiCad": &Language{ + Name: "KiCad", + Type: "programming", + Extensions: []string{".sch", ".brd", ".kicad_pcb"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Kit": &Language{ + Name: "Kit", + Type: "markup", + Extensions: []string{".kit"}, + TmScope: "text.html.basic", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Kotlin": &Language{ + Name: "Kotlin", + Type: "programming", + Color: "#F18E33", + Extensions: []string{".kt", ".ktm", ".kts"}, + TmScope: "source.Kotlin", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LFE": &Language{ + Name: "LFE", + Type: "programming", + Group: "Erlang", + Color: "#004200", + Extensions: []string{".lfe"}, + TmScope: "source.lisp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LLVM": &Language{ + Name: "LLVM", + Type: "programming", + 
Color: "#185619", + Extensions: []string{".ll"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LOLCODE": &Language{ + Name: "LOLCODE", + Type: "programming", + Color: "#cc9900", + Extensions: []string{".lol"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LSL": &Language{ + Name: "LSL", + Type: "programming", + Color: "#3d9970", + Extensions: []string{".lsl", ".lslp"}, + Interpreters: []string{"lsl"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LabVIEW": &Language{ + Name: "LabVIEW", + Type: "programming", + Extensions: []string{".lvproj"}, + TmScope: "text.xml", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Lasso": &Language{ + Name: "Lasso", + Type: "programming", + Color: "#999999", + Aliases: []string{"lassoscript"}, + Extensions: []string{".lasso", ".las", ".lasso8", ".lasso9", ".ldml"}, + TmScope: "file.lasso", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Latte": &Language{ + Name: "Latte", + Type: "markup", + Group: "HTML", + Color: "#A8FF97", + Extensions: []string{".latte"}, + TmScope: "text.html.smarty", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Lean": &Language{ + Name: "Lean", + Type: "programming", + Extensions: []string{".lean", ".hlean"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Less": &Language{ + Name: "Less", + Type: "markup", + Group: "CSS", + Color: "#A1D9A1", + Extensions: []string{".less"}, + TmScope: "source.css.less", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Lex": &Language{ + Name: "Lex", + Type: "programming", + Color: "#DBCA00", + Aliases: []string{"flex"}, + Extensions: []string{".l", ".lex"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LilyPond": &Language{ + Name: "LilyPond", + Type: "programming", + Extensions: []string{".ly", ".ily"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Limbo": &Language{ + Name: "Limbo", + Type: "programming", + Extensions: []string{".b", ".m"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Linker Script": &Language{ + Name: "Linker Script", + Type: "data", + Extensions: []string{".ld", ".lds"}, + Filenames: []string{"ld.script"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Linux Kernel Module": &Language{ + Name: "Linux Kernel Module", + Type: "data", + Extensions: []string{".mod"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Liquid": &Language{ + Name: "Liquid", + Type: "markup", + Extensions: []string{".liquid"}, + TmScope: "text.html.liquid", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Literate Agda": &Language{ + Name: "Literate Agda", + Type: "programming", + Group: "Agda", + Extensions: []string{".lagda"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Literate CoffeeScript": &Language{ + Name: "Literate CoffeeScript", + Type: "programming", + Group: "CoffeeScript", + Aliases: []string{"litcoffee"}, + Extensions: []string{".litcoffee"}, + TmScope: "source.litcoffee", + AceMode: "programming", + Wrap: true, + Searchable: true, + }, + "Literate Haskell": &Language{ + Name: "Literate Haskell", + Type: "programming", + Group: "Haskell", + Aliases: []string{"lhaskell", "lhs"}, + Extensions: []string{".lhs"}, + TmScope: "text.tex.latex.haskell", + AceMode: "programming", + Wrap: 
false, + Searchable: true, + }, + "LiveScript": &Language{ + Name: "LiveScript", + Type: "programming", + Color: "#499886", + Aliases: []string{"live-script", "ls"}, + Extensions: []string{".ls", "._ls"}, + Filenames: []string{"Slakefile"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Logos": &Language{ + Name: "Logos", + Type: "programming", + Extensions: []string{".xm", ".x", ".xi"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Logtalk": &Language{ + Name: "Logtalk", + Type: "programming", + Extensions: []string{".lgt", ".logtalk"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LookML": &Language{ + Name: "LookML", + Type: "programming", + Color: "#652B81", + Extensions: []string{".lookml"}, + TmScope: "source.yaml", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "LoomScript": &Language{ + Name: "LoomScript", + Type: "programming", + Extensions: []string{".ls"}, + TmScope: "source.loomscript", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Lua": &Language{ + Name: "Lua", + Type: "programming", + Color: "#000080", + Extensions: []string{".lua", ".fcgi", ".nse", ".pd_lua", ".rbxs", ".wlua"}, + Interpreters: []string{"lua"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "M": &Language{ + Name: "M", + Type: "programming", + Aliases: []string{"mumps"}, + Extensions: []string{".mumps", ".m"}, + TmScope: "source.lisp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "MAXScript": &Language{ + Name: "MAXScript", + Type: "programming", + Color: "#00a6a6", + Extensions: []string{".ms", ".mcr"}, + TmScope: "source.maxscript", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "MTML": &Language{ + Name: "MTML", + Type: "markup", + Color: "#b7e1f4", + Extensions: []string{".mtml"}, + TmScope: "text.html.basic", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "MUF": &Language{ + Name: "MUF", + Type: "programming", + Group: "Forth", + Extensions: []string{".muf", ".m"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Makefile": &Language{ + Name: "Makefile", + Type: "programming", + Color: "#427819", + Aliases: []string{"bsdmake", "make", "mf"}, + Extensions: []string{".mak", ".d", ".mk"}, + Filenames: []string{"GNUmakefile", "Kbuild", "Makefile", "Makefile.am", "Makefile.in", "Makefile.inc", "makefile"}, + Interpreters: []string{"make"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Mako": &Language{ + Name: "Mako", + Type: "programming", + Extensions: []string{".mako", ".mao"}, + TmScope: "text.html.mako", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Markdown": &Language{ + Name: "Markdown", + Type: "prose", + Color: "#083FA1", + Extensions: []string{".md", ".markdown", ".mkd", ".mkdn", ".mkdown", ".ron"}, + TmScope: "source.gfm", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "Mask": &Language{ + Name: "Mask", + Type: "markup", + Color: "#f97732", + Extensions: []string{".mask"}, + TmScope: "source.mask", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Mathematica": &Language{ + Name: "Mathematica", + Type: "programming", + Aliases: []string{"mma"}, + Extensions: []string{".mathematica", ".cdf", ".m", ".ma", ".mt", ".nb", ".nbp", ".wl", ".wlt"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Matlab": &Language{ + Name: "Matlab", + Type: "programming", + Color: "#bb92ac", + 
Aliases: []string{"octave"}, + Extensions: []string{".matlab", ".m"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Maven POM": &Language{ + Name: "Maven POM", + Type: "data", + Filenames: []string{"pom.xml"}, + TmScope: "text.xml.pom", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Max": &Language{ + Name: "Max", + Type: "programming", + Color: "#c4a79c", + Aliases: []string{"max/msp", "maxmsp"}, + Extensions: []string{".maxpat", ".maxhelp", ".maxproj", ".mxt", ".pat"}, + TmScope: "source.json", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "MediaWiki": &Language{ + Name: "MediaWiki", + Type: "prose", + Extensions: []string{".mediawiki", ".wiki"}, + TmScope: "text.html.mediawiki", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "Mercury": &Language{ + Name: "Mercury", + Type: "programming", + Color: "#ff2b2b", + Extensions: []string{".m", ".moo"}, + Interpreters: []string{"mmi"}, + TmScope: "source.mercury", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Metal": &Language{ + Name: "Metal", + Type: "programming", + Color: "#8f14e9", + Extensions: []string{".metal"}, + TmScope: "source.c++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "MiniD": &Language{ + Name: "MiniD", + Type: "programming", + Extensions: []string{".minid"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: false, + }, + "Mirah": &Language{ + Name: "Mirah", + Type: "programming", + Color: "#c7a938", + Extensions: []string{".druby", ".duby", ".mir", ".mirah"}, + TmScope: "source.ruby", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Modelica": &Language{ + Name: "Modelica", + Type: "programming", + Extensions: []string{".mo"}, + TmScope: "source.modelica", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Modula-2": &Language{ + Name: "Modula-2", + Type: "programming", + Extensions: []string{".mod"}, + TmScope: "source.modula2", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Module Management System": &Language{ + Name: "Module Management System", + Type: "programming", + Extensions: []string{".mms", ".mmk"}, + Filenames: []string{"descrip.mmk", "descrip.mms"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Monkey": &Language{ + Name: "Monkey", + Type: "programming", + Extensions: []string{".monkey"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Moocode": &Language{ + Name: "Moocode", + Type: "programming", + Extensions: []string{".moo"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "MoonScript": &Language{ + Name: "MoonScript", + Type: "programming", + Extensions: []string{".moon"}, + Interpreters: []string{"moon"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Myghty": &Language{ + Name: "Myghty", + Type: "programming", + Extensions: []string{".myt"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "NCL": &Language{ + Name: "NCL", + Type: "programming", + Color: "#28431f", + Extensions: []string{".ncl"}, + TmScope: "source.ncl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "NL": &Language{ + Name: "NL", + Type: "data", + Extensions: []string{".nl"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "NSIS": &Language{ + Name: "NSIS", + Type: "programming", + Extensions: []string{".nsi", ".nsh"}, + 
AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Nemerle": &Language{ + Name: "Nemerle", + Type: "programming", + Color: "#3d3c6e", + Extensions: []string{".n"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "NetLinx": &Language{ + Name: "NetLinx", + Type: "programming", + Color: "#0aa0ff", + Extensions: []string{".axs", ".axi"}, + TmScope: "source.netlinx", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "NetLinx+ERB": &Language{ + Name: "NetLinx+ERB", + Type: "programming", + Color: "#747faa", + Extensions: []string{".axs.erb", ".axi.erb"}, + TmScope: "source.netlinx.erb", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "NetLogo": &Language{ + Name: "NetLogo", + Type: "programming", + Color: "#ff6375", + Extensions: []string{".nlogo"}, + TmScope: "source.lisp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "NewLisp": &Language{ + Name: "NewLisp", + Type: "programming", + Color: "#87AED7", + Extensions: []string{".nl", ".lisp", ".lsp"}, + Interpreters: []string{"newlisp"}, + TmScope: "source.lisp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Nginx": &Language{ + Name: "Nginx", + Type: "markup", + Color: "#9469E9", + Aliases: []string{"nginx configuration file"}, + Extensions: []string{".nginxconf", ".vhost"}, + Filenames: []string{"nginx.conf"}, + TmScope: "source.nginx", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Nimrod": &Language{ + Name: "Nimrod", + Type: "programming", + Color: "#37775b", + Extensions: []string{".nim", ".nimrod"}, + TmScope: "source.nim", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ninja": &Language{ + Name: "Ninja", + Type: "data", + Extensions: []string{".ninja"}, + TmScope: "source.ninja", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Nit": &Language{ + Name: "Nit", + Type: "programming", + Color: "#009917", + Extensions: []string{".nit"}, + TmScope: "source.nit", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Nix": &Language{ + Name: "Nix", + Type: "programming", + Color: "#7e7eff", + Aliases: []string{"nixos"}, + Extensions: []string{".nix"}, + TmScope: "source.nix", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Nu": &Language{ + Name: "Nu", + Type: "programming", + Color: "#c9df40", + Aliases: []string{"nush"}, + Extensions: []string{".nu"}, + Filenames: []string{"Nukefile"}, + Interpreters: []string{"nush"}, + TmScope: "source.scheme", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "NumPy": &Language{ + Name: "NumPy", + Type: "programming", + Group: "Python", + Color: "#9C8AF9", + Extensions: []string{".numpy", ".numpyw", ".numsc"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "OCaml": &Language{ + Name: "OCaml", + Type: "programming", + Color: "#3be133", + Extensions: []string{".ml", ".eliom", ".eliomi", ".ml4", ".mli", ".mll", ".mly"}, + Interpreters: []string{"ocaml", "ocamlrun"}, + TmScope: "source.ocaml", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ObjDump": &Language{ + Name: "ObjDump", + Type: "data", + Extensions: []string{".objdump"}, + TmScope: "objdump.x86asm", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Objective-C": &Language{ + Name: "Objective-C", + Type: "programming", + Color: "#438eff", + Aliases: []string{"obj-c", "objc", "objectivec"}, + Extensions: []string{".m", ".h"}, + TmScope: "source.objc", + AceMode: 
"programming", + Wrap: false, + Searchable: true, + }, + "Objective-C++": &Language{ + Name: "Objective-C++", + Type: "programming", + Color: "#6866fb", + Aliases: []string{"obj-c++", "objc++", "objectivec++"}, + Extensions: []string{".mm"}, + TmScope: "source.objc++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Objective-J": &Language{ + Name: "Objective-J", + Type: "programming", + Color: "#ff0c5a", + Aliases: []string{"obj-j", "objectivej", "objj"}, + Extensions: []string{".j", ".sj"}, + TmScope: "source.js.objj", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Omgrofl": &Language{ + Name: "Omgrofl", + Type: "programming", + Color: "#cabbff", + Extensions: []string{".omgrofl"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Opa": &Language{ + Name: "Opa", + Type: "programming", + Extensions: []string{".opa"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Opal": &Language{ + Name: "Opal", + Type: "programming", + Color: "#f7ede0", + Extensions: []string{".opal"}, + TmScope: "source.opal", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "OpenCL": &Language{ + Name: "OpenCL", + Type: "programming", + Group: "C", + Extensions: []string{".cl", ".opencl"}, + TmScope: "source.c", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "OpenEdge ABL": &Language{ + Name: "OpenEdge ABL", + Type: "programming", + Aliases: []string{"progress", "openedge", "abl"}, + Extensions: []string{".p", ".cls"}, + TmScope: "source.abl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "OpenSCAD": &Language{ + Name: "OpenSCAD", + Type: "programming", + Extensions: []string{".scad"}, + TmScope: "source.scad", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Org": &Language{ + Name: "Org", + Type: "prose", + Extensions: []string{".org"}, + TmScope: "none", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "Ox": &Language{ + Name: "Ox", + Type: "programming", + Extensions: []string{".ox", ".oxh", ".oxo"}, + TmScope: "source.ox", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Oxygene": &Language{ + Name: "Oxygene", + Type: "programming", + Color: "#cdd0e3", + Extensions: []string{".oxygene"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Oz": &Language{ + Name: "Oz", + Type: "programming", + Color: "#fab738", + Extensions: []string{".oz"}, + TmScope: "source.oz", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PAWN": &Language{ + Name: "PAWN", + Type: "programming", + Color: "#dbb284", + Extensions: []string{".pwn"}, + TmScope: "source.c++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PHP": &Language{ + Name: "PHP", + Type: "programming", + Color: "#4F5D95", + Aliases: []string{"inc"}, + Extensions: []string{".php", ".aw", ".ctp", ".fcgi", ".inc", ".php3", ".php4", ".php5", ".phps", ".phpt"}, + Filenames: []string{"Phakefile"}, + Interpreters: []string{"php"}, + TmScope: "text.html.php", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PLSQL": &Language{ + Name: "PLSQL", + Type: "programming", + Color: "#dad8d8", + Extensions: []string{".pls", ".pck", ".pkb", ".pks", ".plb", ".plsql", ".sql"}, + TmScope: "source.plsql.oracle", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PLpgSQL": &Language{ + Name: "PLpgSQL", + Type: "programming", + Extensions: []string{".sql"}, + TmScope: 
"source.sql", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "POV-Ray SDL": &Language{ + Name: "POV-Ray SDL", + Type: "programming", + Aliases: []string{"pov-ray", "povray"}, + Extensions: []string{".pov", ".inc"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Pan": &Language{ + Name: "Pan", + Type: "programming", + Color: "#cc0000", + Extensions: []string{".pan"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Papyrus": &Language{ + Name: "Papyrus", + Type: "programming", + Color: "#6600cc", + Extensions: []string{".psc"}, + TmScope: "source.papyrus", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Parrot": &Language{ + Name: "Parrot", + Type: "programming", + Color: "#f3ca0a", + Extensions: []string{".parrot"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Parrot Assembly": &Language{ + Name: "Parrot Assembly", + Type: "programming", + Group: "Parrot", + Aliases: []string{"pasm"}, + Extensions: []string{".pasm"}, + Interpreters: []string{"parrot"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Parrot Internal Representation": &Language{ + Name: "Parrot Internal Representation", + Type: "programming", + Group: "Parrot", + Aliases: []string{"pir"}, + Extensions: []string{".pir"}, + Interpreters: []string{"parrot"}, + TmScope: "source.parrot.pir", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Pascal": &Language{ + Name: "Pascal", + Type: "programming", + Color: "#E3F171", + Extensions: []string{".pas", ".dfm", ".dpr", ".inc", ".lpr", ".pp"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Perl": &Language{ + Name: "Perl", + Type: "programming", + Color: "#0298c3", + Extensions: []string{".pl", ".al", ".cgi", ".fcgi", ".perl", ".ph", ".plx", ".pm", ".pod", ".psgi", ".t"}, + Interpreters: []string{"perl"}, + TmScope: "source.perl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Perl6": &Language{ + Name: "Perl6", + Type: "programming", + Color: "#0000fb", + Extensions: []string{".6pl", ".6pm", ".nqp", ".p6", ".p6l", ".p6m", ".pl", ".pl6", ".pm", ".pm6", ".t"}, + Filenames: []string{"Rexfile"}, + Interpreters: []string{"perl6"}, + TmScope: "source.perl6fe", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Pickle": &Language{ + Name: "Pickle", + Type: "data", + Extensions: []string{".pkl"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "PicoLisp": &Language{ + Name: "PicoLisp", + Type: "programming", + Extensions: []string{".l"}, + Interpreters: []string{"picolisp", "pil"}, + TmScope: "source.lisp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PigLatin": &Language{ + Name: "PigLatin", + Type: "programming", + Color: "#fcd7de", + Extensions: []string{".pig"}, + TmScope: "source.pig_latin", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Pike": &Language{ + Name: "Pike", + Type: "programming", + Color: "#005390", + Extensions: []string{".pike", ".pmod"}, + Interpreters: []string{"pike"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Pod": &Language{ + Name: "Pod", + Type: "prose", + Extensions: []string{".pod"}, + TmScope: "none", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "PogoScript": &Language{ + Name: "PogoScript", + Type: "programming", + Color: "#d80074", + Extensions: []string{".pogo"}, + TmScope: 
"source.pogoscript", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Pony": &Language{ + Name: "Pony", + Type: "programming", + Extensions: []string{".pony"}, + TmScope: "source.pony", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PostScript": &Language{ + Name: "PostScript", + Type: "markup", + Aliases: []string{"postscr"}, + Extensions: []string{".ps", ".eps"}, + TmScope: "source.postscript", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "PowerShell": &Language{ + Name: "PowerShell", + Type: "programming", + Aliases: []string{"posh"}, + Extensions: []string{".ps1", ".psd1", ".psm1"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Processing": &Language{ + Name: "Processing", + Type: "programming", + Color: "#0096D8", + Extensions: []string{".pde"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Prolog": &Language{ + Name: "Prolog", + Type: "programming", + Color: "#74283c", + Extensions: []string{".pl", ".pro", ".prolog", ".yap"}, + Interpreters: []string{"swipl", "yap"}, + TmScope: "source.prolog", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Propeller Spin": &Language{ + Name: "Propeller Spin", + Type: "programming", + Color: "#7fa2a7", + Extensions: []string{".spin"}, + TmScope: "source.spin", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Protocol Buffer": &Language{ + Name: "Protocol Buffer", + Type: "markup", + Aliases: []string{"protobuf", "Protocol Buffers"}, + Extensions: []string{".proto"}, + TmScope: "source.protobuf", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Public Key": &Language{ + Name: "Public Key", + Type: "data", + Extensions: []string{".asc", ".pub"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Puppet": &Language{ + Name: "Puppet", + Type: "programming", + Color: "#302B6D", + Extensions: []string{".pp"}, + Filenames: []string{"Modulefile"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Pure Data": &Language{ + Name: "Pure Data", + Type: "programming", + Color: "#91de79", + Extensions: []string{".pd"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PureBasic": &Language{ + Name: "PureBasic", + Type: "programming", + Color: "#5a6986", + Extensions: []string{".pb", ".pbi"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "PureScript": &Language{ + Name: "PureScript", + Type: "programming", + Color: "#1D222D", + Extensions: []string{".purs"}, + TmScope: "source.purescript", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Python": &Language{ + Name: "Python", + Type: "programming", + Color: "#3572A5", + Aliases: []string{"rusthon"}, + Extensions: []string{".py", ".bzl", ".cgi", ".fcgi", ".gyp", ".lmi", ".pyde", ".pyp", ".pyt", ".pyw", ".tac", ".wsgi", ".xpy"}, + Filenames: []string{"BUILD", "SConscript", "SConstruct", "Snakefile", "wscript"}, + Interpreters: []string{"python", "python2", "python3"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Python traceback": &Language{ + Name: "Python traceback", + Type: "data", + Group: "Python", + Extensions: []string{".pytb"}, + TmScope: "text.python.traceback", + AceMode: "data", + Wrap: false, + Searchable: false, + }, + "QML": &Language{ + Name: "QML", + Type: "programming", + Color: "#44a51c", + Extensions: []string{".qml", ".qbs"}, + TmScope: "source.qml", + AceMode: 
"programming", + Wrap: false, + Searchable: true, + }, + "QMake": &Language{ + Name: "QMake", + Type: "programming", + Extensions: []string{".pro", ".pri"}, + Interpreters: []string{"qmake"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "R": &Language{ + Name: "R", + Type: "programming", + Color: "#198CE7", + Aliases: []string{"R", "Rscript", "splus"}, + Extensions: []string{".r", ".rd", ".rsx"}, + Filenames: []string{".Rprofile"}, + Interpreters: []string{"Rscript"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "RAML": &Language{ + Name: "RAML", + Type: "markup", + Color: "#77d9fb", + Extensions: []string{".raml"}, + TmScope: "source.yaml", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "RDoc": &Language{ + Name: "RDoc", + Type: "prose", + Color: "#8E84BF", + Extensions: []string{".rdoc"}, + TmScope: "text.rdoc", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "REALbasic": &Language{ + Name: "REALbasic", + Type: "programming", + Extensions: []string{".rbbas", ".rbfrm", ".rbmnu", ".rbres", ".rbtbar", ".rbuistate"}, + TmScope: "source.vbnet", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "RHTML": &Language{ + Name: "RHTML", + Type: "markup", + Group: "HTML", + Aliases: []string{"html+ruby"}, + Extensions: []string{".rhtml"}, + TmScope: "text.html.erb", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "RMarkdown": &Language{ + Name: "RMarkdown", + Type: "prose", + Extensions: []string{".rmd"}, + TmScope: "source.gfm", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "Racket": &Language{ + Name: "Racket", + Type: "programming", + Color: "#22228f", + Extensions: []string{".rkt", ".rktd", ".rktl", ".scrbl"}, + Interpreters: []string{"racket"}, + TmScope: "source.racket", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ragel in Ruby Host": &Language{ + Name: "Ragel in Ruby Host", + Type: "programming", + Color: "#9d5200", + Aliases: []string{"ragel-rb", "ragel-ruby"}, + Extensions: []string{".rl"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Raw token data": &Language{ + Name: "Raw token data", + Type: "data", + Aliases: []string{"raw"}, + Extensions: []string{".raw"}, + TmScope: "none", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Rebol": &Language{ + Name: "Rebol", + Type: "programming", + Color: "#358a5b", + Extensions: []string{".reb", ".r", ".r2", ".r3", ".rebol"}, + TmScope: "source.rebol", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Red": &Language{ + Name: "Red", + Type: "programming", + Color: "#ee0000", + Aliases: []string{"red/system"}, + Extensions: []string{".red", ".reds"}, + TmScope: "source.red", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Redcode": &Language{ + Name: "Redcode", + Type: "programming", + Extensions: []string{".cw"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ren'Py": &Language{ + Name: "Ren'Py", + Type: "programming", + Group: "Python", + Color: "#ff7f7f", + Aliases: []string{"renpy"}, + Extensions: []string{".rpy"}, + TmScope: "source.renpy", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "RenderScript": &Language{ + Name: "RenderScript", + Type: "programming", + Extensions: []string{".rs", ".rsh"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "RobotFramework": &Language{ + Name: "RobotFramework", + Type: 
"programming", + Extensions: []string{".robot"}, + TmScope: "text.robot", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Rouge": &Language{ + Name: "Rouge", + Type: "programming", + Color: "#cc0088", + Extensions: []string{".rg"}, + TmScope: "source.clojure", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Ruby": &Language{ + Name: "Ruby", + Type: "programming", + Color: "#701516", + Aliases: []string{"jruby", "macruby", "rake", "rb", "rbx"}, + Extensions: []string{".rb", ".builder", ".fcgi", ".gemspec", ".god", ".irbrc", ".jbuilder", ".mspec", ".pluginspec", ".podspec", ".rabl", ".rake", ".rbuild", ".rbw", ".rbx", ".ru", ".ruby", ".thor", ".watchr"}, + Filenames: []string{".pryrc", "Appraisals", "Berksfile", "Brewfile", "Buildfile", "Deliverfile", "Fastfile", "Gemfile", "Gemfile.lock", "Guardfile", "Jarfile", "Mavenfile", "Podfile", "Puppetfile", "Snapfile", "Thorfile", "Vagrantfile", "buildfile"}, + Interpreters: []string{"ruby", "macruby", "rake", "jruby", "rbx"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Rust": &Language{ + Name: "Rust", + Type: "programming", + Color: "#dea584", + Extensions: []string{".rs", ".rs.in"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "SAS": &Language{ + Name: "SAS", + Type: "programming", + Color: "#B34936", + Extensions: []string{".sas"}, + TmScope: "source.sas", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "SCSS": &Language{ + Name: "SCSS", + Type: "markup", + Group: "CSS", + Color: "#CF649A", + Extensions: []string{".scss"}, + TmScope: "source.scss", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "SMT": &Language{ + Name: "SMT", + Type: "programming", + Extensions: []string{".smt2", ".smt"}, + Interpreters: []string{"boolector", "cvc4", "mathsat5", "opensmt", "smtinterpol", "smt-rat", "stp", "verit", "yices2", "z3"}, + TmScope: "source.smt", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "SPARQL": &Language{ + Name: "SPARQL", + Type: "data", + Extensions: []string{".sparql", ".rq"}, + TmScope: "source.sparql", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "SQF": &Language{ + Name: "SQF", + Type: "programming", + Color: "#3F3F3F", + Extensions: []string{".sqf", ".hqf"}, + TmScope: "source.sqf", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "SQL": &Language{ + Name: "SQL", + Type: "data", + Extensions: []string{".sql", ".cql", ".ddl", ".inc", ".prc", ".tab", ".udf", ".viw"}, + TmScope: "source.sql", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "SQLPL": &Language{ + Name: "SQLPL", + Type: "programming", + Extensions: []string{".sql", ".db2"}, + TmScope: "source.sql", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "STON": &Language{ + Name: "STON", + Type: "data", + Group: "Smalltalk", + Extensions: []string{".ston"}, + TmScope: "source.smalltalk", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "SVG": &Language{ + Name: "SVG", + Type: "data", + Extensions: []string{".svg"}, + TmScope: "text.xml", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Sage": &Language{ + Name: "Sage", + Type: "programming", + Group: "Python", + Extensions: []string{".sage", ".sagews"}, + TmScope: "source.python", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "SaltStack": &Language{ + Name: "SaltStack", + Type: "programming", + Color: "#646464", + Aliases: []string{"saltstate", "salt"}, + Extensions: 
[]string{".sls"}, + TmScope: "source.yaml.salt", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Sass": &Language{ + Name: "Sass", + Type: "markup", + Group: "CSS", + Color: "#CF649A", + Extensions: []string{".sass"}, + TmScope: "source.sass", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Scala": &Language{ + Name: "Scala", + Type: "programming", + Color: "#DC322F", + Extensions: []string{".scala", ".sbt", ".sc"}, + Interpreters: []string{"scala"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Scaml": &Language{ + Name: "Scaml", + Type: "markup", + Group: "HTML", + Extensions: []string{".scaml"}, + TmScope: "source.scaml", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Scheme": &Language{ + Name: "Scheme", + Type: "programming", + Color: "#1e4aec", + Extensions: []string{".scm", ".sld", ".sls", ".sps", ".ss"}, + Interpreters: []string{"guile", "bigloo", "chicken"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Scilab": &Language{ + Name: "Scilab", + Type: "programming", + Extensions: []string{".sci", ".sce", ".tst"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Self": &Language{ + Name: "Self", + Type: "programming", + Color: "#0579aa", + Extensions: []string{".self"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Shell": &Language{ + Name: "Shell", + Type: "programming", + Color: "#89e051", + Aliases: []string{"sh", "shell-script", "bash", "zsh"}, + Extensions: []string{".sh", ".bash", ".bats", ".cgi", ".command", ".fcgi", ".ksh", ".sh.in", ".tmux", ".tool", ".zsh"}, + Interpreters: []string{"bash", "rc", "sh", "zsh"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ShellSession": &Language{ + Name: "ShellSession", + Type: "programming", + Aliases: []string{"bash session", "console"}, + Extensions: []string{".sh-session"}, + TmScope: "text.shell-session", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Shen": &Language{ + Name: "Shen", + Type: "programming", + Color: "#120F14", + Extensions: []string{".shen"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Slash": &Language{ + Name: "Slash", + Type: "programming", + Color: "#007eff", + Extensions: []string{".sl"}, + TmScope: "text.html.slash", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Slim": &Language{ + Name: "Slim", + Type: "markup", + Group: "HTML", + Color: "#ff8f77", + Extensions: []string{".slim"}, + TmScope: "text.slim", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Smali": &Language{ + Name: "Smali", + Type: "programming", + Extensions: []string{".smali"}, + TmScope: "source.smali", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Smalltalk": &Language{ + Name: "Smalltalk", + Type: "programming", + Color: "#596706", + Aliases: []string{"squeak"}, + Extensions: []string{".st", ".cs"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Smarty": &Language{ + Name: "Smarty", + Type: "programming", + Extensions: []string{".tpl"}, + TmScope: "text.html.smarty", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "SourcePawn": &Language{ + Name: "SourcePawn", + Type: "programming", + Color: "#5c7611", + Aliases: []string{"sourcemod"}, + Extensions: []string{".sp", ".inc", ".sma"}, + TmScope: "source.sp", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Squirrel": &Language{ 
+ Name: "Squirrel", + Type: "programming", + Color: "#800000", + Extensions: []string{".nut"}, + TmScope: "source.c++", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Stan": &Language{ + Name: "Stan", + Type: "programming", + Color: "#b2011d", + Extensions: []string{".stan"}, + TmScope: "source.stan", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Standard ML": &Language{ + Name: "Standard ML", + Type: "programming", + Color: "#dc566d", + Aliases: []string{"sml"}, + Extensions: []string{".ML", ".fun", ".sig", ".sml"}, + TmScope: "source.ml", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Stata": &Language{ + Name: "Stata", + Type: "programming", + Extensions: []string{".do", ".ado", ".doh", ".ihlp", ".mata", ".matah", ".sthlp"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Stylus": &Language{ + Name: "Stylus", + Type: "markup", + Group: "CSS", + Extensions: []string{".styl"}, + TmScope: "source.stylus", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "SuperCollider": &Language{ + Name: "SuperCollider", + Type: "programming", + Color: "#46390b", + Extensions: []string{".sc", ".scd"}, + Interpreters: []string{"sclang", "scsynth"}, + TmScope: "source.supercollider", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Swift": &Language{ + Name: "Swift", + Type: "programming", + Color: "#ffac45", + Extensions: []string{".swift"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "SystemVerilog": &Language{ + Name: "SystemVerilog", + Type: "programming", + Color: "#DAE1C2", + Extensions: []string{".sv", ".svh", ".vh"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "TOML": &Language{ + Name: "TOML", + Type: "data", + Extensions: []string{".toml"}, + TmScope: "source.toml", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "TXL": &Language{ + Name: "TXL", + Type: "programming", + Extensions: []string{".txl"}, + TmScope: "source.txl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Tcl": &Language{ + Name: "Tcl", + Type: "programming", + Color: "#e4cc98", + Extensions: []string{".tcl", ".adp", ".tm"}, + Interpreters: []string{"tclsh", "wish"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Tcsh": &Language{ + Name: "Tcsh", + Type: "programming", + Group: "Shell", + Extensions: []string{".tcsh", ".csh"}, + TmScope: "source.shell", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "TeX": &Language{ + Name: "TeX", + Type: "markup", + Color: "#3D6117", + Aliases: []string{"latex"}, + Extensions: []string{".tex", ".aux", ".bbx", ".bib", ".cbx", ".cls", ".dtx", ".ins", ".lbx", ".ltx", ".mkii", ".mkiv", ".mkvi", ".sty", ".toc"}, + AceMode: "markup", + Wrap: true, + Searchable: true, + }, + "Tea": &Language{ + Name: "Tea", + Type: "markup", + Extensions: []string{".tea"}, + TmScope: "source.tea", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Text": &Language{ + Name: "Text", + Type: "prose", + Aliases: []string{"fundamental"}, + Extensions: []string{".txt", ".fr", ".ncl"}, + TmScope: "none", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "Textile": &Language{ + Name: "Textile", + Type: "prose", + Extensions: []string{".textile"}, + TmScope: "none", + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "Thrift": &Language{ + Name: "Thrift", + Type: "programming", + Extensions: []string{".thrift"}, + TmScope: "source.thrift", + AceMode: 
"programming", + Wrap: false, + Searchable: true, + }, + "Turing": &Language{ + Name: "Turing", + Type: "programming", + Color: "#45f715", + Extensions: []string{".t", ".tu"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Turtle": &Language{ + Name: "Turtle", + Type: "data", + Extensions: []string{".ttl"}, + TmScope: "source.turtle", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Twig": &Language{ + Name: "Twig", + Type: "markup", + Group: "HTML", + Extensions: []string{".twig"}, + TmScope: "text.html.twig", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "TypeScript": &Language{ + Name: "TypeScript", + Type: "programming", + Color: "#2b7489", + Aliases: []string{"ts"}, + Extensions: []string{".ts", ".tsx"}, + TmScope: "source.ts", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Unified Parallel C": &Language{ + Name: "Unified Parallel C", + Type: "programming", + Group: "C", + Color: "#4e3617", + Extensions: []string{".upc"}, + TmScope: "source.c", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Unity3D Asset": &Language{ + Name: "Unity3D Asset", + Type: "data", + Extensions: []string{".anim", ".asset", ".mat", ".meta", ".prefab", ".unity"}, + TmScope: "source.yaml", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Uno": &Language{ + Name: "Uno", + Type: "programming", + Extensions: []string{".uno"}, + TmScope: "source.cs", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "UnrealScript": &Language{ + Name: "UnrealScript", + Type: "programming", + Color: "#a54c4d", + Extensions: []string{".uc"}, + TmScope: "source.java", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "UrWeb": &Language{ + Name: "UrWeb", + Type: "programming", + Aliases: []string{"Ur/Web", "Ur"}, + Extensions: []string{".ur", ".urs"}, + TmScope: "source.ur", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "VCL": &Language{ + Name: "VCL", + Type: "programming", + Group: "Perl", + Extensions: []string{".vcl"}, + TmScope: "source.varnish.vcl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "VHDL": &Language{ + Name: "VHDL", + Type: "programming", + Color: "#adb2cb", + Extensions: []string{".vhdl", ".vhd", ".vhf", ".vhi", ".vho", ".vhs", ".vht", ".vhw"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Vala": &Language{ + Name: "Vala", + Type: "programming", + Color: "#fbe5cd", + Extensions: []string{".vala", ".vapi"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Verilog": &Language{ + Name: "Verilog", + Type: "programming", + Color: "#b2b7f8", + Extensions: []string{".v", ".veo"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "VimL": &Language{ + Name: "VimL", + Type: "programming", + Color: "#199f4b", + Aliases: []string{"vim", "nvim"}, + Extensions: []string{".vim"}, + Filenames: []string{".nvimrc", ".vimrc", "_vimrc", "gvimrc", "nvimrc", "vimrc"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Visual Basic": &Language{ + Name: "Visual Basic", + Type: "programming", + Color: "#945db7", + Aliases: []string{"vb.net", "vbnet"}, + Extensions: []string{".vb", ".bas", ".cls", ".frm", ".frx", ".vba", ".vbhtml", ".vbs"}, + TmScope: "source.vbnet", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Volt": &Language{ + Name: "Volt", + Type: "programming", + Color: "#1F1F1F", + Extensions: []string{".volt"}, + TmScope: 
"source.d", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Vue": &Language{ + Name: "Vue", + Type: "markup", + Color: "#2c3e50", + Extensions: []string{".vue"}, + TmScope: "text.html.vue", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "Web Ontology Language": &Language{ + Name: "Web Ontology Language", + Type: "markup", + Color: "#9cc9dd", + Extensions: []string{".owl"}, + TmScope: "text.xml", + AceMode: "markup", + Wrap: false, + Searchable: true, + }, + "WebIDL": &Language{ + Name: "WebIDL", + Type: "programming", + Extensions: []string{".webidl"}, + TmScope: "source.webidl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "X10": &Language{ + Name: "X10", + Type: "programming", + Color: "#4B6BEF", + Aliases: []string{"xten"}, + Extensions: []string{".x10"}, + TmScope: "source.x10", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "XC": &Language{ + Name: "XC", + Type: "programming", + Color: "#99DA07", + Extensions: []string{".xc"}, + TmScope: "source.xc", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "XML": &Language{ + Name: "XML", + Type: "data", + Aliases: []string{"rss", "xsd", "wsdl"}, + Extensions: []string{".xml", ".ant", ".axml", ".ccxml", ".clixml", ".cproject", ".csl", ".csproj", ".ct", ".dita", ".ditamap", ".ditaval", ".dll.config", ".filters", ".fsproj", ".fxml", ".glade", ".gml", ".grxml", ".iml", ".ivy", ".jelly", ".jsproj", ".kml", ".launch", ".mdpolicy", ".mm", ".mod", ".mxml", ".nproj", ".nuspec", ".odd", ".osm", ".plist", ".pluginspec", ".ps1xml", ".psc1", ".pt", ".rdf", ".rss", ".scxml", ".srdf", ".storyboard", ".stTheme", ".sublime-snippet", ".targets", ".tmCommand", ".tml", ".tmLanguage", ".tmPreferences", ".tmSnippet", ".tmTheme", ".ts", ".tsx", ".ui", ".urdf", ".ux", ".vbproj", ".vcxproj", ".vxml", ".wsdl", ".wsf", ".wxi", ".wxl", ".wxs", ".x3d", ".xacro", ".xaml", ".xib", ".xlf", ".xliff", ".xmi", ".xml.dist", ".xproj", ".xsd", ".xul", ".zcml"}, + Filenames: []string{".classpath", ".project", "Settings.StyleCop", "Web.Debug.config", "Web.Release.config", "Web.config", "packages.config"}, + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "XPages": &Language{ + Name: "XPages", + Type: "programming", + Extensions: []string{".xsp-config", ".xsp.metadata"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "XProc": &Language{ + Name: "XProc", + Type: "programming", + Extensions: []string{".xpl", ".xproc"}, + TmScope: "text.xml", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "XQuery": &Language{ + Name: "XQuery", + Type: "programming", + Color: "#5232e7", + Extensions: []string{".xquery", ".xq", ".xql", ".xqm", ".xqy"}, + TmScope: "source.xq", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "XS": &Language{ + Name: "XS", + Type: "programming", + Extensions: []string{".xs"}, + TmScope: "source.c", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "XSLT": &Language{ + Name: "XSLT", + Type: "programming", + Color: "#EB8CEB", + Aliases: []string{"xsl"}, + Extensions: []string{".xslt", ".xsl"}, + TmScope: "text.xml.xsl", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Xojo": &Language{ + Name: "Xojo", + Type: "programming", + Extensions: []string{".xojo_code", ".xojo_menu", ".xojo_report", ".xojo_script", ".xojo_toolbar", ".xojo_window"}, + TmScope: "source.vbnet", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Xtend": 
&Language{ + Name: "Xtend", + Type: "programming", + Extensions: []string{".xtend"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "YAML": &Language{ + Name: "YAML", + Type: "data", + Aliases: []string{"yml"}, + Extensions: []string{".yml", ".reek", ".rviz", ".syntax", ".yaml", ".yaml-tmlanguage"}, + TmScope: "source.yaml", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "YANG": &Language{ + Name: "YANG", + Type: "data", + Extensions: []string{".yang"}, + TmScope: "source.yang", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "Yacc": &Language{ + Name: "Yacc", + Type: "programming", + Color: "#4B6C4B", + Extensions: []string{".y", ".yacc", ".yy"}, + TmScope: "source.bison", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Zephir": &Language{ + Name: "Zephir", + Type: "programming", + Color: "#118f9e", + Extensions: []string{".zep"}, + TmScope: "source.php.zephir", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "Zimpl": &Language{ + Name: "Zimpl", + Type: "programming", + Extensions: []string{".zimpl", ".zmpl", ".zpl"}, + TmScope: "none", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "desktop": &Language{ + Name: "desktop", + Type: "data", + Extensions: []string{".desktop", ".desktop.in"}, + TmScope: "source.desktop", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "eC": &Language{ + Name: "eC", + Type: "programming", + Color: "#913960", + Extensions: []string{".ec", ".eh"}, + TmScope: "source.c.ec", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "edn": &Language{ + Name: "edn", + Type: "data", + Extensions: []string{".edn"}, + TmScope: "source.clojure", + AceMode: "data", + Wrap: false, + Searchable: true, + }, + "fish": &Language{ + Name: "fish", + Type: "programming", + Group: "Shell", + Extensions: []string{".fish"}, + TmScope: "source.fish", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "mupad": &Language{ + Name: "mupad", + Type: "programming", + Extensions: []string{".mu"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "nesC": &Language{ + Name: "nesC", + Type: "programming", + Color: "#94B0C7", + Extensions: []string{".nc"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "ooc": &Language{ + Name: "ooc", + Type: "programming", + Color: "#b0b77e", + Extensions: []string{".ooc"}, + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "reStructuredText": &Language{ + Name: "reStructuredText", + Type: "prose", + Color: "#B3BCBC", + Aliases: []string{"rst"}, + Extensions: []string{".rst", ".rest", ".rest.txt", ".rst.txt"}, + AceMode: "prose", + Wrap: true, + Searchable: true, + }, + "wisp": &Language{ + Name: "wisp", + Type: "programming", + Color: "#7582D1", + Extensions: []string{".wisp"}, + TmScope: "source.clojure", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + "xBase": &Language{ + Name: "xBase", + Type: "programming", + Color: "#403a40", + Aliases: []string{"advpl", "clipper", "foxpro"}, + Extensions: []string{".prg", ".ch", ".prw"}, + TmScope: "source.harbour", + AceMode: "programming", + Wrap: false, + Searchable: true, + }, + } +) diff --git a/main.go b/main.go new file mode 100644 index 0000000000000000000000000000000000000000..43a299dbe2cecc617c937aa5f81270ff8fead8ec --- /dev/null +++ b/main.go @@ -0,0 +1,43 @@ +package main + +import ( + "log" + "os" + + "gitlab.com/gitlab-org/es-git-go/elastic" + "gitlab.com/gitlab-org/es-git-go/git" + 
"gitlab.com/gitlab-org/es-git-go/indexer" +) + +func main() { + if len(os.Args) != 3 { + log.Fatalf("Usage: %s <project-id> <project-path>", os.Args[0]) + } + + projectID := os.Args[1] + projectPath := os.Args[2] + fromSHA := os.Getenv("FROM_SHA") + toSHA := os.Getenv("TO_SHA") + + repo, err := git.NewGoGitRepository(projectPath, fromSHA, toSHA) + if err != nil { + log.Fatalf("Failed to open %s: %s", projectPath, err) + } + + esClient, err := elastic.FromEnv(projectID) + if err != nil { + log.Fatalln(err) + } + + idx := &indexer.Indexer{ + Submitter: esClient, + Repository: repo, + } + + log.Printf("Indexing from %s to %s", repo.FromHash, repo.ToHash) + log.Printf("Index: %s, Project ID: %s", esClient.IndexName, esClient.ParentID()) + + if err := idx.Index(); err != nil { + log.Fatalln("Indexing error: ", err) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..d645695673349e3947e8e5ae42332d0ac3164cd7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 0000000000000000000000000000000000000000..5f14d1162ed45c1497667760e5c9078d8fb93014 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go new file mode 100644 index 0000000000000000000000000000000000000000..56fdfc2bfc76c9715a861810bee26b80e03734cd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -0,0 +1,145 @@ +// Package awserr represents API error interface accessors for the SDK. +package awserr + +// An Error wraps lower level errors with code, message and an original error. 
+// The underlying concrete error type may also satisfy other interfaces which +// can be to used to obtain more specific information about the error. +// +// Calling Error() or String() will always include the full information about +// an error based on its underlying type. +// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Get error details +// log.Println("Error:", awsErr.Code(), awsErr.Message()) +// +// // Prints out full error message, including original error if there was one. +// log.Println("Error:", awsErr.Error()) +// +// // Get original error +// if origErr := awsErr.OrigErr(); origErr != nil { +// // operate on original error. +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type Error interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErr() error +} + +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// New returns an Error object described by the code, message, and origErr. +// +// If origErr satisfies the Error interface it will not be wrapped within a new +// Error object and will instead be returned. +func New(code, message string, origErr error) Error { + var errs []error + if origErr != nil { + errs = append(errs, origErr) + } + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. +func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) +} + +// A RequestFailure is an interface to extract request failure information from +// an Error such as the request ID of the failed request returned by a service. +// RequestFailures may not always have a requestID value if the request failed +// prior to reaching the service such as a connection error. 
+// +// Example: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if reqerr, ok := err.(RequestFailure); ok { +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// } else { +// log.Println("Error:", err.Error()) +// } +// } +// +// Combined with awserr.Error: +// +// output, err := s3manage.Upload(svc, input, opts) +// if err != nil { +// if awsErr, ok := err.(awserr.Error); ok { +// // Generic AWS Error with Code, Message, and original error (if any) +// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) +// +// if reqErr, ok := err.(awserr.RequestFailure); ok { +// // A service error occurred +// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) +// } +// } else { +// fmt.Println(err.Error()) +// } +// } +// +type RequestFailure interface { + Error + + // The status code of the HTTP response. + StatusCode() int + + // The request ID returned by the service for a request failure. This will + // be empty if no request ID is available such as the request failed due + // to a connection error. + RequestID() string +} + +// NewRequestFailure returns a new request error wrapper for the given Error +// provided. +func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { + return newRequestError(err, statusCode, reqID) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go new file mode 100644 index 0000000000000000000000000000000000000000..0202a008f5d7dec3c76a1e5bebe813a8b8b2593b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -0,0 +1,194 @@ +package awserr + +import "fmt" + +// SprintError returns a string of the formatted error code. +// +// Both extra and origErr are optional. If they are included their lines +// will be added, but if they are not included their lines will be ignored. +func SprintError(code, message, extra string, origErr error) string { + msg := fmt.Sprintf("%s: %s", code, message) + if extra != "" { + msg = fmt.Sprintf("%s\n\t%s", msg, extra) + } + if origErr != nil { + msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) + } + return msg +} + +// A baseError wraps the code and message which defines an error. It also +// can be used to wrap an original error object. +// +// Should be used as the root for errors satisfying the awserr.Error. Also +// for any error which does not fit into a specific error wrapper type. +type baseError struct { + // Classification of error + code string + + // Detailed information about error + message string + + // Optional original error this error is based off of. Allows building + // chained errors. + errs []error +} + +// newBaseError returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the +// error. +// +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b +} + +// Error returns the string representation of the error. +// +// See ErrorWithExtra for formatting. +// +// Satisfies the error interface. 
+func (b baseError) Error() string { + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (b baseError) String() string { + return b.Error() +} + +// Code returns the short phrase depicting the classification of the error. +func (b baseError) Code() string { + return b.code +} + +// Message returns the error details message. +func (b baseError) Message() string { + return b.message +} + +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. +func (b baseError) OrigErr() error { + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (b baseError) OrigErrs() []error { + return b.errs +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError Error + +// A requestError wraps a request or service error. +// +// Composed of baseError for code, message, and original error. +type requestError struct { + awsError + statusCode int + requestID string +} + +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. +// +// Should be used to wrap all request which involve service requests. Even if +// the request failed without a service response, but had an HTTP status code +// that may be meaningful. +// +// Also wraps original errors via the baseError. +func newRequestError(err Error, statusCode int, requestID string) *requestError { + return &requestError{ + awsError: err, + statusCode: statusCode, + requestID: requestID, + } +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (r requestError) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s", + r.statusCode, r.requestID) + return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. +func (r requestError) String() string { + return r.Error() +} + +// StatusCode returns the wrapped status code for the error +func (r requestError) StatusCode() int { + return r.statusCode +} + +// RequestID returns the wrapped requestID +func (r requestError) RequestID() string { + return r.requestID +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. 
+func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go new file mode 100644 index 0000000000000000000000000000000000000000..1a3d106d5c1bb6e5d236d35a735ea1168f6fafa6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -0,0 +1,108 @@ +package awsutil + +import ( + "io" + "reflect" + "time" +) + +// Copy deeply copies a src structure to dst. Useful for copying request and +// response structures. +// +// Can copy between structs of different type, but will only copy fields which +// are assignable, and exist in both structs. Fields which are not assignable, +// or do not exist in both structs are ignored. +func Copy(dst, src interface{}) { + dstval := reflect.ValueOf(dst) + if !dstval.IsValid() { + panic("Copy dst cannot be nil") + } + + rcopy(dstval, reflect.ValueOf(src), true) +} + +// CopyOf returns a copy of src while also allocating the memory for dst. +// src must be a pointer type or this operation will fail. +func CopyOf(src interface{}) (dst interface{}) { + dsti := reflect.New(reflect.TypeOf(src).Elem()) + dst = dsti.Interface() + rcopy(dsti, reflect.ValueOf(src), true) + return +} + +// rcopy performs a recursive copy of values from the source to destination. +// +// root is used to skip certain aspects of the copy which are not valid +// for the root node of a object. +func rcopy(dst, src reflect.Value, root bool) { + if !src.IsValid() { + return + } + + switch src.Kind() { + case reflect.Ptr: + if _, ok := src.Interface().(io.Reader); ok { + if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { + dst.Elem().Set(src) + } else if dst.CanSet() { + dst.Set(src) + } + } else { + e := src.Type().Elem() + if dst.CanSet() && !src.IsNil() { + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } + } + if src.Elem().IsValid() { + // Keep the current root state since the depth hasn't changed + rcopy(dst.Elem(), src.Elem(), root) + } + } + case reflect.Struct: + t := dst.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) + } + } + case reflect.Slice: + if src.IsNil() { + break + } + + s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + dst.Set(s) + for i := 0; i < src.Len(); i++ { + rcopy(dst.Index(i), src.Index(i), false) + } + case reflect.Map: + if src.IsNil() { + break + } + + s := reflect.MakeMap(src.Type()) + dst.Set(s) + for _, k := range src.MapKeys() { + v := src.MapIndex(k) + v2 := reflect.New(v.Type()).Elem() + rcopy(v2, v, false) + dst.SetMapIndex(k, v2) + } + default: + // Assign the value if possible. If its not assignable, the value would + // need to be converted and the impact of that may be unexpected, or is + // not compatible with the dst type. 
+		if src.Type().AssignableTo(dst.Type()) {
+			dst.Set(src)
+		}
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 0000000000000000000000000000000000000000..59fa4a558a9ac9432a5febe06e48315ba5708219
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+	"reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+	ra := reflect.Indirect(reflect.ValueOf(a))
+	rb := reflect.Indirect(reflect.ValueOf(b))
+
+	if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+		// If the elements are both nil, and of the same type, they are equal
+		// If they are of different types they are not equal
+		return reflect.TypeOf(a) == reflect.TypeOf(b)
+	} else if raValid != rbValid {
+		// Both values must be valid to be equal
+		return false
+	}
+
+	return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 0000000000000000000000000000000000000000..11c52c38968743fce74d8d08271a2f8c97e651f3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { + pathparts := strings.Split(path, "||") + if len(pathparts) > 1 { + for _, pathpart := range pathparts { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { + return vals + } + } + return nil + } + + values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} + components := strings.Split(path, ".") + for len(values) > 0 && len(components) > 0 { + var index *int64 + var indexStar bool + c := strings.TrimSpace(components[0]) + if c == "" { // no actual component, illegal syntax + return nil + } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { + // TODO normalize case for user + return nil // don't support unexported fields + } + + // parse this component + if m := indexRe.FindStringSubmatch(c); m != nil { + c = m[1] + if m[2] == "" { + index = nil + indexStar = true + } else { + i, _ := strconv.ParseInt(m[2], 10, 32) + index = &i + indexStar = false + } + } + + nextvals := []reflect.Value{} + for _, value := range values { + // pull component name out of struct member + if value.Kind() != reflect.Struct { + continue + } + + if c == "*" { // pull all members + for i := 0; i < value.NumField(); i++ { + if f := reflect.Indirect(value.Field(i)); f.IsValid() { + nextvals = append(nextvals, f) + } + } + continue + } + + value = value.FieldByNameFunc(func(name string) bool { + if c == name { + return true + } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { + return true + } + return false + }) + + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. + value.Set(reflect.New(value.Type().Elem())) + value = value.Elem() + } else { + value = reflect.Indirect(value) + } + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + + if indexStar || index != nil { + nextvals = []reflect.Value{} + for _, valItem := range values { + value := reflect.Indirect(valItem) + if value.Kind() != reflect.Slice { + continue + } + + if indexStar { // grab all indices + for i := 0; i < value.Len(); i++ { + idx := reflect.Indirect(value.Index(i)) + if idx.IsValid() { + nextvals = append(nextvals, idx) + } + } + continue + } + + // pull out index + i := int(*index) + if i >= value.Len() { // check out of bounds + if createPath { + // TODO resize slice + } else { + continue + } + } else if i < 0 { // support negative indexing + i = value.Len() + i + } + value = reflect.Indirect(value.Index(i)) + + if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { + if !createPath && value.IsNil() { + value = reflect.ValueOf(nil) + } + } + + if value.IsValid() { + nextvals = append(nextvals, value) + } + } + values = nextvals + } + + components = components[1:] + } + return values +} + +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err + } + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil +} + +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. +func SetValueAtPath(i interface{}, path string, v interface{}) { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { + for _, rval := range rvals { + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) + } + } +} + +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) + } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go new file mode 100644 index 0000000000000000000000000000000000000000..710eb432f851075b3c42c93368ef8dea2f830621 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -0,0 +1,113 @@ +package awsutil + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" +) + +// Prettify returns the string representation of a value. +func Prettify(i interface{}) string { + var buf bytes.Buffer + prettify(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +// prettify will recursively walk value v to build a textual +// representation of the value. 
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + strtype := v.Type().String() + if strtype == "time.Time" { + fmt.Fprintf(buf, "%s", v.Interface()) + break + } else if strings.HasPrefix(strtype, "io.") { + buf.WriteString("<buffer>") + break + } + + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + prettify(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, "<binary> len %d", v.Len()) + break + } + + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + prettify(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + prettify(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + if !v.IsValid() { + fmt.Fprint(buf, "<invalid value>") + return + } + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + case io.ReadSeeker, io.Reader: + format = "buffer(%p)" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 0000000000000000000000000000000000000000..b6432f1a1188d00439639ed516bb410d13efb7dd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. 
+func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 0000000000000000000000000000000000000000..4778056ddfdae34bc3fea0b6f83426d70b41df40 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 0000000000000000000000000000000000000000..f5a7c3792023189973d833f0545614cbf1f2ae16 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,455 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. By default, +// all clients will use the defaults.DefaultConfig tructure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. 
+// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. + // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. 
+ // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpiont to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requets. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. 
+ // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. +func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. 
+func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. +func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. 
+func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go new file mode 100644 index 0000000000000000000000000000000000000000..3b73a7da7f9d5f79c9ef1a00acc0c42aa226b3fc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -0,0 +1,369 @@ +package aws + +import "time" + +// String returns a pointer to the string value passed in. +func String(v string) *string { + return &v +} + +// StringValue returns the value of the string pointer passed in or +// "" if the pointer is nil. +func StringValue(v *string) string { + if v != nil { + return *v + } + return "" +} + +// StringSlice converts a slice of string values into a slice of +// string pointers +func StringSlice(src []string) []*string { + dst := make([]*string, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// StringValueSlice converts a slice of string pointers into a slice of +// string values +func StringValueSlice(src []*string) []string { + dst := make([]string, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// StringMap converts a string map of string values into a string +// map of string pointers +func StringMap(src map[string]string) map[string]*string { + dst := make(map[string]*string) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// StringValueMap converts a string map of string pointers into a string +// map of string values +func StringValueMap(src map[string]*string) map[string]string { + dst := make(map[string]string) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Bool returns a pointer to the bool value passed in. +func Bool(v bool) *bool { + return &v +} + +// BoolValue returns the value of the bool pointer passed in or +// false if the pointer is nil. +func BoolValue(v *bool) bool { + if v != nil { + return *v + } + return false +} + +// BoolSlice converts a slice of bool values into a slice of +// bool pointers +func BoolSlice(src []bool) []*bool { + dst := make([]*bool, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// BoolValueSlice converts a slice of bool pointers into a slice of +// bool values +func BoolValueSlice(src []*bool) []bool { + dst := make([]bool, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// BoolMap converts a string map of bool values into a string +// map of bool pointers +func BoolMap(src map[string]bool) map[string]*bool { + dst := make(map[string]*bool) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// BoolValueMap converts a string map of bool pointers into a string +// map of bool values +func BoolValueMap(src map[string]*bool) map[string]bool { + dst := make(map[string]bool) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int returns a pointer to the int value passed in. +func Int(v int) *int { + return &v +} + +// IntValue returns the value of the int pointer passed in or +// 0 if the pointer is nil. 
+func IntValue(v *int) int { + if v != nil { + return *v + } + return 0 +} + +// IntSlice converts a slice of int values into a slice of +// int pointers +func IntSlice(src []int) []*int { + dst := make([]*int, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// IntValueSlice converts a slice of int pointers into a slice of +// int values +func IntValueSlice(src []*int) []int { + dst := make([]int, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// IntMap converts a string map of int values into a string +// map of int pointers +func IntMap(src map[string]int) map[string]*int { + dst := make(map[string]*int) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// IntValueMap converts a string map of int pointers into a string +// map of int values +func IntValueMap(src map[string]*int) map[string]int { + dst := make(map[string]int) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Int64 returns a pointer to the int64 value passed in. +func Int64(v int64) *int64 { + return &v +} + +// Int64Value returns the value of the int64 pointer passed in or +// 0 if the pointer is nil. +func Int64Value(v *int64) int64 { + if v != nil { + return *v + } + return 0 +} + +// Int64Slice converts a slice of int64 values into a slice of +// int64 pointers +func Int64Slice(src []int64) []*int64 { + dst := make([]*int64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Int64ValueSlice converts a slice of int64 pointers into a slice of +// int64 values +func Int64ValueSlice(src []*int64) []int64 { + dst := make([]int64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Int64Map converts a string map of int64 values into a string +// map of int64 pointers +func Int64Map(src map[string]int64) map[string]*int64 { + dst := make(map[string]*int64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Int64ValueMap converts a string map of int64 pointers into a string +// map of int64 values +func Int64ValueMap(src map[string]*int64) map[string]int64 { + dst := make(map[string]int64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Float64 returns a pointer to the float64 value passed in. +func Float64(v float64) *float64 { + return &v +} + +// Float64Value returns the value of the float64 pointer passed in or +// 0 if the pointer is nil. 
+func Float64Value(v *float64) float64 { + if v != nil { + return *v + } + return 0 +} + +// Float64Slice converts a slice of float64 values into a slice of +// float64 pointers +func Float64Slice(src []float64) []*float64 { + dst := make([]*float64, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// Float64ValueSlice converts a slice of float64 pointers into a slice of +// float64 values +func Float64ValueSlice(src []*float64) []float64 { + dst := make([]float64, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// Float64Map converts a string map of float64 values into a string +// map of float64 pointers +func Float64Map(src map[string]float64) map[string]*float64 { + dst := make(map[string]*float64) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// Float64ValueMap converts a string map of float64 pointers into a string +// map of float64 values +func Float64ValueMap(src map[string]*float64) map[string]float64 { + dst := make(map[string]float64) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} + +// Time returns a pointer to the time.Time value passed in. +func Time(v time.Time) *time.Time { + return &v +} + +// TimeValue returns the value of the time.Time pointer passed in or +// time.Time{} if the pointer is nil. +func TimeValue(v *time.Time) time.Time { + if v != nil { + return *v + } + return time.Time{} +} + +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + +// TimeSlice converts a slice of time.Time values into a slice of +// time.Time pointers +func TimeSlice(src []time.Time) []*time.Time { + dst := make([]*time.Time, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + return dst +} + +// TimeValueSlice converts a slice of time.Time pointers into a slice of +// time.Time values +func TimeValueSlice(src []*time.Time) []time.Time { + dst := make([]time.Time, len(src)) + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + return dst +} + +// TimeMap converts a string map of time.Time values into a string +// map of time.Time pointers +func TimeMap(src map[string]time.Time) map[string]*time.Time { + dst := make(map[string]*time.Time) + for k, val := range src { + v := val + dst[k] = &v + } + return dst +} + +// TimeValueMap converts a string map of time.Time pointers into a string +// map of time.Time values +func TimeValueMap(src map[string]*time.Time) map[string]time.Time { + dst := make(map[string]time.Time) + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go new file mode 100644 index 0000000000000000000000000000000000000000..6efc77bf0932597668da2d11b38498cb46fe745f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -0,0 +1,100 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var ( + // ErrNoValidProvidersFoundInChain Is returned when there are no valid + // providers in the ChainProvider. + // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // + // @readonly + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. + For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) +) + +// A ChainProvider will search for a provider which returns credentials +// and cache that provider until Retrieve is called again. +// +// The ChainProvider provides a way of chaining multiple providers together +// which will pick the first available using priority order of the Providers +// in the list. +// +// If none of the Providers retrieve valid credentials Value, ChainProvider's +// Retrieve() will return the error ErrNoValidProvidersFoundInChain. +// +// If a Provider is found which returns valid credentials Value ChainProvider +// will cache that Provider for all calls to IsExpired(), until Retrieve is +// called again. +// +// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. +// In this example EnvProvider will first check if any credentials are available +// via the environment variables. If there are none ChainProvider will check +// the next Provider in the list, EC2RoleProvider in this case. 
If EC2RoleProvider +// does not return any credentials ChainProvider will return the error +// ErrNoValidProvidersFoundInChain +// +// creds := NewChainCredentials( +// []Provider{ +// &EnvProvider{}, +// &EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, +// }) +// +// // Usage of ChainCredentials with aws.Config +// svc := ec2.New(&aws.Config{Credentials: creds}) +// +type ChainProvider struct { + Providers []Provider + curr Provider + VerboseErrors bool +} + +// NewChainCredentials returns a pointer to a new Credentials object +// wrapping a chain of providers. +func NewChainCredentials(providers []Provider) *Credentials { + return NewCredentials(&ChainProvider{ + Providers: append([]Provider{}, providers...), + }) +} + +// Retrieve returns the credentials value or error if no provider returned +// without error. +// +// If a provider is found it will be cached and any calls to IsExpired() +// will return the expired state of the cached provider. +func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error + for _, p := range c.Providers { + creds, err := p.Retrieve() + if err == nil { + c.curr = p + return creds, nil + } + errs = append(errs, err) + } + c.curr = nil + + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err +} + +// IsExpired will returned the expired state of the currently cached provider +// if there is one. If there is no current provider, true will be returned. +func (c *ChainProvider) IsExpired() bool { + if c.curr != nil { + return c.curr.IsExpired() + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go new file mode 100644 index 0000000000000000000000000000000000000000..7b8ebf5f9d872236803ad9d0c10d8b14d4ec273a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -0,0 +1,223 @@ +// Package credentials provides credential retrieval and management +// +// The Credentials is the primary method of getting access to and managing +// credentials Values. Using dependency injection retrieval of the credential +// values is handled by a object which satisfies the Provider interface. +// +// By default the Credentials.Get() will cache the successful result of a +// Provider's Retrieve() until Provider.IsExpired() returns true. At which +// point Credentials will call Provider's Retrieve() to get new credential Value. +// +// The Provider is responsible for determining when credentials Value have expired. +// It is also important to note that Credentials will always call Retrieve the +// first time Credentials.Get() is called. +// +// Example of using the environment variable credentials. +// +// creds := NewEnvCredentials() +// +// // Retrieve the credentials value +// credValue, err := creds.Get() +// if err != nil { +// // handle error +// } +// +// Example of forcing credentials to expire and be refreshed on the next Get(). +// This may be helpful to proactively expire credentials and refresh them sooner +// than they would naturally expire on their own. +// +// creds := NewCredentials(&EC2RoleProvider{}) +// creds.Expire() +// credsValue, err := creds.Get() +// // New credentials will be retrieved instead of from cache. 
+// +// +// Custom Provider +// +// Each Provider built into this package also provides a helper method to generate +// a Credentials pointer setup with the provider. To use a custom Provider just +// create a type which satisfies the Provider interface and pass it to the +// NewCredentials method. +// +// type MyProvider struct{} +// func (m *MyProvider) Retrieve() (Value, error) {...} +// func (m *MyProvider) IsExpired() bool {...} +// +// creds := NewCredentials(&MyProvider{}) +// credValue, err := creds.Get() +// +package credentials + +import ( + "sync" + "time" +) + +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need signed. +// +// This Credentials can be used to configure a service to not sign requests +// when making service API calls. For example, when accessing public +// s3 buckets. +// +// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// // Access public S3 buckets. +// +// @readonly +var AnonymousCredentials = NewStaticCredentials("", "", "") + +// A Value is the AWS credentials value for individual credential fields. +type Value struct { + // AWS Access key ID + AccessKeyID string + + // AWS Secret Access Key + SecretAccessKey string + + // AWS Session Token + SessionToken string + + // Provider used to get credentials + ProviderName string +} + +// A Provider is the interface for any component which will provide credentials +// Value. A provider is required to manage its own Expired state, and what to +// be expired means. +// +// The Provider should not need to implement its own mutexes, because +// that will be managed by Credentials. +type Provider interface { + // Refresh returns nil if it successfully retrieved the value. + // Error is returned if the value were not obtainable, or empty. + Retrieve() (Value, error) + + // IsExpired returns if the credentials are no longer valid, and need + // to be retrieved. + IsExpired() bool +} + +// A Expiry provides shared expiration logic to be used by credentials +// providers to implement expiry functionality. +// +// The best method to use this struct is as an anonymous field within the +// provider's struct. +// +// Example: +// type EC2RoleProvider struct { +// Expiry +// ... +// } +type Expiry struct { + // The date/time when to expire on + expiration time.Time + + // If set will be used by IsExpired to determine the current time. + // Defaults to time.Now if CurrentTime is not set. Available for testing + // to be able to mock out the current time. + CurrentTime func() time.Time +} + +// SetExpiration sets the expiration IsExpired will check when called. +// +// If window is greater than 0 the expiration time will be reduced by the +// window value. +// +// Using a window is helpful to trigger credentials to expire sooner than +// the expiration time given to ensure no requests are made with expired +// tokens. +func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { + e.expiration = expiration + if window > 0 { + e.expiration = e.expiration.Add(-window) + } +} + +// IsExpired returns if the credentials are expired. +func (e *Expiry) IsExpired() bool { + if e.CurrentTime == nil { + e.CurrentTime = time.Now + } + return e.expiration.Before(e.CurrentTime()) +} + +// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// Credentials will cache the credentials value until they expire. Once the value +// expires the next Get will attempt to retrieve valid credentials. 
+// +// Credentials is safe to use across multiple goroutines and will manage the +// synchronous state so the Providers do not need to implement their own +// synchronization. +// +// The first Credentials.Get() will always call Provider.Retrieve() to get the +// first instance of the credentials Value. All calls to Get() after that +// will return the cached credentials Value until IsExpired() returns true. +type Credentials struct { + creds Value + forceRefresh bool + m sync.Mutex + + provider Provider +} + +// NewCredentials returns a pointer to a new Credentials with the provider set. +func NewCredentials(provider Provider) *Credentials { + return &Credentials{ + provider: provider, + forceRefresh: true, + } +} + +// Get returns the credentials value, or error if the credentials Value failed +// to be retrieved. +// +// Will return the cached credentials Value if it has not expired. If the +// credentials Value has expired the Provider's Retrieve() will be called +// to refresh the credentials. +// +// If Credentials.Expire() was called the credentials Value will be force +// expired, and the next call to Get() will cause them to be refreshed. +func (c *Credentials) Get() (Value, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.isExpired() { + creds, err := c.provider.Retrieve() + if err != nil { + return Value{}, err + } + c.creds = creds + c.forceRefresh = false + } + + return c.creds, nil +} + +// Expire expires the credentials and forces them to be retrieved on the +// next call to Get(). +// +// This will override the Provider's expired state, and force Credentials +// to call the Provider's Retrieve(). +func (c *Credentials) Expire() { + c.m.Lock() + defer c.m.Unlock() + + c.forceRefresh = true +} + +// IsExpired returns if the credentials are no longer valid, and need +// to be retrieved. +// +// If the Credentials were forced to be expired with Expire() this will +// reflect that override. +func (c *Credentials) IsExpired() bool { + c.m.Lock() + defer c.m.Unlock() + + return c.isExpired() +} + +// isExpired helper method wrapping the definition of expired credentials. +func (c *Credentials) isExpired() bool { + return c.forceRefresh || c.provider.IsExpired() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go new file mode 100644 index 0000000000000000000000000000000000000000..96655bc46ae38537e71f243f29dc11f67d7644ae --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -0,0 +1,77 @@ +package credentials + +import ( + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + +var ( + // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be + // found in the process's environment. + // + // @readonly + ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) + + // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key + // can't be found in the process's environment. + // + // @readonly + ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) +) + +// A EnvProvider retrieves credentials from the environment variables of the +// running process. Environment credentials never expire. 
+// +// Environment variables used: +// +// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY +type EnvProvider struct { + retrieved bool +} + +// NewEnvCredentials returns a pointer to a new Credentials object +// wrapping the environment variable provider. +func NewEnvCredentials() *Credentials { + return NewCredentials(&EnvProvider{}) +} + +// Retrieve retrieves the keys from the environment. +func (e *EnvProvider) Retrieve() (Value, error) { + e.retrieved = false + + id := os.Getenv("AWS_ACCESS_KEY_ID") + if id == "" { + id = os.Getenv("AWS_ACCESS_KEY") + } + + secret := os.Getenv("AWS_SECRET_ACCESS_KEY") + if secret == "" { + secret = os.Getenv("AWS_SECRET_KEY") + } + + if id == "" { + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound + } + + if secret == "" { + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound + } + + e.retrieved = true + return Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, + }, nil +} + +// IsExpired returns if the credentials have been retrieved. +func (e *EnvProvider) IsExpired() bool { + return !e.retrieved +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini new file mode 100644 index 0000000000000000000000000000000000000000..7fc91d9d2047bc69b01fa0364a0cd64daa0c9923 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -0,0 +1,12 @@ +[default] +aws_access_key_id = accessKey +aws_secret_access_key = secret +aws_session_token = token + +[no_token] +aws_access_key_id = accessKey +aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go new file mode 100644 index 0000000000000000000000000000000000000000..7fb7cbf0db00c59220a18bc5d43703742070fe0f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -0,0 +1,151 @@ +package credentials + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-ini/ini" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + +var ( + // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. + // + // @readonly + ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) +) + +// A SharedCredentialsProvider retrieves credentials from the current user's home +// directory, and keeps track if those credentials are expired. +// +// Profile ini file example: $HOME/.aws/credentials +type SharedCredentialsProvider struct { + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" + Filename string + + // AWS Profile to extract credentials from the shared credentials file. If empty + // will default to environment variable "AWS_PROFILE" or "default" if + // environment variable is also not set. 
+ Profile string + + // retrieved states if the credentials have been successfully retrieved. + retrieved bool +} + +// NewSharedCredentials returns a pointer to a new Credentials object +// wrapping the Profile file provider. +func NewSharedCredentials(filename, profile string) *Credentials { + return NewCredentials(&SharedCredentialsProvider{ + Filename: filename, + Profile: profile, + }) +} + +// Retrieve reads and extracts the shared credentials from the current +// users home directory. +func (p *SharedCredentialsProvider) Retrieve() (Value, error) { + p.retrieved = false + + filename, err := p.filename() + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + creds, err := loadProfile(filename, p.profile()) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, err + } + + p.retrieved = true + return creds, nil +} + +// IsExpired returns if the shared credentials have expired. +func (p *SharedCredentialsProvider) IsExpired() bool { + return !p.retrieved +} + +// loadProfiles loads from the file pointed to by shared credentials filename for profile. +// The credentials retrieved from the profile will be returned or error. Error will be +// returned if it fails to read from the file, or the data is invalid. +func loadProfile(filename, profile string) (Value, error) { + config, err := ini.Load(filename) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) + } + + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", + fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), + err) + } + + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", + fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), + nil) + } + + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") + + return Value{ + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, + }, nil +} + +// filename returns the filename to use to read AWS shared credentials. +// +// Will return an error if the user's home directory path cannot be found. +func (p *SharedCredentialsProvider) filename() (string, error) { + if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + return "", ErrSharedCredentialsHomeNotFound + } + + p.Filename = filepath.Join(homeDir, ".aws", "credentials") + } + + return p.Filename, nil +} + +// profile returns the AWS shared credentials profile. If empty will read +// environment variable "AWS_PROFILE". If that is not set profile will +// return "default". 
+func (p *SharedCredentialsProvider) profile() string { + if p.Profile == "" { + p.Profile = os.Getenv("AWS_PROFILE") + } + if p.Profile == "" { + p.Profile = "default" + } + + return p.Profile +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go new file mode 100644 index 0000000000000000000000000000000000000000..4f5dab3fcc477ba4214104366159af66b5a59017 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + +var ( + // ErrStaticCredentialsEmpty is emitted when static credentials are empty. + // + // @readonly + ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) +) + +// A StaticProvider is a set of credentials which are set programmatically, +// and will never expire. +type StaticProvider struct { + Value +} + +// NewStaticCredentials returns a pointer to a new Credentials object +// wrapping a static credentials value provider. +func NewStaticCredentials(id, secret, token string) *Credentials { + return NewCredentials(&StaticProvider{Value: Value{ + AccessKeyID: id, + SecretAccessKey: secret, + SessionToken: token, + }}) +} + +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + +// Retrieve returns the credentials or error if the credentials are invalid. +func (s *StaticProvider) Retrieve() (Value, error) { + if s.AccessKeyID == "" || s.SecretAccessKey == "" { + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty + } + + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } + return s.Value, nil +} + +// IsExpired returns if the credentials are expired. +// +// For StaticProvider, the credentials never expired. +func (s *StaticProvider) IsExpired() bool { + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 0000000000000000000000000000000000000000..74f72de07356e6d1f686f0d85d1104a0ad486211 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,133 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. 
+// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. + modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + s, ok := p.Services["s3"] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services["s3"] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 0000000000000000000000000000000000000000..5f0f6dd37a4a00173ccd5643b4be98b1b77620d6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,2094 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). 
+ EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. +const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// Service identifiers +const ( + AcmServiceID = "acm" // Acm. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AutoscalingServiceID = "autoscaling" // Autoscaling. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. 
+ MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + S3ServiceID = "s3" // S3. + SdbServiceID = "sdb" // Sdb. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultResolver() Resolver { + return defaultPartitions +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. 
+func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": 
endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + 
}, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": 
endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + 
"sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + 
Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + 
"sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + 
"route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3-ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3-eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3-us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3-us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "Shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + 
"ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "http", "https", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "support": 
service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + }, + Services: services{ + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: 
[]string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "http", "https", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-west-1": 
endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..a0e9bc4547154966f31f782f78127bf94647a0f5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.Name) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.Name) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. 
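A small illustrative sketch (not part of the vendored files) of resolving the FIPS entry defined above for S3 in the GovCloud partition, assuming the same vendored import path.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// The GovCloud metadata above gives S3 an explicit "fips-us-gov-west-1"
	// entry with its own hostname and a credentialScope that pins signing
	// to us-gov-west-1.
	p := endpoints.AwsUsGovPartition()
	ep, err := p.EndpointFor("s3", "fips-us-gov-west-1")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(ep.URL)           // https://s3-fips-us-gov-west-1.amazonaws.com
	fmt.Println(ep.SigningRegion) // us-gov-west-1
}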
+// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 0000000000000000000000000000000000000000..37e19ab00dfaf829eccd88674b76e27b407698ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,397 @@ +package endpoints + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP. + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions resolved endpoints. + // If the partition doesn't enumerate the exact service and region an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unkonwn and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL options. Can be used as a functional +// option when resolving endpoints. 
+func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The build in Partition and DefaultResolver return value satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions a provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions. +type EnumPartitions interface { + Partitions() []Partition +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id string + p *partition +} + +// ID returns the identifier of the partition. +func (p *Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. +// +// If the service cannot be found in the metadata the UnknownServiceError +// error will be returned. This validation will occur regardless if +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve a endpoint based on +// the service endpoint ID provided. +// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. 
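A brief sketch of combining the functional options above with EndpointFor. Illustrative only; the "acm" service ID is an arbitrary example of a service the China partition does not model.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	p := endpoints.AwsCnPartition()

	// DisableSSLOption resolves an http:// URL instead of the default https://.
	ep, err := p.EndpointFor("s3", "cn-north-1", endpoints.DisableSSLOption)
	if err == nil {
		fmt.Println(ep.URL) // http://s3.cn-north-1.amazonaws.com.cn
	}

	// ResolveUnknownServiceOption lets an unmodeled service fall back to the
	// partition's "{service}.{region}.{dnsSuffix}" hostname pattern.
	ep, err = p.EndpointFor("acm", "cn-north-1", endpoints.ResolveUnknownServiceOption)
	if err == nil {
		fmt.Println(ep.URL) // https://acm.cn-north-1.amazonaws.com.cn
	}
}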
Without +// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new regions and services expansions. +// +// Errors that can be returned. +// * UnknownServiceError +// * UnknownEndpointError +func (p *Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition. +func (p *Partition) Regions() map[string]Region { + rs := map[string]Region{} + for id := range p.p.Regions { + rs[id] = Region{ + id: id, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Service indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p *Partition) Services() map[string]Service { + ss := map[string]Service{} + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and ability to resolve an +// endpoint from the context of a region, given a service. +type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r *Region) ID() string { return r.id } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. +func (r *Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) +} + +// Services returns a list of all services that are known to be in this region. +func (r *Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[r.id]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s *Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region. See Partition.EndpointFor for usage and errors that can be returned. +func (s *Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Endpoints returns a map of Endpoints indexed by their ID for all known +// endpoints for a service. +func (s *Service) Endpoints() map[string]Endpoint { + es := map[string]Endpoint{} + for id := range s.p.Services[s.id].Endpoints { + es[id] = Endpoint{ + id: id, + serviceID: s.id, + p: s.p, + } + } + + return es +} + +// A Endpoint provides information about endpoints, and provides the ability +// to resolve that endpoint for the service, and the region the endpoint +// represents. +type Endpoint struct { + id string + serviceID string + p *partition +} + +// ID returns the identifier for an endpoint. +func (e *Endpoint) ID() string { return e.id } + +// ServiceID returns the identifier the endpoint belongs to. +func (e *Endpoint) ServiceID() string { return e.serviceID } + +// ResolveEndpoint resolves an endpoint from the context of a service and +// region the endpoint represents. 
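A sketch of the enumeration helpers defined in this file, using the China partition for brevity (illustrative only).

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	p := endpoints.AwsCnPartition()

	// Region.Services only reports services that have an explicit endpoint
	// entry for that region in the metadata.
	for regionID, r := range p.Regions() {
		fmt.Println("services modeled in", regionID)
		for serviceID := range r.Services() {
			fmt.Println(" -", serviceID)
		}
	}

	// Service.Endpoints lists every modeled endpoint ID for one service.
	if s, ok := p.Services()["s3"]; ok {
		for endpointID := range s.Endpoints() {
			fmt.Println("s3 endpoint:", endpointID)
		}
	}
}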
See Partition.EndpointFor for usage and +// errors that can be returned. +func (e *Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { + return e.p.EndpointFor(e.serviceID, e.id, opts...) +} + +// A ResolvedEndpoint is an endpoint that has been resolved based on a partition +// service, and region. +type ResolvedEndpoint struct { + // The endpoint URL + URL string + + // The region that should be used for signing requests. + SigningRegion string + + // The service name that should be used for signing requests. + SigningName string + + // The signing method that should be used for signing requests. + SigningMethod string +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A EndpointNotFoundError is returned when in StrictMatching mode, and the +// endpoint for the service and region cannot be found in any of the partitions. +type EndpointNotFoundError struct { + awsError + Partition string + Service string + Region string +} + +//// NewEndpointNotFoundError builds and returns NewEndpointNotFoundError. +//func NewEndpointNotFoundError(p, s, r string) EndpointNotFoundError { +// return EndpointNotFoundError{ +// awsError: awserr.New("EndpointNotFoundError", "unable to find endpoint", nil), +// Partition: p, +// Service: s, +// Region: r, +// } +//} +// +//// Error returns string representation of the error. +//func (e EndpointNotFoundError) Error() string { +// extra := fmt.Sprintf("partition: %q, service: %q, region: %q", +// e.Partition, e.Service, e.Region) +// return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +//} +// +//// String returns the string representation of the error. +//func (e EndpointNotFoundError) String() string { +// return e.Error() +//} + +// A UnknownServiceError is returned when the service does not resolve to an +// endpoint. Includes a list of all known services for the partition. Returned +// when a partition does not support the service. +type UnknownServiceError struct { + awsError + Partition string + Service string + Known []string +} + +// NewUnknownServiceError builds and returns UnknownServiceError. +func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. 
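A sketch of inspecting the concrete error types defined here when resolution fails. Illustrative only; the service and region values are chosen so they miss the China partition's metadata.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	p := endpoints.AwsCnPartition()

	// "waf" is not modeled in the China partition, so a typed
	// UnknownServiceError carrying the known service list is returned.
	_, err := p.EndpointFor("waf", "cn-north-1")
	if serr, ok := err.(endpoints.UnknownServiceError); ok {
		fmt.Println("unknown service:", serr.Service)
		fmt.Println("known services:", serr.Known)
	}

	// With strict matching an unmodeled region yields UnknownEndpointError.
	_, err = p.EndpointFor("s3", "cn-north-9", endpoints.StrictMatchingOption)
	if eerr, ok := err.(endpoints.UnknownEndpointError); ok {
		fmt.Println("unknown endpoint:", eerr.Region, "known:", eerr.Known)
	}
}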
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 0000000000000000000000000000000000000000..13d968a249e382cffc970f3517ff352e3486ee39 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,303 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. +func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. 
+ return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. 
+ SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + if len(signingName) == 0 { + signingName = service + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 0000000000000000000000000000000000000000..1e7369dbfd1c31c5fd40b0000809141d81e8d91c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,334 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. 
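A sketch of the observable effect of the resolve/mergeIn layering above, which applies partition defaults, then service defaults, then the concrete endpoint entry. Illustrative only, shown with the China partition's streams.dynamodb service.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// The concrete cn-north-1 entry for streams.dynamodb is empty, so the URL
	// comes from the partition's hostname pattern and the signing name comes
	// from the service-level credentialScope ("dynamodb").
	p := endpoints.AwsCnPartition()
	ep, err := p.EndpointFor("streams.dynamodb", "cn-north-1")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(ep.URL)           // https://streams.dynamodb.cn-north-1.amazonaws.com.cn
	fmt.Println(ep.SigningName)   // dynamodb
	fmt.Println(ep.SigningRegion) // cn-north-1
}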
+ DecodeModelOptions DecodeModelOptions +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" . }} + + {{ range $_, $partition := . }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ template "service consts" . }} + + {{ template "endpoint resolvers" . 
}} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Casting the return value of this func to a EnumPartitions will + // allow you to get a list of the partitions in the order the endpoints + // will be resolved in. + // + // resolver := endpoints.DefaultResolver() + // partitions := resolver.(endpoints.EnumPartitions).Partitions() + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultResolver() Resolver { + return defaultPartitions + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . 
-}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..57663616868fc305a6e0e63d79429292600b2045 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 0000000000000000000000000000000000000000..a94f041070a55c0c4ddf854790a0087111931555 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,11 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// values := JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go new file mode 100644 index 0000000000000000000000000000000000000000..db87188e20c7f694233e72a7d0b4d3e7e7b81e9e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -0,0 +1,112 @@ +package aws + +import ( + "log" + "os" +) + +// A LogLevelType defines the level logging should be performed at. Used to instruct +// the SDK which statements should be logged. +type LogLevelType uint + +// LogLevel returns the pointer to a LogLevel. Should be used to workaround +// not being able to take the address of a non-composite literal. 
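A sketch of wiring the logging types below into a Config. The LogLevel and Logger field names are the ones the request package in this diff reads from aws.Config; everything else is illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// LogLevel returns a *LogLevelType so the value can sit directly in a
	// Config literal; a sub level such as LogDebugWithHTTPBody also enables
	// LogDebug.
	cfg := aws.Config{
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
		Logger: aws.LoggerFunc(func(args ...interface{}) {
			fmt.Fprintln(os.Stderr, args...)
		}),
	}

	fmt.Println(cfg.LogLevel.Matches(aws.LogDebug))                    // true
	fmt.Println(cfg.LogLevel.Matches(aws.LogDebugWithRequestRetries)) // false
}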
+func LogLevel(l LogLevelType) *LogLevelType { + return &l +} + +// Value returns the LogLevel value or the default value LogOff if the LogLevel +// is nil. Safe to use on nil value LogLevelTypes. +func (l *LogLevelType) Value() LogLevelType { + if l != nil { + return *l + } + return LogOff +} + +// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be +// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If +// LogLevel is nill, will default to LogOff comparison. +func (l *LogLevelType) Matches(v LogLevelType) bool { + c := l.Value() + return c&v == v +} + +// AtLeast returns true if this LogLevel is at least high enough to satisfies v. +// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// to LogOff comparison. +func (l *LogLevelType) AtLeast(v LogLevelType) bool { + c := l.Value() + return c >= v +} + +const ( + // LogOff states that no logging should be performed by the SDK. This is the + // default state of the SDK, and should be use to disable all logging. + LogOff LogLevelType = iota * 0x1000 + + // LogDebug state that debug output should be logged by the SDK. This should + // be used to inspect request made and responses received. + LogDebug +) + +// Debug Logging Sub Levels +const ( + // LogDebugWithSigning states that the SDK should log request signing and + // presigning events. This should be used to log the signing details of + // requests for debugging. Will also enable LogDebug. + LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) + + // LogDebugWithHTTPBody states the SDK should log HTTP request and response + // HTTP bodys in addition to the headers and path. This should be used to + // see the body content of requests and responses made while using the SDK + // Will also enable LogDebug. + LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. + LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors +) + +// A Logger is a minimalistic interface for the SDK to log messages to. Should +// be used to provide custom logging writers for the SDK to use. +type Logger interface { + Log(...interface{}) +} + +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + +// NewDefaultLogger returns a Logger which will write log messages to stdout, and +// use same formatting runes as the stdlib log.Logger +func NewDefaultLogger() Logger { + return &defaultLogger{ + logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +// A defaultLogger provides a minimalistic logger satisfying the Logger interface. +type defaultLogger struct { + logger *log.Logger +} + +// Log logs the parameters to the stdlib logger. See log.Println. +func (l defaultLogger) Log(args ...interface{}) { + l.logger.Println(args...) 
+} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 0000000000000000000000000000000000000000..5279c19c09dd34871e00cfaffcf7ba71577ac566 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,187 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList +} + +// Copy returns of this handler's lists. +func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + n.list = append([]NamedHandler{}, l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = []NamedHandler{} +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.list = append(l.list, NamedHandler{"__anonymous", f}) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + l.list = append(l.list, n) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. 
+func (l *HandlerList) PushFrontNamed(n NamedHandler) { + l.list = append([]NamedHandler{n}, l.list...) +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + newlist := []NamedHandler{} + for _, m := range l.list { + if m.Name != n.Name { + newlist = append(newlist, m) + } + } + l.list = newlist +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
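A sketch of hooking into the handler lists defined here. The Handlers value would normally come from a session or service client, which is outside this excerpt, and the handler names and user-agent values are illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

// addCustomHandlers registers one named and one anonymous handler.
func addCustomHandlers(h *request.Handlers) {
	// Named handlers can later be removed again by name via Remove.
	h.Build.PushBackNamed(request.NamedHandler{
		Name: "example.UserAgent",
		Fn:   request.MakeAddToUserAgentHandler("my-app", "1.0", "beta"),
	})

	// Anonymous handlers are stored under the "__anonymous" name.
	h.Send.PushFront(func(r *request.Request) {
		fmt.Println("sending", r.Operation.Name)
	})
}

func main() {
	var h request.Handlers
	addCustomHandlers(&h)
	fmt.Println("build handlers:", h.Build.Len()) // 1
	fmt.Println("send handlers:", h.Send.Len())   // 1
}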
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 0000000000000000000000000000000000000000..79f79602b03f880fea6d4f504edce708fd9c1702 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 0000000000000000000000000000000000000000..02f07f4a462f70a4373e38ab826ceec30ecfc08d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,58 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 0000000000000000000000000000000000000000..77312bb66558e92b9353662d747e606b0c0098e2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,465 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +// A Request is the service request to be made. 
+type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + + built bool + + // Need to persist an intermideant body betweend the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and rewraps the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// Paginator keeps track of pagination configuration for an API operation. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// WillRetry returns if the request's can be retried. +func (r *Request) WillRetry() bool { + return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() +} + +// ParamsFilled returns if the request's parameters have been populated +// and the parameters are valid. False is returned if no parameters are +// provided or invalid. +func (r *Request) ParamsFilled() bool { + return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() +} + +// DataFilled returns true if the request's data for response deserialization +// target has been set and is a valid. False is returned if data is not +// set, or is invalid. +func (r *Request) DataFilled() bool { + return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() +} + +// SetBufferBody will set the request's body bytes that will be sent to +// the service API. +func (r *Request) SetBufferBody(buf []byte) { + r.SetReaderBody(bytes.NewReader(buf)) +} + +// SetStringBody sets the body of the request to be backed by a string. +func (r *Request) SetStringBody(s string) { + r.SetReaderBody(strings.NewReader(s)) +} + +// SetReaderBody will set the request's body reader. 
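A sketch of presigning a request with the Presign method defined just below. Building the *request.Request itself requires a service client, which is outside this excerpt, so the request here is only a placeholder.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws/request"
)

// presignFor returns a signed URL valid for the given duration.
func presignFor(req *request.Request, d time.Duration) (string, error) {
	return req.Presign(d)
}

func main() {
	// Placeholder: in real code req comes from a service client call such as
	// client.SomeOperationRequest(input).
	var req *request.Request
	if req == nil {
		fmt.Println("no request to presign in this sketch")
		return
	}

	url, err := presignFor(req, 15*time.Minute)
	if err != nil {
		fmt.Println("presign failed:", err)
		return
	}
	fmt.Println("presigned URL:", url)
}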
+func (r *Request) SetReaderBody(reader io.ReadSeeker) { + r.Body = reader + r.ResetBody() +} + +// Presign returns the request's signed URL. Error will be returned +// if the signing fails. +func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + + if r.Operation.BeforePresignFn != nil { + r = r.copy() + err := r.Operation.BeforePresignFn(r) + if err != nil { + return "", err + } + } + + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. +// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request returning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +// ResetBody rewinds the request body backto its starting position, and +// set's the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +func (r *Request) ResetBody() { + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody = newOffsetReader(r.Body, r.BodyStart) + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. 
The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := computeBodyLength(r.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to compute request body size", err) + return + } + + if l == 0 { + r.HTTPRequest.Body = noBodyReader + } else if l > 0 { + r.HTTPRequest.Body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + r.HTTPRequest.Body = noBodyReader + default: + r.HTTPRequest.Body = r.safeBody + } + } +} + +// Attempts to compute the length of the body of the reader using the +// io.Seeker interface. If the value is not seekable because of being +// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. +// If no error occurs the length of the body will be returned. +func computeBodyLength(r io.ReadSeeker) (int64, error) { + seekable := true + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := r.(type) { + case aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + case *aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + } + if !seekable { + return -1, nil + } + + curOffset, err := r.Seek(0, 1) + if err != nil { + return 0, err + } + + endOffset, err := r.Seek(0, 2) + if err != nil { + return 0, err + } + + _, err = r.Seek(curOffset, 0) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + if !shouldRetryCancel(r) { + return r.Error + } + + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, r.Error) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.UnmarshalError.Run(r) + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, r.Error) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +func shouldRetryCancel(r *Request) bool { + awsErr, ok := r.Error.(awserr.Error) + timeoutErr := false + errStr := r.Error.Error() + if ok { + err := awsErr.OrigErr() + netErr, netOK := err.(net.Error) + timeoutErr = netOK && netErr.Temporary() + if urlErr, ok := err.(*url.Error); !timeoutErr && ok { + errStr = urlErr.Err.Error() + } + } + + // There can be two types of canceled errors here. + // The first being a net.Error and the other being an error. + // If the request was timed out, we want to continue the retry + // process. Otherwise, return the canceled error. + return timeoutErr || + (errStr != "net/http: request canceled" && + errStr != "net/http: request canceled while waiting for connection") + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 0000000000000000000000000000000000000000..1323af9007b3d5e919c77460382b636b7066b831 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,21 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// Is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. 
+var noBodyReader = noBody{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 0000000000000000000000000000000000000000..8b963f4de27949c84b0246b8cc614807e8065ae1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package request + +import "net/http" + +// Is a http.NoBody reader instructing Go HTTP client to not include +// and body in the HTTP request. +var noBodyReader = http.NoBody diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 0000000000000000000000000000000000000000..2939ec473f2bf0360b6c25f46cf3117a109cf6dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,104 @@ +package request + +import ( + "reflect" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +//type Paginater interface { +// HasNextPage() bool +// NextPage() *Request +// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error +//} + +// HasNextPage returns true if this request has more pages of data available. +func (r *Request) HasNextPage() bool { + return len(r.nextPageTokens()) > 0 +} + +// nextPageTokens returns the tokens to use when asking for the next page of +// data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +func (r *Request) NextPage() *Request { + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. 
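+//
+// Illustrative sketch (editor's addition): with a hypothetical request `req`
+// returned by a service client's *Request method, the callback normally
+// type-asserts the page to the operation's output type:
+//
+//    err := req.EachPage(func(page interface{}, lastPage bool) bool {
+//        out := page.(*dynamodb.ListTablesOutput) // hypothetical output type
+//        fmt.Println(len(out.TableNames), "tables on this page")
+//        return true // return false to stop paginating early
+//    })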
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 0000000000000000000000000000000000000000..ebd60ccc4fb80a9b9702fb545ea39972d2bf6dcc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,102 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. +var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +func (r *Request) IsErrorRetryable() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeRetryable(err.Code()) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +func (r *Request) IsErrorThrottle() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeThrottle(err.Code()) + } + } + return false +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. 
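+//
+// Illustrative sketch (editor's note): the classification helpers below are
+// typically consulted together inside a retry handler; the handler shown is
+// hypothetical and not part of the SDK:
+//
+//    if req.IsErrorExpired() {
+//        // refresh credentials so the retried request can be re-signed
+//    } else if req.IsErrorThrottle() {
+//        // back off more aggressively than for ordinary retryable errors
+//    } else if req.IsErrorRetryable() {
+//        // retry using the Retryer's normal backoff rules
+//    }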
+func (r *Request) IsErrorExpired() bool { + if r.Error != nil { + if err, ok := r.Error.(awserr.Error); ok { + return isCodeExpiredCreds(err.Code()) + } + } + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 0000000000000000000000000000000000000000..2520286b75dc169a161061e6de370a2cc0ff05d0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. + Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. 
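+//
+// Illustrative sketch (editor's addition) of the formatted output, using
+// hypothetical field names:
+//
+//    errs := ErrInvalidParams{Context: "PutItemInput"}
+//    errs.Add(NewErrParamRequired("TableName"))
+//    errs.Add(NewErrParamMinLen("Item", 1))
+//    fmt.Println(errs.Error())
+//
+//    // InvalidParameter: 2 validation error(s) found.
+//    // - missing required field, PutItemInput.TableName.
+//    // - minimum field size of 1, PutItemInput.Item.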
+func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. +func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. 
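+//
+// Illustrative sketch (editor's note): generated input shapes implement the
+// Validator interface roughly like the hypothetical type below, collecting
+// required-field and minimum-length violations into an ErrInvalidParams:
+//
+//    func (s *HypotheticalInput) Validate() error {
+//        invalidParams := ErrInvalidParams{Context: "HypotheticalInput"}
+//        if s.Name == nil {
+//            invalidParams.Add(NewErrParamRequired("Name"))
+//        }
+//        if s.Name != nil && len(*s.Name) < 1 {
+//            invalidParams.Add(NewErrParamMinLen("Name", 1))
+//        }
+//        if invalidParams.Len() > 0 {
+//            return invalidParams
+//        }
+//        return nil
+//    }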
+type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 0000000000000000000000000000000000000000..244c86da0543ac39cef68919be63e17779ef1fbe --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 0000000000000000000000000000000000000000..bd082e9d1f784af980ef9905e9782b7d5bcb0f8a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 
0000000000000000000000000000000000000000..98bfe742b3b28407ac8d956c4913ee85338a459c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,740 @@
+// Package v4 implements AWS Signature Version 4 request signing.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//     "//<hostname>/<path>"
+//
+//     // e.g.
+//     "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query the signature was generated for.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, it is recommended that when using the signer outside of the
+// SDK you explicitly escape the request prior to signing it,
+// which will help prevent signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP/2 server, and you're using Go 1.6.2
+// through 1.7.4, you should use URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre-1.8 that fails to make HTTP/2 requests using an absolute URL in the HTTP
+// message. URL.Opaque generally will force Go to make requests with an absolute URL.
+// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
+// or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
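+//
+// Illustrative sketch (editor's addition, not upstream documentation) of
+// standalone signing; the credential values, service name, and region are
+// placeholders:
+//
+//    signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
+//
+//    req, _ := http.NewRequest("GET", "https://sqs.us-east-1.amazonaws.com/", nil)
+//    _, err := signer.Sign(req, nil, "sqs", "us-east-1", time.Now())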
+package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. 
+ Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disales the automatical setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. Need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. 
This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. +func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: exp != 0, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.assignAmzQueryValues() + ctx.build(v4.DisableHeaderHoisting) + + // If the request is not presigned the body should be attached to it. 
This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. + if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler is bested used only with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now) +} +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. 
+ v4.DisableRequestBodyOverwrite = true + }) + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, signingTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // 
include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.isPresign && ctx.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 0000000000000000000000000000000000000000..0e2d864e10a8e9864d21c3104fb37347a8dc4d19 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,118 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should +// only be used with an io.Reader that is also an io.Seeker. Doing so may +// cause request signature errors, or request body's not sent for GET, HEAD +// and DELETE HTTP methods. +// +// Deprecated: Should only be used with io.ReadSeeker. If using for +// S3 PutObject to stream content use s3manager.Uploader instead. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. +// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. 
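+//
+// Illustrative sketch (editor's note): wrapping a plain io.Reader (for example
+// a bytes.Buffer) yields a value whose Seek is a no-op, which is why callers
+// such as the request package check IsSeeker before computing body lengths:
+//
+//    rsc := aws.ReadSeekCloser(bytes.NewBufferString("payload"))
+//    rsc.IsSeeker() // false: the underlying reader cannot seek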
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go new file mode 100644 index 0000000000000000000000000000000000000000..438506bf482c80e4c4bb4c9597a1c1ca1ff7abea --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -0,0 +1,8 @@ +// Package aws provides core functionality for making requests to AWS services. +package aws + +// SDKName is the name of this AWS SDK +const SDKName = "aws-sdk-go" + +// SDKVersion is the version of this SDK +const SDKVersion = "1.7.9" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 0000000000000000000000000000000000000000..71618356493d43a926627012bf1e4f24d5418c8d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,290 @@ +// Package rest provides RESTful serialization of AWS requests and responses. 
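+//
+// Illustrative sketch (editor's addition): EscapePath, defined further below,
+// applies Amazon-style URI escaping; with encodeSep=false path separators are
+// preserved:
+//
+//    rest.EscapePath("photos/2017 summer/beach.jpg", false)
+//    // "photos/2017%20summer/beach.jpg"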
+package rest + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' || + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. 
+ r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) + if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) 
+ } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + case aws.JSONValue: + b, err := json.Marshal(value) + if err != nil { + return "", err + } + if tag.Get("location") == "header" { + str = base64.StdEncoding.EncodeToString(b) + } else { + str = string(b) + } + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go new file mode 100644 index 0000000000000000000000000000000000000000..4366de2e1e8f27af93b230da8f7765cce073f6e6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -0,0 +1,45 @@ +package rest + +import "reflect" + +// PayloadMember returns the payload field member of i if there is one, or nil. +func PayloadMember(i interface{}) interface{} { + if i == nil { + return nil + } + + v := reflect.ValueOf(i).Elem() + if !v.IsValid() { + return nil + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + field, _ := v.Type().FieldByName(payloadName) + if field.Tag.Get("type") != "structure" { + return nil + } + + payload := v.FieldByName(payloadName) + if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { + return payload.Interface() + } + } + } + return nil +} + +// PayloadType returns the type of a payload field member of i if there is one, or "". 
+func PayloadType(i interface{}) string { + v := reflect.Indirect(reflect.ValueOf(i)) + if !v.IsValid() { + return "" + } + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + if member, ok := v.Type().FieldByName(payloadName); ok { + return member.Tag.Get("type") + } + } + } + return "" +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go new file mode 100644 index 0000000000000000000000000000000000000000..77e690ac37a991f492d4289ffb040d360cf8f12f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -0,0 +1,222 @@ +package rest + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals the REST component of a response in a REST service. +func Unmarshal(r *request.Request) { + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) + unmarshalLocationElements(r, v) + } +} + +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := v.FieldByName(payloadName) + if payload.IsValid() { + switch payload.Interface().(type) { + case []byte: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + payload.Set(reflect.ValueOf(b)) + } + case *string: + defer r.HTTPResponse.Body.Close() + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + } else { + str := string(b) + payload.Set(reflect.ValueOf(&str)) + } + default: + switch payload.Type().String() { + case "io.ReadCloser": + payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) + default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() + r.Error = awserr.New("SerializationError", + "failed to decode REST response", + fmt.Errorf("unknown 
payload type %s", payload.Type())) + } + } + } + } + } + } +} + +func unmarshalLocationElements(r *request.Request, v reflect.Value) { + for i := 0; i < v.NumField(); i++ { + m, field := v.Field(i), v.Type().Field(i) + if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + + switch field.Tag.Get("location") { + case "statusCode": + unmarshalStatusCode(m, r.HTTPResponse.StatusCode) + case "header": + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + case "headers": + prefix := field.Tag.Get("locationName") + err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + break + } + } + } + if r.Error != nil { + return + } + } +} + +func unmarshalStatusCode(v reflect.Value, statusCode int) { + if !v.IsValid() { + return + } + + switch v.Interface().(type) { + case *int64: + s := int64(statusCode) + v.Set(reflect.ValueOf(&s)) + } +} + +func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { + switch r.Interface().(type) { + case map[string]*string: // we only support string map value types + out := map[string]*string{} + for k, v := range headers { + k = http.CanonicalHeaderKey(k) + if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { + out[k[len(prefix):]] = &v[0] + } + } + r.Set(reflect.ValueOf(out)) + } + return nil +} + +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + if !v.IsValid() || (tag.Get("type") != "jsonvalue" && header == "" && v.Elem().Kind() != reflect.String) { + return nil + } + + switch v.Interface().(type) { + case *string: + v.Set(reflect.ValueOf(&header)) + case []byte: + b, err := base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *bool: + b, err := strconv.ParseBool(header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&b)) + case *int64: + i, err := strconv.ParseInt(header, 10, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&i)) + case *float64: + f, err := strconv.ParseFloat(header, 64) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&f)) + case *time.Time: + t, err := time.Parse(RFC822, header) + if err != nil { + return err + } + v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + b := []byte(header) + var err error + if tag.Get("location") == "header" { + b, err = base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return err + } + return nil +} diff --git a/vendor/github.com/deoxxa/aws_signing_client/LICENSE b/vendor/github.com/deoxxa/aws_signing_client/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4ba073201b993b6b0e280823a101e003ebd593be --- /dev/null +++ b/vendor/github.com/deoxxa/aws_signing_client/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Anthony Atkinson + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without 
restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/deoxxa/aws_signing_client/README.md b/vendor/github.com/deoxxa/aws_signing_client/README.md new file mode 100644 index 0000000000000000000000000000000000000000..4da11fc46c1517acc53b313f429d09a2751aac31 --- /dev/null +++ b/vendor/github.com/deoxxa/aws_signing_client/README.md @@ -0,0 +1,46 @@ +# AWS Signing Client + +This package provides simple `http.Client` creation that wraps all outgoing HTTP requests with Amazon AWS signatures using the [AWS SDK for Go](https://github.com/aws/aws-sdk-go). + +## Requirements + +In order to use the signing support graciously provided by [@nicolai86](https://github.com/nicolai86) in the AWS SDK for Go, you must be using a version that has been updated since the merge of [pull request #735](https://github.com/aws/aws-sdk-go/pull/735) for the SDK (tagged release v1.2.0). + +## Usage + +You can provide your own `*http.Client` to have any existing fields persist or your `RoundTripper` wrapped (note that `New` returns both the client and an error): + +```go +import ( + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/sha1sum/aws_signing_client" +) + +var credentials *credentials.Credentials +// ... set credentials ... +var signer = v4.NewSigner(credentials) + +var myClient *http.Client +// ... + +// *v4.Signer, *http.Client, AWS service abbreviation, AWS region +awsClient, err := aws_signing_client.New(signer, myClient, "es", "us-east-1") +``` + +... or you can simply have the default client and transport created for you: + +```go +import ( + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/sha1sum/aws_signing_client" +) + +var credentials *credentials.Credentials +// ... set credentials ... +var signer = v4.NewSigner(credentials) + +// *v4.Signer, *http.Client, AWS service abbreviation, AWS region +awsClient, err := aws_signing_client.New(signer, nil, "es", "us-east-1") +``` \ No newline at end of file diff --git a/vendor/github.com/deoxxa/aws_signing_client/client.go b/vendor/github.com/deoxxa/aws_signing_client/client.go new file mode 100644 index 0000000000000000000000000000000000000000..fc6c8e11195b30b5906549a9bc55f82e3afcc35a --- /dev/null +++ b/vendor/github.com/deoxxa/aws_signing_client/client.go @@ -0,0 +1,144 @@ +package aws_signing_client + +import ( + "net/http" + + "strings" + "time" + + "io/ioutil" + "log" + + "bytes" + + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +type ( + // Signer implements the http.RoundTripper interface and houses an optional RoundTripper that will be called between + // signing and response.
+ Signer struct { + transport http.RoundTripper + v4 *v4.Signer + service string + region string + logger *log.Logger + } + + // MissingSignerError is an implementation of the error interface that indicates that no AWS v4.Signer was + // provided in order to create a client. + MissingSignerError struct{} + + // MissingServiceError is an implementation of the error interface that indicates that no AWS service was + // provided in order to create a client. + MissingServiceError struct{} + + // MissingRegionError is an implementation of the error interface that indicates that no AWS region was + // provided in order to create a client. + MissingRegionError struct{} +) + +var signer *Signer + +// New obtains an HTTP client with a RoundTripper that signs AWS requests for the provided service. An +// existing client can be specified for the `client` value, or--if nil--a new HTTP client will be created. +func New(v4s *v4.Signer, client *http.Client, service string, region string) (*http.Client, error) { + c := client + switch { + case v4s == nil: + return nil, MissingSignerError{} + case service == "": + return nil, MissingServiceError{} + case region == "": + return nil, MissingRegionError{} + case c == nil: + c = http.DefaultClient + } + s := &Signer{ + transport: c.Transport, + v4: v4s, + service: service, + region: region, + logger: log.New(ioutil.Discard, "", 0), + } + if s.transport == nil { + s.transport = http.DefaultTransport + } + signer = s + c.Transport = s + return c, nil +} + +// SetDebugLog sets a logger for use in debugging requests and responses. +func SetDebugLog(l *log.Logger) { + signer.logger = l +} + +// RoundTrip implements the http.RoundTripper interface and is used to wrap HTTP requests in order to sign them for AWS +// API calls. The scheme for all requests will be changed to HTTPS. +func (s *Signer) RoundTrip(req *http.Request) (*http.Response, error) { + if h, ok := req.Header["Authorization"]; ok && len(h) > 0 && strings.HasPrefix(h[0], "AWS4") { + s.logger.Println("Received request to sign that is already signed. Skipping.") + return s.transport.RoundTrip(req) + } + s.logger.Printf("Receiving request for signing: %+v", req) + req.URL.Scheme = "https" + if strings.Contains(req.URL.RawPath, "%2C") { + s.logger.Printf("Escaping path for URL path '%s'", req.URL.RawPath) + req.URL.RawPath = rest.EscapePath(req.URL.RawPath, false) + } + t := time.Now() + req.Header.Set("Date", t.Format(time.RFC3339)) + s.logger.Printf("Final request to be signed: %+v", req) + var err error + switch req.Body { + case nil: + s.logger.Println("Signing request with no body...") + _, err = s.v4.Sign(req, nil, s.service, s.region, t) + default: + d, readErr := ioutil.ReadAll(req.Body) + if readErr != nil { + return nil, readErr + } + req.Body = ioutil.NopCloser(bytes.NewReader(d)) + s.logger.Println("Signing request with body...") + _, err = s.v4.Sign(req, bytes.NewReader(d), s.service, s.region, t) // assign the outer err so a signing failure is reported below + } + if err != nil { + s.logger.Printf("Error while attempting to sign request: '%s'", err) + return nil, err + } + s.logger.Printf("Signing successful. 
Set header to: '%+v'", req.Header) + s.logger.Printf("Sending signed request to RoundTripper: %+v", req) + resp, err := s.transport.RoundTrip(req) + if err != nil { + s.logger.Printf("Error from RoundTripper.\n\n\tResponse: %+v\n\n\tError: '%s'", resp, err) + return resp, err + } + respBody := "<nil>" + if resp.Body != nil { + defer resp.Body.Close() + buf := new(bytes.Buffer) + buf.ReadFrom(resp.Body) + respBody = buf.String() + resp.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) + } + s.logger.Printf("Successful response from RoundTripper: %+v\n\nBody: '%s'\n", resp, respBody) + return resp, nil +} + +// Error implements the error interface. +func (err MissingSignerError) Error() string { + return "No signer was provided. Cannot create client." +} + +// Error implements the error interface. +func (err MissingServiceError) Error() string { + return "No AWS service abbreviation was provided. Cannot create client." +} + +// Error implements the error interface. +func (err MissingRegionError) Error() string { + return "No AWS region was provided. Cannot create client." +} diff --git a/vendor/github.com/deoxxa/aws_signing_client/doc.go b/vendor/github.com/deoxxa/aws_signing_client/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..58e1959cde6b18e083904f9cf6aeeac3b7f5be11 --- /dev/null +++ b/vendor/github.com/deoxxa/aws_signing_client/doc.go @@ -0,0 +1,4 @@ +// Package aws_signing_client returns an HTTP client for use with AWS services that require signatures but are not +// supported by the official AWS SDK for Go, such as the Amazon Elasticsearch Service, that signs requests before +// sending. +package aws_signing_client diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..37ec93a14fdcd0d6e525d97c0cfa6b314eaa98d8 --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..ac034e5258f442e810ed65478712aaa7bb3cd5b1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 0000000000000000000000000000000000000000..85947422d70e74c91e86cf1bee112d1748ddae42 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,740 @@ +INI [](https://travis-ci.org/go-ini/ini) [](https://sourcegraph.com/github.com/go-ini/ini?badge) +=== + + + +Package ini provides INI file read and write functionality in Go. + +[简体ä¸æ–‡](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. 
+- Read with recursive values. +- Read with parent-child sections. +- Read with auto-increment key names. +- Read with multiple-line values. +- Read with tons of helper methods. +- Read and convert values to Go types. +- Read and **WRITE** comments of sections and keys. +- Manipulate sections, keys and comments with ease. +- Keep sections and keys in order as you parse and save. + +## Installation + +To use a tagged revision: + + go get gopkg.in/ini.v1 + +To use the latest changes: + + go get github.com/go-ini/ini + +Please add the `-u` flag to update in the future. + +### Testing + +If you want to test on your machine, please apply the `-t` flag: + + go get -t gopkg.in/ini.v1 + +Please add the `-u` flag to update in the future. + +## Getting Started + +### Loading from data sources + +A **Data Source** is either raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +Or start with an empty object: + +```go +cfg := ini.Empty() +``` + +When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error. + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +The cool thing is, whenever the file is available to load while you're calling the `Reload` method, it will be counted as usual. + +#### Ignore cases of key name + +When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 and sec2 are exactly the same section object +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 and key2 are exactly the same key object +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### MySQL-like boolean key + +MySQL's configuration allows a key without a value, as follows: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +By default, this is considered a missing value. But if you know you're going to deal with those cases, you can assign advanced load options: + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf") +``` + +The value of those keys is always `true`, and when you save to a file, they will be kept in the same format as you read them. + +To generate such keys in your program, you could use `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### Comment + +Take care that the following formats will be treated as comments: + +1. Lines that begin with `#` or `;` +2. Words after `#` or `;` +3. Words after the section name (i.e. words after `[some section name]`) + +If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```.
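+
+For instance (a minimal illustrative sketch; the key names below are invented, not part of any real configuration), quoting protects values that contain `#` or `;`:
+
+```ini
+; without quoting, everything after the first ; or # would be dropped as a comment
+dsn = `server=localhost;port=3306;charset=utf8`
+note = """this value keeps its # and ; characters"""
+```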
+ +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("section name") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. + +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.Section("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. 
+ +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. 
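+
+As a quick illustration of that rule (a hedged sketch; the section name, key name and values below are made up):
+
+```go
+// Suppose the file contains:
+//   [server]
+//   MODE = experimental
+//
+// "experimental" is not among the candidates, so the default "standard" is
+// returned, and the default itself does not have to be one of the candidates.
+mode := cfg.Section("server").Key("MODE").In("standard", []string{"debug", "release"})
+// mode == "standard"
+```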
+ +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +By default, spaces are used to align "=" sign between key and values, to disable that: + +```go +ini.PrettyFormat = false +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%(<name>)s`, where `<name>` is the key name in same section or default section, and `%(<name>)s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. 
+ +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### Unparseable Sections + +Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1> +------ end --- */ +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. 
+ +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Value Mapper + +To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. 
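+
+As a small illustration of the `BlockMode` FAQ above (a minimal sketch; the file, section and key names are hypothetical), disabling locking only makes sense once you are certain the object is used read-only:
+
+```go
+cfg, err := ini.Load("app.ini")
+if err != nil {
+	// handle the error
+}
+cfg.BlockMode = false // read-only from here on: skips locking and speeds up reads
+host := cfg.Section("server").Key("HOST").String()
+```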
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 0000000000000000000000000000000000000000..163432db9affe5170ea7e63eb23cdbf62ae33f45 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,727 @@ +本包æ供了 Go è¯è¨€ä¸è¯»å†™ INI 文件的功能。 + +## 功能特性 + +- 支æŒè¦†ç›–åŠ è½½å¤šä¸ªæ•°æ®æºï¼ˆ`[]byte`ã€æ–‡ä»¶å’Œ `io.ReadCloser`) +- 支æŒé€’归读å–键值 +- 支æŒè¯»å–父å分区 +- 支æŒè¯»å–自增键å +- 支æŒè¯»å–多行的键值 +- 支æŒå¤§é‡è¾…助方法 +- 支æŒåœ¨è¯»å–时直接转æ¢ä¸º Go è¯è¨€ç±»åž‹ +- 支æŒè¯»å–å’Œ **写入** 分区和键的注释 +- è½»æ¾æ“作分区ã€é”®å€¼å’Œæ³¨é‡Š +- 在ä¿å˜æ–‡ä»¶æ—¶åˆ†åŒºå’Œé”®å€¼ä¼šä¿æŒåŽŸæœ‰çš„é¡ºåº + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +å¦‚éœ€æ›´æ–°è¯·æ·»åŠ `-u` 选项。 + +### 测试安装 + +如果您想è¦åœ¨è‡ªå·±çš„机器上è¿è¡Œæµ‹è¯•ï¼Œè¯·ä½¿ç”¨ `-t` æ ‡è®°ï¼š + + go get -t gopkg.in/ini.v1 + +å¦‚éœ€æ›´æ–°è¯·æ·»åŠ `-u` 选项。 + +## 开始使用 + +### 从数æ®æºåŠ è½½ + +一个 **æ•°æ®æº** å¯ä»¥æ˜¯ `[]byte` 类型的原始数æ®ï¼Œ`string` 类型的文件路径或 `io.ReadCloser`。您å¯ä»¥åŠ è½½ **ä»»æ„多个** æ•°æ®æºã€‚å¦‚æžœæ‚¨ä¼ é€’å…¶å®ƒç±»åž‹çš„æ•°æ®æºï¼Œåˆ™ä¼šç›´æŽ¥è¿”回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +å½“æ‚¨åœ¨ä¸€å¼€å§‹æ— æ³•å†³å®šéœ€è¦åŠ 载哪些数æ®æºæ—¶ï¼Œä»å¯ä»¥ä½¿ç”¨ **Append()** 在需è¦çš„æ—¶å€™åŠ è½½å®ƒä»¬ã€‚ + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想è¦åŠ 载一系列文件,但是ä¸èƒ½å¤Ÿç¡®å®šå…¶ä¸å“ªäº›æ–‡ä»¶æ˜¯ä¸å˜åœ¨çš„,å¯ä»¥é€šè¿‡è°ƒç”¨å‡½æ•° `LooseLoad` æ¥å¿½ç•¥å®ƒä»¬ï¼ˆ`Load` ä¼šå› ä¸ºæ–‡ä»¶ä¸å˜åœ¨è€Œè¿”回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之å‰ä¸å˜åœ¨çš„文件在é‡æ–°è°ƒç”¨ `Reload` 方法的时候çªç„¶å‡ºçŽ°äº†ï¼Œé‚£ä¹ˆå®ƒä»¬ä¼šè¢«æ£å¸¸åŠ 载。 + +#### 忽略键å的大å°å†™ + +有时候分区和键的å称大å°å†™æ··åˆéžå¸¸çƒ¦äººï¼Œè¿™ä¸ªæ—¶å€™å°±å¯ä»¥é€šè¿‡ `InsensitiveLoad` 将所有分区和键å在读å–里强制转æ¢ä¸ºå°å†™ï¼š + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 å’Œ sec2 指å‘åŒä¸€ä¸ªåˆ†åŒºå¯¹è±¡ +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 å’Œ key2 指å‘åŒä¸€ä¸ªé”®å¯¹è±¡ +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### 类似 MySQL é…ç½®ä¸çš„布尔值键 + +MySQL çš„é…置文件ä¸ä¼šå‡ºçŽ°æ²¡æœ‰å…·ä½“值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +é»˜è®¤æƒ…å†µä¸‹è¿™è¢«è®¤ä¸ºæ˜¯ç¼ºå¤±å€¼è€Œæ— æ³•å®Œæˆè§£æžï¼Œä½†å¯ä»¥é€šè¿‡é«˜çº§çš„åŠ è½½é€‰é¡¹å¯¹å®ƒä»¬è¿›è¡Œå¤„ç†ï¼š + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在ä¿å˜åˆ°æ–‡ä»¶æ—¶ä¹Ÿåªä¼šè¾“出键å。 + +如果您想è¦é€šè¿‡ç¨‹åºæ¥ç”Ÿæˆæ¤ç±»é”®ï¼Œåˆ™å¯ä»¥ä½¿ç”¨ `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +ä¸‹è¿°å‡ ç§æƒ…况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之åŽçš„内容 +3. 
åˆ†åŒºæ ‡ç¾åŽçš„æ–‡å— (å³ `[分区å]` 之åŽçš„内容) + +å¦‚æžœä½ å¸Œæœ›ä½¿ç”¨åŒ…å« `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +### æ“作分区(Section) + +获å–指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想è¦èŽ·å–默认分区,则å¯ä»¥ç”¨ç©ºå—符串代替分区å: + +```go +section, err := cfg.GetSection("") +``` + +当您éžå¸¸ç¡®å®šæŸä¸ªåˆ†åŒºæ˜¯å˜åœ¨çš„,å¯ä»¥ä½¿ç”¨ä»¥ä¸‹ç®€ä¾¿æ–¹æ³•ï¼š + +```go +section := cfg.Section("section name") +``` + +如果ä¸å°å¿ƒåˆ¤æ–错了,è¦èŽ·å–的分区其实是ä¸å˜åœ¨çš„,那会å‘生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获å–所有分区对象或å称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### æ“作键(Key) + +获å–æŸä¸ªåˆ†åŒºä¸‹çš„键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +å’Œåˆ†åŒºä¸€æ ·ï¼Œæ‚¨ä¹Ÿå¯ä»¥ç›´æŽ¥èŽ·å–键而忽略错误处ç†ï¼š + +```go +key := cfg.Section("").Key("key name") +``` + +判æ–æŸä¸ªé”®æ˜¯å¦å˜åœ¨ï¼š + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获å–分区下的所有键或键å: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获å–分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### æ“作键值(Value) + +获å–一个类型为å—符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获å–值的åŒæ—¶é€šè¿‡è‡ªå®šä¹‰å‡½æ•°è¿›è¡Œå¤„ç†éªŒè¯ï¼š + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您ä¸éœ€è¦ä»»ä½•å¯¹å€¼çš„自动转å˜åŠŸèƒ½ï¼ˆä¾‹å¦‚递归读å–),å¯ä»¥ç›´æŽ¥èŽ·å–原值(这ç§æ–¹å¼æ€§èƒ½æœ€ä½³ï¼‰ï¼š + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判æ–æŸä¸ªåŽŸå€¼æ˜¯å¦å˜åœ¨ï¼š + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获å–其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// ç”± Must 开头的方法åå…许接收一个相åŒç±»åž‹çš„å‚æ•°æ¥ä½œä¸ºé»˜è®¤å€¼ï¼Œ +// 当键ä¸å˜åœ¨æˆ–者转æ¢å¤±è´¥æ—¶ï¼Œåˆ™ä¼šç›´æŽ¥è¿”回该默认值。 +// 但是,MustString æ–¹æ³•å¿…é¡»ä¼ é€’ä¸€ä¸ªé»˜è®¤å€¼ã€‚ + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? 
+ +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +å—¯å“¼ï¼Ÿå° caseï¼ + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了ï¼é‚£è¦æ˜¯æˆ‘属于一行的内容写ä¸ä¸‹æƒ³è¦å†™åˆ°ç¬¬äºŒè¡Œæ€Žä¹ˆåŠžï¼Ÿ + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是å°èœä¸€ç¢Ÿï¼ + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +å¯æ˜¯æˆ‘有时候觉得两行连在一起特别没劲,怎么æ‰èƒ½ä¸è‡ªåŠ¨è¿žæŽ¥ä¸¤è¡Œå‘¢ï¼Ÿ + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇é ç»™åŠ›å•Šï¼ + +需è¦æ³¨æ„的是,值两侧的å•å¼•å·ä¼šè¢«è‡ªåŠ¨å‰”除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然ä¸æ˜¯ã€‚ + +#### æ“作键值的辅助方法 + +获å–键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获å–到的值ä¸æ˜¯å€™é€‰å€¼çš„ä»»æ„一个,则会返回默认值,而默认值ä¸éœ€è¦æ˜¯å€™é€‰å€¼ä¸çš„一员。 + +验è¯èŽ·å–的值是å¦åœ¨æŒ‡å®šèŒƒå›´å†…: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当å˜åœ¨æ— 效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片ä¸å‰”é™¤æ— æ•ˆè¾“å…¥ï¼š + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当å˜åœ¨æ— 效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = 
+vals, err = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals, err = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Saving configuration
+
+Finally, the moment has come: it is time to save the configuration.
+
+The plain way is to write it out to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+A more advanced way is to write to anything that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the `=` signs between keys and values for prettier output. The following code disables that behaviour:
+
+```go
+ini.PrettyFormat = false
+```
+
+## Advanced usage
+
+### Recursive values
+
+While reading values, the special syntax `%(<name>)s` is expanded, where `<name>` can be the name of a key in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding value; if the referenced key does not exist, it is replaced by an empty string. Up to 99 levels of recursion are allowed.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String()	// https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String()	// github.com/go-ini/ini
+```
+
+### Parent-child sections
+
+You can use `.` in a section name to express a parent-child relationship between two or more sections. If a key cannot be found in a child section, the lookup falls back to its parent section, and so on, until there is no parent left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String()	// https://gopkg.in/ini.v1
+```
+
+#### Retrieving all key names from parent sections
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable sections
+
+Some sections do not contain the usual key-value pairs but free-form text without a fixed format. They can be handled with `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+------ end --- */
+```
+
+### Auto-increment key names
+
+If a key name in the data source is `-`, it is treated as using the special auto-increment key name syntax. The counter starts at 1 and is independent for each section.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings()	// []{"#1", "#2", "#3"}
+```
+
+### Mapping to a struct
+
+Want to work with INI in a more object-oriented way? Good idea.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+	Content string
+	Cities  []string
+}
+
+type Person struct {
+	Name string
+	Age  int `ini:"age"`
+	Male bool
+	Born time.Time
+	Note
+	Created time.Time `ini:"-"`
+}
+
+func main() {
+	cfg, err := ini.Load("path/to/ini")
+	// ...
+	p := new(Person)
+	err = cfg.MapTo(p)
+	// ...
+
+	// It really can be this simple.
+	err = ini.MapTo(p, "path/to/ini")
+	// ...
+
+	// Hmm? Only need to map a single section?
+	n := new(Note)
+	err = cfg.Section("Note").MapTo(n)
+	// ...
+}
+```
+
+How do you set default values for struct fields? Simply assign them to the fields before mapping. If a key is not found or has the wrong type, the field keeps its value.
+
+```go
+// ...
+p := &Person{
+	Name: "Joe",
+}
+// ...
+```
+
+Working with INI like this is really cool! But what good is it if I cannot get my original configuration file back?
+
+### Reflecting from a struct
+
+Well, did I say you couldn't?
+
+```go
+type Embeded struct {
+	Dates  []time.Time `delim:"|"`
+	Places []string    `ini:"places,omitempty"`
+	None   []int       `ini:",omitempty"`
+}
+
+type Author struct {
+	Name      string `ini:"NAME"`
+	Male      bool
+	Age       int
+	GPA       float64
+	NeverMind string `ini:"-"`
+	*Embeded
+}
+
+func main() {
+	a := &Author{"Unknwon", true, 21, 2.8, "",
+		&Embeded{
+			[]time.Time{time.Now(), time.Now()},
+			[]string{"HangZhou", "Boston"},
+			[]int{},
+		}}
+	cfg := ini.Empty()
+	err = ini.ReflectFrom(cfg, a)
+	// ...
+}
+```
+
+Look, a miracle happened.
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and simplify your code, this library supports name mappers of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which map struct field names to section and key names.
+
+There are 2 built-in mappers:
+
+- `AllCapsUnderscore`: converts the field name to the format `ALL_CAPS_UNDERSCORE` before matching section and key names.
+- `TitleUnderscore`: converts the field name to the format `title_underscore` before matching section and key names.
+
+Usage:
+
+```go
+type Info struct {
+	PackageName string
+}
+
+func main() {
+	err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+	// ...
+
+	cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+	// ...
+	info := new(Info)
+	cfg.NameMapper = ini.AllCapsUnderscore
+	err = cfg.MapTo(info)
+	// ...
+}
+```
+
+The same rules apply when using the `ini.ReflectFromWithMapper` function.
+
+#### Value Mapper
+
+A value mapper is a custom function that expands values automatically, for example to read environment variables at run time:
+
+```go
+type Env struct {
+	Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+	cfg.ValueMapper = os.ExpandEnv
+	// ...
+	env := &Env{}
+	err = cfg.Section("env").MapTo(env)
+}
+```
+
+In this example, `env.Foo` ends up holding the value of the environment variable `MY_VAR` at run time.
+
+#### Other notes on mapping and reflecting
+
+Any embedded struct is treated as its own section by default, and no parent-child section relationship is created automatically:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+Fine, but what if I want the embedded struct's fields in the same section anyway? Alright, have it your way:
+
+```go
+type Child struct {
+	Age string
+}
+
+type Parent struct {
+	Name string
+	Child `ini:"Parent"`
+}
+
+type Config struct {
+	City string
+	Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting help
+
+- [API documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File an issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQ
+
+### What is the `BlockMode` field?
+
+By default, this library uses locking to keep data safe during read and write operations. In some cases, however, you are certain that you will only read. You can then set `cfg.BlockMode = false` to gain roughly **50-70%** better read performance.
+
+### Why write another INI library?
+ +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) æ¥å®Œæˆå¯¹ INI 文件的æ“ä½œï¼Œä½†æˆ‘å¸Œæœ›ä½¿ç”¨æ›´åŠ Go é£Žæ ¼çš„ä»£ç 。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能æå‡ã€‚ + +为了åšå‡ºè¿™äº›æ”¹å˜ï¼Œæˆ‘必须对 API è¿›è¡Œç ´å,所以新开一个仓库是最安全的åšæ³•ã€‚除æ¤ä¹‹å¤–,本库直接使用 `gopkg.in` æ¥è¿›è¡Œç‰ˆæœ¬åŒ–å‘布。(其实真相是导入路径更çŸäº†ï¼‰ diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 0000000000000000000000000000000000000000..80afe7431584aa2eb208a6b99f035a1dcc2b005f --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 0000000000000000000000000000000000000000..68d73aa750c7f3956ac0b9883d94608b509704e8 --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,549 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.25.4" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. 
+ PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // AllowShadows indicates whether to keep track of keys with same name under same section. + AllowShadows bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. 
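+	// For example, UnparseableSections: []string{"COMMENTS"} keeps the body of [COMMENTS] as raw text.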
+ UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it allows have shadow keys. +func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{AllowShadows: true}, source, others...) +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. +func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. 
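+// If the section does not exist yet, it is created on the fly instead of returning an error.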
+func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. +func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 || DefaultHeader { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err = buf.WriteString(sec.rawBody); err != nil { + return 0, err + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. 
+ alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + KEY_LIST: + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + + for _, val := range key.ValueWithShadows() { + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue KEY_LIST + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + } + + // Put a line between sections + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. +func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 0000000000000000000000000000000000000000..852696f4c4fa68f4014018d0b9d003a5a580b88e --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,703 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + name string + value string + isAutoIncrement bool + isBooleanType bool + + isShadow bool + shadows []*Key + + Comment string +} + +// newKey simply return a key object with given values. +func newKey(s *Section, name, val string) *Key { + return &Key{ + s: s, + name: name, + value: val, + } +} + +func (k *Key) addShadow(val string) error { + if k.isShadow { + return errors.New("cannot add shadow to another shadow key") + } else if k.isAutoIncrement || k.isBooleanType { + return errors.New("cannot add shadow to auto-increment or boolean key") + } + + shadow := newKey(k.s, k.name, val) + shadow.isShadow = true + k.shadows = append(k.shadows, shadow) + return nil +} + +// AddShadow adds a new shadow key to itself. +func (k *Key) AddShadow(val string) error { + if !k.s.f.options.AllowShadows { + return errors.New("shadow key is not allowed") + } + return k.addShadow(val) +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// ValueWithShadows returns raw values of key and its shadows if any. +func (k *Key) ValueWithShadows() []string { + if len(k.shadows) == 0 { + return []string{k.value} + } + vals := make([]string, len(k.shadows)+1) + vals[0] = k.value + for i := range k.shadows { + vals[i+1] = k.shadows[i].value + } + return vals +} + +// transformValue takes a raw value and transforms to its final string. +func (k *Key) transformValue(val string) string { + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + + // Fail-fast if no indicate char found for recursive value + if !strings.Contains(val, "%") { + return val + } + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// String returns string representation of value. +func (k *Key) String() string { + return k.transformValue(k.value) +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. 
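+// For example, "On" and "y" parse to true, while "2" or "maybe" result in an error.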
+func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. 
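+// A default passed as an argument is returned (and stored as the new key value) when parsing fails.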
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + // vals[i] = k.transformValue(strings.TrimSpace(vals[i])) + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// StringsWithShadows returns list of string divided by given delimiter. +// Shadows will also be appended if any. +func (k *Key) StringsWithShadows(delim string) []string { + vals := k.ValueWithShadows() + results := make([]string, 0, len(vals)*2) + for i := range vals { + if len(vals) == 0 { + continue + } + + results = append(results, strings.Split(vals[i], delim)...) + } + + for i := range results { + results[i] = k.transformValue(strings.TrimSpace(results[i])) + } + return results +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. 
Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.parseInts(k.Strings(delim), false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.parseInt64s(k.Strings(delim), false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.parseInts(k.Strings(delim), false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.parseInt64s(k.Strings(delim), false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. 
+func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInts transforms strings to ints. +func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// parseInt64s transforms strings to int64s. +func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. +func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. 
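+// The owning section's keysHash map is updated as well, so KeysHash() reflects the change.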
+func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 0000000000000000000000000000000000000000..673ef80ca2674a0d81c95ade1f0d51ecc40a8de4 --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,358 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines when desired. + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. 
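+	// Keys that appear before any [section] header are attached to this default section.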
+ section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation) + if err != nil { + return err + } + + key, err := section.NewKey(kname, value) + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 0000000000000000000000000000000000000000..c9fa27e9ca7c6c6260afefd41416e1d21e5814f1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,234 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
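+// Keys are stored both in a map for lookup and in keyList to preserve their order.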
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + if s.f.options.AllowShadows { + if err := s.keys[name].addShadow(val); err != nil { + return nil, err + } + } else { + s.keys[name].value = val + } + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = newKey(s, name, val) + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. 
+func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. +func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 0000000000000000000000000000000000000000..509c682fa6df41a2b83bfff193a4b7f18daaaa24 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,450 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. 
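+	// For example, "PackageName" becomes "package_name".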
+ TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. +func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error { + var strs []string + if allowShadow { + strs = key.StringsWithShadows(delim) + } else { + strs = key.Strings(delim) + } + + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals, _ = key.parseInts(strs, true, false) + case reflect.Int64: + vals, _ = key.parseInt64s(strs, true, false) + case reflect.Uint: + vals = key.Uints(delim) + case reflect.Uint64: + vals = key.Uint64s(delim) + case reflect.Float64: + vals = key.Float64s(delim) + case reflectTime: + vals = key.Times(delim) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. 
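Both built-in name getters insert an underscore before each interior upper-case rune and differ only in the case of the output; a NameMapper is consulted by parseFieldName when a struct field carries no explicit ini tag name. A short sketch of the conversions they produce, assuming the vendored package is imported as ini:

package main

import (
	"fmt"

	ini "github.com/go-ini/ini"
)

func main() {
	// Interior capitals get an underscore inserted in front of them.
	fmt.Println(ini.AllCapsUnderscore("MaxIdleConns")) // MAX_IDLE_CONNS
	fmt.Println(ini.TitleUnderscore("MaxIdleConns"))   // max_idle_conns
}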
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim, allowShadow) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { + opts := strings.SplitN(tag, ",", 3) + rawName = opts[0] + if len(opts) > 1 { + omitEmpty = opts[1] == "omitempty" + } + if len(opts) > 2 { + allowShadow = opts[2] == "allowshadow" + } + return rawName, omitEmpty, allowShadow +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + rawName, _, allowShadow := parseTagOptions(tag) + fieldName := s.parseFieldName(tpField.Name, rawName) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + delim := parseDelim(tpField.Tag.Get("delim")) + if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. 
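Taken together, mapTo and setWithProperType let a section be bound onto a tagged struct: the ini tag overrides the field name, delim controls how slices are split, integer fields are first tried as time.Duration, and a parse failure silently keeps whatever value is already on the struct. A hedged sketch of Section.MapTo, assuming Load and the Key getters defined elsewhere in the vendored package:

package main

import (
	"fmt"
	"time"

	ini "github.com/go-ini/ini"
)

type Server struct {
	Host    string        `ini:"host"`
	Port    int           `ini:"port"`
	Timeout time.Duration `ini:"timeout"`
	Peers   []string      `ini:"peers" delim:";"`
	Debug   bool          // no tag: the raw field name "Debug" is used
}

func main() {
	src := []byte("[server]\nhost = example.com\nport = 9200\ntimeout = 30s\npeers = a;b;c\nDebug = true\n")
	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}

	var s Server
	// MapTo requires a pointer so the fields can be set.
	if err := cfg.Section("server").MapTo(&s); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", s)
}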
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
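reflectWithProperType is the write-side counterpart used by ReflectFrom further below: struct fields are serialised back into keys, slices are joined with the delim tag, and omitempty fields are skipped via isEmptyValue. A sketch of emitting INI text from a struct, assuming ini.Empty and File.WriteTo from the rest of the vendored package; exact key alignment depends on the package's pretty-print settings:

package main

import (
	"os"

	ini "github.com/go-ini/ini"
)

type Limits struct {
	MaxConns int      `ini:"max_conns"`
	Hosts    []string `ini:"hosts" delim:","`
	Note     string   `ini:"note,omitempty"` // skipped while empty
}

func main() {
	cfg := ini.Empty()
	// ReflectFrom needs a pointer, mirroring MapTo.
	if err := cfg.Section("limits").ReflectFrom(&Limits{
		MaxConns: 25,
		Hosts:    []string{"a", "b"},
	}); err != nil {
		panic(err)
	}
	// Roughly:
	// [limits]
	// max_conns = 25
	// hosts = a,b
	cfg.WriteTo(os.Stdout)
}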
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflectTime: + return v.Interface().(time.Time).IsZero() + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. +func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b03310a91fde044484bc7ddc33d06a28ccc97f2a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a828d2848f0d0d232222b0ca24d4ce155614c59f --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make <target>' where <target> is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 0000000000000000000000000000000000000000..187ef676dc9c724994bd75c6c4127097226704b8 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 0000000000000000000000000000000000000000..9cfa988bc5b8a28eb2765851aaae8f6a83d36fd9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. 
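Compile, MustCompile and Search are the public entry points of the vendored go-jmespath package: Compile parses an expression once into a reusable, goroutine-safe JMESPath, while the package-level Search parses and evaluates in a single call. A small sketch against JSON-decoded data:

package main

import (
	"encoding/json"
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	json.Unmarshal([]byte(`{"instances": [{"id": "i-1", "state": "running"}, {"id": "i-2", "state": "stopped"}]}`), &data)

	// One-shot parse and evaluate.
	ids, _ := jmespath.Search("instances[?state=='running'].id", data)
	fmt.Println(ids) // [i-1]

	// Pre-compiled form; MustCompile panics on a bad expression,
	// which suits package-level variables.
	query := jmespath.MustCompile("length(instances)")
	n, _ := query.Search(data)
	fmt.Println(n) // 2
}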
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 0000000000000000000000000000000000000000..1cd2d239c969d9d7914842ce8998138c5c9f37c6 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 0000000000000000000000000000000000000000..9b7cd89b4bcca44937d4cc762a3b308d4e220b18 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + 
{types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) 
+ } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. + args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 0000000000000000000000000000000000000000..13c74604c2c8eec42d46d439da89d7cb1b7270b9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
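The functionTable above is what backs calls such as sort_by, join and to_number: each entry pairs an argSpec used by resolveArgs/typeCheck with a jpf* handler, and expression references written as &expr reach the handlers as expRef values. A sketch exercising a few of these built-ins through Search:

package main

import (
	"encoding/json"
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

func main() {
	var data interface{}
	json.Unmarshal([]byte(`{"people": [{"name": "bob", "age": 30}, {"name": "ann", "age": 25}]}`), &data)

	// sort_by takes an expression reference (&age) and dispatches to
	// byExprFloat because the first projected value is a number.
	names, _ := jmespath.Search("sort_by(people, &age)[].name", data)
	fmt.Println(names) // [ann bob]

	// join type-checks its second argument as array[string].
	joined, _ := jmespath.Search("join(', ', people[].name)", data)
	fmt.Println(joined) // bob, ann

	// to_number returns null (nil) rather than an error for unparseable input.
	n, _ := jmespath.Search("to_number('12.5')", data)
	fmt.Println(n) // 12.5
}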
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 0000000000000000000000000000000000000000..817900c8f529672ff94fbad4e8dd26974474be9d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. 
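Besides map[string]interface{} and []interface{}, the interpreter above falls back to reflection, so exported struct fields and typed slices can be queried directly; fieldFromStruct upper-cases the first rune of the key before the FieldByName lookup. A sketch, assuming the same vendored import path:

package main

import (
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

type node struct {
	Name     string
	Children []node
}

func main() {
	tree := node{
		Name: "root",
		Children: []node{
			{Name: "a"},
			{Name: "b"},
		},
	}

	// "children" and "name" are matched against the exported fields
	// "Children" and "Name" via reflection, and the []node slice is
	// flattened without first converting it to []interface{}.
	names, err := jmespath.Search("children[].name", tree)
	if err != nil {
		panic(err)
	}
	fmt.Println(names) // [a b]
}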
+type Lexer struct { + expression string // The expression provided by the user. + currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 0000000000000000000000000000000000000000..1240a17552157f5d97250bc24e9d31fc59da5ff0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
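The parser is exported as well, which is mostly useful for debugging an expression without evaluating it: Parse returns the ASTNode, whose String method pretty-prints the tree, and lexing or parsing failures surface as SyntaxError values that can highlight the offending offset. A sketch:

package main

import (
	"fmt"

	jmespath "github.com/jmespath/go-jmespath"
)

func main() {
	parser := jmespath.NewParser()
	ast, err := parser.Parse("locations[?state == 'WA'].name | sort(@)")
	if err != nil {
		// Syntax errors carry the offending expression and offset.
		if syntaxErr, ok := err.(jmespath.SyntaxError); ok {
			fmt.Println(syntaxErr.HighlightLocation())
		}
		panic(err)
	}
	// ASTNode.String delegates to PrettyPrint(0); the format is an
	// implementation detail and may change between versions.
	fmt.Println(ast)
}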
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 0000000000000000000000000000000000000000..dae79cbdf338eed89458da2d6fe4780d242efae9 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 0000000000000000000000000000000000000000..ddc1b7d7d46003aa6972ffb72e382cbd4b91589c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. 
+// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. + rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. 
+// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/vendor/github.com/lupine/icu/LICENCE b/vendor/github.com/lupine/icu/LICENCE new file mode 100644 index 0000000000000000000000000000000000000000..ac188702ec83fa51ebc7f5856bae2b7286b31ac5 --- /dev/null +++ b/vendor/github.com/lupine/icu/LICENCE @@ -0,0 +1,9 @@ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT +SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT +OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, +EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/lupine/icu/LICENCE_icu b/vendor/github.com/lupine/icu/LICENCE_icu new file mode 100644 index 0000000000000000000000000000000000000000..dab93c1a7fc95a2293e423b911df948a7ecdaa5a --- /dev/null +++ b/vendor/github.com/lupine/icu/LICENCE_icu @@ -0,0 +1,307 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" + "http://www.w3.org/TR/html4/loose.dtd"> +<html> +<head> +<meta http-equiv="Content-Type" content="text/html;charset=UTF-8"> +<title>ICU License - ICU 1.8.1 and later</title> +</head> + +<body BGCOLOR="#ffffff"> +<h2>ICU License - ICU 1.8.1 and later</h2> + +<p>COPYRIGHT AND PERMISSION NOTICE</p> + +<p> +Copyright (c) 1995-2012 International Business Machines Corporation and others +</p> +<p> +All rights reserved. +</p> +<p> +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, and/or sell +copies of the Software, and to permit persons +to whom the Software is furnished to do so, provided that the above +copyright notice(s) and this permission notice appear in all copies +of the Software and that both the above copyright notice(s) and this +permission notice appear in supporting documentation. +</p> +<p> +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
IN NO EVENT SHALL +THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, +OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER +RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE +USE OR PERFORMANCE OF THIS SOFTWARE. +</p> +<p> +Except as contained in this notice, the name of a copyright holder shall not be +used in advertising or otherwise to promote the sale, use or other dealings in +this Software without prior written authorization of the copyright holder. +</p> + +<hr style="color:gray;background-color:gray"> +<p><small> +All trademarks and registered trademarks mentioned herein are the property of their respective owners. +</small></p> + +<hr style="height:3px;color:black;background-color:black"> + +<h2>Third-Party Software Licenses</h2> +This section contains third-party software notices and/or additional terms for licensed +third-party software components included within ICU libraries. + +<h3>1. Unicode Data Files and Software</h3> + +<h3 align="center"><a name="Exhibit1">EXHIBIT 1</a><br> +UNICODE, INC. LICENSE AGREEMENT - DATA FILES AND SOFTWARE</h3> +<blockquote> +<p>Unicode Data Files include all data files under the directories +<a href="http://www.unicode.org/Public/">http://www.unicode.org/Public/</a>, +<a href="http://www.unicode.org/reports/">http://www.unicode.org/reports/</a>, +and +<a title="http://www.unicode.org/cldr/data/" onClick="return top.js.OpenExtLink(window,event,this)" target="_blank" href="http://www.unicode.org/cldr/data/"> +http://www.unicode.org/cldr/data/</a>. Unicode Data Files do not include PDF online code charts under the directory <a href="http://www.unicode.org/Public/">http://www.unicode.org/Public/</a>. Software includes any source code +published in the Unicode Standard or under the directories <a href="http://www.unicode.org/Public/">http://www.unicode.org/Public/</a>, +<a href="http://www.unicode.org/reports/">http://www.unicode.org/reports/</a>, +and +<a title="http://www.unicode.org/cldr/data/" onClick="return top.js.OpenExtLink(window,event,this)" target="_blank" href="http://www.unicode.org/cldr/data/"> +http://www.unicode.org/cldr/data/</a>.</p> + +<p>NOTICE TO USER: Carefully read the following legal agreement. BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE THE DATA FILES OR SOFTWARE.</p> +<p>COPYRIGHT AND PERMISSION NOTICE</p> + +<p>Copyright © 1991-2012 Unicode, Inc. All rights reserved. 
Distributed under the Terms of Use in +<a href="http://www.unicode.org/copyright.html">http://www.unicode.org/copyright.html</a>.</p> + +<p>Permission is hereby granted, free of charge, to any person obtaining a copy of the Unicode data files and +any associated documentation (the "Data Files") or Unicode software and any associated documentation (the "Software") to deal in the Data Files or Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, and/or sell copies of the Data Files or Software, and to permit persons to whom the Data Files or Software are furnished to do so, provided that (a) the above copyright notice(s) and this permission notice appear +with all copies of the Data Files or Software, (b) both the above copyright notice(s) and this permission notice appear in associated documentation, and (c) there is clear notice in each modified Data File or in the Software as well as in the documentation associated with the Data File(s) or Software that the data or software has been modified.</p> + +<p>THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE.</p> + +<p>Except as contained in this notice, the name of a copyright holder shall not be used in advertising or otherwise to promote the sale, use or other dealings in these Data Files or Software without prior written authorization of the copyright holder.</p> + + <hr width="80%"> + +<p>Unicode and the Unicode logo are trademarks of Unicode, Inc. in the United States and other countries. All third party trademarks referenced herein are the property of their respective owners.</p> + + +</blockquote> + +<h3>2. Chinese/Japanese Word Break Dictionary Data (cjdict.txt)</h3> +<pre> + # The Google Chrome software developed by Google is licensed under the BSD license. Other software included in this distribution is provided under other licenses, as set forth below. + # + # The BSD License + # http://opensource.org/licenses/bsd-license.php + # Copyright (C) 2006-2008, Google Inc. + # + # All rights reserved. + # + # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + # + # Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + # Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + # Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + # + # + # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + # + # + # The word list in cjdict.txt are generated by combining three word lists listed + # below with further processing for compound word breaking. The frequency is generated + # with an iterative training against Google web corpora. + # + # * Libtabe (Chinese) + # - https://sourceforge.net/project/?group_id=1519 + # - Its license terms and conditions are shown below. + # + # * IPADIC (Japanese) + # - http://chasen.aist-nara.ac.jp/chasen/distribution.html + # - Its license terms and conditions are shown below. + # + # ---------COPYING.libtabe ---- BEGIN-------------------- + # + # /* + # * Copyrighy (c) 1999 TaBE Project. + # * Copyright (c) 1999 Pai-Hsiang Hsiao. + # * All rights reserved. + # * + # * Redistribution and use in source and binary forms, with or without + # * modification, are permitted provided that the following conditions + # * are met: + # * + # * . Redistributions of source code must retain the above copyright + # * notice, this list of conditions and the following disclaimer. + # * . Redistributions in binary form must reproduce the above copyright + # * notice, this list of conditions and the following disclaimer in + # * the documentation and/or other materials provided with the + # * distribution. + # * . Neither the name of the TaBE Project nor the names of its + # * contributors may be used to endorse or promote products derived + # * from this software without specific prior written permission. + # * + # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + # * REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + # * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + # * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + # * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + # * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + # * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + # * OF THE POSSIBILITY OF SUCH DAMAGE. + # */ + # + # /* + # * Copyright (c) 1999 Computer Systems and Communication Lab, + # * Institute of Information Science, Academia Sinica. + # * All rights reserved. + # * + # * Redistribution and use in source and binary forms, with or without + # * modification, are permitted provided that the following conditions + # * are met: + # * + # * . Redistributions of source code must retain the above copyright + # * notice, this list of conditions and the following disclaimer. + # * . Redistributions in binary form must reproduce the above copyright + # * notice, this list of conditions and the following disclaimer in + # * the documentation and/or other materials provided with the + # * distribution. + # * . 
Neither the name of the Computer Systems and Communication Lab + # * nor the names of its contributors may be used to endorse or + # * promote products derived from this software without specific + # * prior written permission. + # * + # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + # * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + # * REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + # * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + # * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + # * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + # * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + # * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED + # * OF THE POSSIBILITY OF SUCH DAMAGE. + # */ + # + # Copyright 1996 Chih-Hao Tsai @ Beckman Institute, University of Illinois + # c-tsai4@uiuc.edu http://casper.beckman.uiuc.edu/~c-tsai4 + # + # ---------------COPYING.libtabe-----END------------------------------------ + # + # + # ---------------COPYING.ipadic-----BEGIN------------------------------------ + # + # Copyright 2000, 2001, 2002, 2003 Nara Institute of Science + # and Technology. All Rights Reserved. + # + # Use, reproduction, and distribution of this software is permitted. + # Any copy of this software, whether in its original form or modified, + # must include both the above copyright notice and the following + # paragraphs. + # + # Nara Institute of Science and Technology (NAIST), + # the copyright holders, disclaims all warranties with regard to this + # software, including all implied warranties of merchantability and + # fitness, in no event shall NAIST be liable for + # any special, indirect or consequential damages or any damages + # whatsoever resulting from loss of use, data or profits, whether in an + # action of contract, negligence or other tortuous action, arising out + # of or in connection with the use or performance of this software. + # + # A large portion of the dictionary entries + # originate from ICOT Free Software. The following conditions for ICOT + # Free Software applies to the current dictionary as well. + # + # Each User may also freely distribute the Program, whether in its + # original form or modified, to any third party or parties, PROVIDED + # that the provisions of Section 3 ("NO WARRANTY") will ALWAYS appear + # on, or be attached to, the Program, which is distributed substantially + # in the same form as set out herein and that such intended + # distribution, if actually made, will neither violate or otherwise + # contravene any of the laws and regulations of the countries having + # jurisdiction over the User or the intended distribution itself. + # + # NO WARRANTY + # + # The program was produced on an experimental basis in the course of the + # research and development conducted during the project and is provided + # to users as so produced on an experimental basis. Accordingly, the + # program is provided without any warranty whatsoever, whether express, + # implied, statutory or otherwise. 
The term "warranty" used herein + # includes, but is not limited to, any warranty of the quality, + # performance, merchantability and fitness for a particular purpose of + # the program and the nonexistence of any infringement or violation of + # any right of any third party. + # + # Each user of the program will agree and understand, and be deemed to + # have agreed and understood, that there is no warranty whatsoever for + # the program and, accordingly, the entire risk arising from or + # otherwise connected with the program is assumed by the user. + # + # Therefore, neither ICOT, the copyright holder, or any other + # organization that participated in or was otherwise related to the + # development of the program and their respective officials, directors, + # officers and other employees shall be held liable for any and all + # damages, including, without limitation, general, special, incidental + # and consequential damages, arising out of or otherwise in connection + # with the use or inability to use the program or any product, material + # or result produced or otherwise obtained by using the program, + # regardless of whether they have been advised of, or otherwise had + # knowledge of, the possibility of such damages at any time during the + # project or thereafter. Each user will be deemed to have agreed to the + # foregoing by his or her commencement of use of the program. The term + # "use" as used herein includes, but is not limited to, the use, + # modification, copying and distribution of the program and the + # production of secondary products from the program. + # + # In the case where the program, whether in its original form or + # modified, was distributed or delivered to or received by a user from + # any person, organization or entity other than ICOT, unless it makes or + # grants independently of ICOT any specific warranty to the user in + # writing, such person, organization or entity, will also be exempted + # from and not be held liable to the user for any such damages as noted + # above as far as the program is concerned. + # + # ---------------COPYING.ipadic-----END------------------------------------ +</pre> + +<h3>3. Time Zone Database</h3> +<p>ICU uses the public domain data and code derived from <a href="http://www.iana.org/time-zones"> +Time Zone Database</a> for its time zone support. The ownership of the TZ database is explained +in <a href="http://tools.ietf.org/html/rfc6557">BCP 175: Procedure for Maintaining the Time Zone +Database</a> section 7.<p> + +<pre> +7. Database Ownership + + The TZ database itself is not an IETF Contribution or an IETF + document. Rather it is a pre-existing and regularly updated work + that is in the public domain, and is intended to remain in the public + domain. Therefore, BCPs 78 [RFC5378] and 79 [RFC3979] do not apply + to the TZ Database or contributions that individuals make to it. + Should any claims be made and substantiated against the TZ Database, + the organization that is providing the IANA Considerations defined in + this RFC, under the memorandum of understanding with the IETF, + currently ICANN, may act in accordance with all competent court + orders. No ownership claims will be made by ICANN or the IETF Trust + on the database or the code. Any person making a contribution to the + database or code waives all rights to future claims in that + contribution or in the TZ Database. 
+ +</pre> + + +</body> +</html> \ No newline at end of file diff --git a/vendor/github.com/lupine/icu/README.md b/vendor/github.com/lupine/icu/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3008cea1152709cb9bdc5ce219dd5aba6ab6b38d --- /dev/null +++ b/vendor/github.com/lupine/icu/README.md @@ -0,0 +1,144 @@ +About +========== + +Cgo binding for icu4c C library detection and conversion functions. Guaranteed compatibility with version 50.1. + +Installation +========== + +Installation consists of several simple steps. They may be a bit different on your target system (e.g. require more permissions) so adapt them to the parameters of your system. + +### Install build-essential + +Make sure you have **build-essential** installed. Otherwise icu would fail on the configuration stage. + +Installation example using apt-get (Ubuntu): + +``` +sudo apt-get install build-essential +``` + +### Install pkg-config + +Make sure you have **pkg-config** installed. + +Installation example using apt-get (Ubuntu): + +``` +sudo apt-get install pkg-config +``` + +### Get icu4c C library code + +Download and unarchive original icu4c archive from [icu download section](http://site.icu-project.org/download). + +Example (for version 50.1): + +``` +wget http://download.icu-project.org/files/icu4c/50.1/icu4c-50_1-src.tgz +tar -zxvf icu4c-50_1-src.tgz +mv -i ./icu ~/where-you-store-libs +``` + +NOTE: If this link is not working or there are some problems with downloading, there is a stable version 50.1 snapshot saved in [Github Downloads](https://github.com/downloads/goodsign/icu/icu4c-50_1-src.tgz). + +### Build and install icu4c C library + +From the directory, where you unarchived icu4c, run: + +``` +cd source +./configure +make +sudo make install +sudo ldconfig +``` + +### Install Go wrapper + +``` +go get github.com/goodsign/icu +go test github.com/goodsign/icu (must PASS) +``` + +Installation notes +========== + +* Make sure that you have your local library paths set correctly and that installation was successful. Otherwise, **go build** or **go test** may fail. + +* icu4c is installed in your local library directory (e.g. **/usr/local/lib**) and puts its libraries there. This path should be registered in your system (using ldconfig or exporting LD_LIBRARY_PATH, etc.) or the linker would fail. + +* icu4c installs its header files to local include folders (e.g. **/usr/local/include/unicode**) so there is no need to have additional .h files with this package, but the system must be properly set up to detect .h files in those directories. + +Usage +========== + +Note: check icu documentation for returned encoding identifiers. + +Detector +---------- + +```go +// Create detector +detector, err := NewCharsetDetector() + +if err != nil { + //... Handle error ... +} +defer detector.Close() + +// Guess encoding +encMatches, err := detector.GuessCharset(encodedText) + +if err != nil { + //... Handle error ... +} + +// Get charset with max confidence (goes first) +maxenc := encMatches[0].Charset + +// Use maxenc. +// ... +``` + +Converter +---------- + +```go +... + +// Create converter +converter := NewCharsetConverter(DefaultMaxTextSize) + +// Convert to utf-8 +converted, err := converter.ConvertToUtf8(encodedText, maxenc) + +if nil != err { + //... Handle error ... +} +``` + +Usage notes +========== + +* Check **NewCharsetConverter** func comments for details on max text size parameter. +* Often you would use detector and converter in pair. 
So, the 'converter' usage example actually continues the 'detector' example and uses the 'maxenc' result from it. + +More info +---------- + +For more information on icu refer to the original [website](http://site.icu-project.org/), which contains links on theory and other details. + +icu4c Licence +========== + +ICU is released under a nonrestrictive open source license that is suitable for use with both commercial software and with other open source or free software. + +[LICENCE file](https://github.com/goodsign/icu/blob/master/LICENCE_icu) + +Licence +========== + +The goodsign/icu binding is released under the [BSD Licence](http://opensource.org/licenses/bsd-license.php) + +[LICENCE file](https://github.com/goodsign/icu/blob/master/LICENCE) \ No newline at end of file diff --git a/vendor/github.com/lupine/icu/c_bridge.c b/vendor/github.com/lupine/icu/c_bridge.c new file mode 100644 index 0000000000000000000000000000000000000000..2a574d3dc1294498af1a3cf54dd8929143d5a64d --- /dev/null +++ b/vendor/github.com/lupine/icu/c_bridge.c @@ -0,0 +1,118 @@ +#include "c_bridge.h" +#include <string.h> +#include <unicode/utypes.h> +#include <unicode/ucsdet.h> +#include <stdlib.h> +#include <unicode/ucnv.h> + +// See description in c_bridge.h +const int detectCharset(void *detector, + void *input, + int input_len, + int *status, + MatchData *matchBuffer, + int matchBufferSize) { + + // Put input bytes in the detector. + ucsdet_setText((UCharsetDetector*)detector, (char*)input, input_len, status); + if U_FAILURE(*status) { + return 0; + } + + // Prepare vars for returned count and guesses. + int matchCount; + const UCharsetMatch **bestGuesses; + + // Perform analysis and return all guesses and their count. + bestGuesses = ucsdet_detectAll((UCharsetDetector*)detector, &matchCount, status); + if U_FAILURE(*status) { + return 0; + } + + // Fill the matchBuffer. Its size is matchBufferSize, so it is filled with + // less or equal to matchBufferSize number of entries. + int i; + int retCount = matchCount > matchBufferSize ? matchBufferSize : matchCount; + + for (i = 0; i < retCount; i++) { + + const UCharsetMatch* bestGuess = bestGuesses[i]; + const char *bestGuessedCharset = NULL; + const char *bestGuessedLanguage = NULL; + + // Fill guessed encoding + bestGuessedCharset = ucsdet_getName(bestGuess, status); + if U_FAILURE(*status) { + return 0; + } + + // Fill guessed language + bestGuessedLanguage = ucsdet_getLanguage(bestGuess, status); + if U_FAILURE(*status) { + return 0; + } + + // Fill its confidence rating + int32_t conf = ucsdet_getConfidence(bestGuess, status); + if U_FAILURE(*status) { + return 0; + } + + matchBuffer[i].confidence = conf; + matchBuffer[i].charset = bestGuessedCharset; + matchBuffer[i].language = bestGuessedLanguage; + } + + // Return the number of guesses put into matchBuffer. 
+ return retCount; +} + +// See description in c_bridge.h +int convertToUtf16(const char *srcEncoding, + UChar *dest, + int32_t destCapacity, + const char *src, + int32_t srcLength, + int *status){ + UConverter *conv; + + conv = ucnv_open(srcEncoding, status); + if U_FAILURE(*status) { + return 0; + } + + /* Convert from original encoding to UTF-16 */ + int len = ucnv_toUChars(conv, dest, destCapacity, src, srcLength, status); + if U_FAILURE(*status) { + return 0; + } + + ucnv_close(conv); + + return len; +} + +// See description in c_bridge.h +int convertFromUtf16(const char *destEncoding, + char *dest, + int32_t destCapacity, + const UChar *src, + int32_t srcLength, + int *status){ + UConverter *conv; + + conv = ucnv_open(destEncoding, status); + if U_FAILURE(*status) { + return 0; + } + + /* Convert from UTF-16 to destination encoding */ + int len = ucnv_fromUChars(conv, dest, destCapacity, src, srcLength, status); + if U_FAILURE(*status) { + return 0; + } + + ucnv_close(conv); + + return len; +} diff --git a/vendor/github.com/lupine/icu/c_bridge.h b/vendor/github.com/lupine/icu/c_bridge.h new file mode 100644 index 0000000000000000000000000000000000000000..f00b0087eca7f91c2e27f29b800af26869ec391d --- /dev/null +++ b/vendor/github.com/lupine/icu/c_bridge.h @@ -0,0 +1,61 @@ +#ifndef __C_BRIDGE_H__ +#define __C_BRIDGE_H__ + +// C_BRIDGE is a bridge between go and native pure c functions used to +// operate with ICU library code. + +#include <unicode/utypes.h> +#include <unicode/ucsdet.h> + +// MatchData contains information about one 'guess' of the +// encoding detector. It contains the guessed charset (ICU string identifiers, +// see ICU documentation for them) and a confidence coefficient, which is a +// number between 0 and 100 (100 is the best). +typedef struct MatchData { + const char* charset; + const char* language; + short int confidence; +} MatchData; + +// detectCharset performs the detection (guessing) operation using a given detector (ICU internals), +// input data (bytes), input length and error status pointer (Read ICU docs abour error codes). +// +// After the detection is performed, all possible matches are put into the matchBuffer. If there are +// more results than matchBufferSize, then only matchBufferSize entries are put (So no overflow can +// ever happen). +// +// The results of this function are put into the matchBuffer, so it MUST NOT be called asynchronously. +// Caller should guarantee thread safety and perform locks while working with it. +const int detectCharset(void *detector, + void *input, + int input_len, + int *status, + MatchData *matchBuffer, + int matchBufferSize); + +// convertToUtf16 performs conversion from any encoding to utf16. Utf16 is the ICU standard so +// it is easier to convert to/from it. +// +// The results of this function are put into the dest buffer, so it MUST NOT be called asynchronously. +// Caller should guarantee thread safety and perform locks while working with it. +int convertToUtf16(const char *srcEncoding, + UChar *dest, + int32_t destCapacity, + const char *src, + int32_t srcLength, + int *status); + +// convertFromUtf16 performs conversion from utf16 to any other encoding. Utf16 is the ICU standard so +// it is easier to convert to/from it. +// +// The results of this function are put into the dest buffer, so it MUST NOT be called asynchronously. +// Caller should guarantee thread safety and perform locks while working with it. 
+int convertFromUtf16(const char *destEncoding, + char *dest, + int32_t destCapacity, + const UChar *src, + int32_t srcLength, + int *status); + + +#endif //__C_BRIDGE_H__ \ No newline at end of file diff --git a/vendor/github.com/lupine/icu/convert.go b/vendor/github.com/lupine/icu/convert.go new file mode 100644 index 0000000000000000000000000000000000000000..4d5e3abcbceb268cc2454e5c8a075466aabc8cec --- /dev/null +++ b/vendor/github.com/lupine/icu/convert.go @@ -0,0 +1,98 @@ +package icu + +// #cgo pkg-config: icu-i18n +// #include "c_bridge.h" +// #include "stdlib.h" +import "C" +import ( + "fmt" + "sync" + "unsafe" +) + +const ( + DefaultMaxTextSize = 1024 * 1024 // Default value for the max text length in conversion operations + utf8MaxCharSize = 4 + utf16MaxCharSize = 4 +) + +var ( + Utf8CString = C.CString("UTF-8") +) + +// CharsetConverter provides ICU charset conversion functionality. +type CharsetConverter struct { + utf16Buffer []byte + utf8Buffer []byte + maxTextSize int + cMutex sync.Mutex // Mutex used to guarantee thread safety for ICU calls +} + +// NewCharsetConverter creates a new charset converter. It doesn't need to be closed as +// it doesn't allocate any resources. +// +// For better performance, conversion buffers are not allocated on each operation. Instead they +// are created in memory once and then used. 'maxTextSize' sets the size of these buffers. +// ICU library would return error if any processed text is longer than this parameter. +// +// NOTE: +// +// UTF8 uses 1 to 4 bytes for each symbol. +// UTF16 uses 2 bytes to 4 bytes for each symbol. +// +// So, to guarantee successful conversion of text with size = 'maxTextSize' we need: +// maxTextSize * 8 bytes (utf8 buffer + utf16 buffer). +func NewCharsetConverter(maxTextSize int) (*CharsetConverter) { + conv := new(CharsetConverter) + + conv.utf16Buffer = make([]byte, utf16MaxCharSize * maxTextSize) + conv.utf8Buffer = make([]byte, utf8MaxCharSize * maxTextSize) + + return conv +} + +// ConvertToUtf8 converts input bytes encoded with srcEncoding to UTF-8. +func (conv *CharsetConverter) ConvertToUtf8(input []byte, srcEncoding string) ([]byte, error) { + // As described in c_bridge.h, conversion operations are not thread safe and + // should be called consequently. So a mutex is used here. 
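+	// The conversion is done in two steps: srcEncoding -> UTF-16 (ICU's pivot
+	// encoding) via convertToUtf16, then UTF-16 -> UTF-8 via convertFromUtf16.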
+ conv.cMutex.Lock() + defer conv.cMutex.Unlock() + + inputLen := len(input) + if inputLen == 0 { + return nil, fmt.Errorf("Nil length of input") + } + + var status int + + encCString := C.CString(srcEncoding) + inputCString := C.CString(string(input)) + + defer C.free(unsafe.Pointer(encCString)) + defer C.free(unsafe.Pointer(inputCString)) + + convLen := C.convertToUtf16( + encCString, + (*C.UChar)(unsafe.Pointer(&conv.utf16Buffer[0])), + C.int32_t(len(conv.utf16Buffer)), + inputCString, + C.int32_t(len(input)), + (*C.int)(unsafe.Pointer(&status))) + + if isSuccess(status) { + nConvLen := C.convertFromUtf16( + Utf8CString, + (*C.char)(unsafe.Pointer(&conv.utf8Buffer[0])), + C.int32_t(len(conv.utf8Buffer)), + (*C.UChar)(unsafe.Pointer(&conv.utf16Buffer[0])), + C.int32_t(convLen), + (*C.int)(unsafe.Pointer(&status))) + + if isSuccess(status) { + resStr := conv.utf8Buffer[:nConvLen] + return ([]byte)(resStr), nil + } + } + + return nil, fmt.Errorf("ICU Error code returned: %d", status) +} diff --git a/vendor/github.com/lupine/icu/detect.go b/vendor/github.com/lupine/icu/detect.go new file mode 100644 index 0000000000000000000000000000000000000000..649424b0ee3e8193cb4e3a8f1504be301b58c0e8 --- /dev/null +++ b/vendor/github.com/lupine/icu/detect.go @@ -0,0 +1,112 @@ +package icu + +// #cgo pkg-config: icu-i18n +// #include "c_bridge.h" +// #include "stdlib.h" +import "C" +import ( + "fmt" + "sync" + "unsafe" +) + +const ( + U_ZERO_ERROR = 0 // ICU common constant error code which means that no error occured + U_ERROR_LIMIT = 0x7FFFFFFF // Dirty hack, negative error codes are are being turned into large positive ints + MatchDataBufferSize = 25 // Size of the buffer for detection results (Max count of returned guesses per detect call) +) + +// Go implementation of the icu U_SUCCESS macro. Negative status codes are +// warnings, 0 is a success without warnings, > 0 is an error +func isSuccess(status int) bool { + return status <= U_ZERO_ERROR || status >= U_ERROR_LIMIT +} + +// Go implementation of the icu U_FAILURE macro. +func isFailure(status int) bool { + return status > U_ZERO_ERROR && status < U_ERROR_LIMIT +} + +// CharsetDetector provides ICU charset detection functionality. +type CharsetDetector struct { + ptr *C.UCharsetDetector // ICU struct needed for detection + resBuffer [MatchDataBufferSize]C.MatchData + gMutex sync.Mutex // Mutex used to guarantee thread safety for ICU calls +} + +// An equivalent of MatchData C structure (see c_bridge.h) +type Match struct { + Charset string + Language string + Confidence int +} + +// Creates new charset detector. If it is successfully created, it +// must be closed as it needs to free native ICU resources. +func NewCharsetDetector() (*CharsetDetector, error) { + det := new(CharsetDetector) + + var status int + statusPtr := unsafe.Pointer(&status) + + det.ptr = C.ucsdet_open((*C.UErrorCode)(statusPtr)) + + if isFailure(status) { + return nil, fmt.Errorf("ICU Error code returned: %d", status) + } + + return det, nil +} + +func (det *CharsetDetector) GuessCharset(input []byte) (matches []Match, err error) { + + // As described in c_bridge.h, detection operations are not thread safe and + // should be called consequently. So a mutex is used here. + det.gMutex.Lock() + defer det.gMutex.Unlock() + + inputLen := len(input) + if inputLen == 0 { + return nil, fmt.Errorf("Input data len is 0") + } + + var status int + + // Perform detection. Guess count is the number of matches returned. 
+ // The matches themself are put in the result buffer + guessCount := C.detectCharset( + unsafe.Pointer(det.ptr), + unsafe.Pointer(&input[0]), + C.int(inputLen), + (*C.int)(unsafe.Pointer(&status)), + (*C.MatchData)(unsafe.Pointer(&det.resBuffer[0])), + C.int(MatchDataBufferSize)) + + if isSuccess(status) { + // Convert the returned number of entries from result buffer to a slice + // that will be returned + count := int(guessCount) + mt := make([]Match, count, count) + + for i := 0; i < count; i++ { + mData := det.resBuffer[i] + charset := C.GoString(mData.charset) + language := C.GoString(mData.language) + mt[i] = Match{charset, language, int(mData.confidence)} + } + + return mt, nil + } + + return nil, fmt.Errorf("ICU Error code returned: %d", status) +} + +// Close frees native C resources +func (det *CharsetDetector) Close() { + det.gMutex.Lock() + defer det.gMutex.Unlock() + + if det.ptr != nil { + C.ucsdet_close(det.ptr) + } +} diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..937942c2b2c4cf2c0b3043210ac71e594ee4f5df --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 0000000000000000000000000000000000000000..59b8851683f75416876e4ec36e1765d534f94bba --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1339 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. 
+ DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + return append(slice[:index], append(elements, slice[index+amount:]...)...) +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). + return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. 
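+		// hm holds [text1A, text1B, text2A, text2B, midCommon]: the pieces of
+		// each text before and after the longest common middle section.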
+ text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...) + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. + textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. + return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. 
+ runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. + return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. + diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. 
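+//
+// An illustrative line-mode round trip (editorial sketch, not upstream documentation):
+//
+//	dmp := New()
+//	c1, c2, lines := dmp.DiffLinesToChars("alpha\nbeta\n", "alpha\ngamma\n")
+//	diffs := dmp.DiffMain(c1, c2, false)
+//	diffs = dmp.DiffCharsToLines(diffs, lines) // rehydrate the line-level diffs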
+func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) + return string(chars1), string(chars2), lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 + + chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) + chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) + + return chars1, chars2, lineArray +} + +func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { + return dmp.DiffLinesToRunes(string(text1), string(text2)) +} + +// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. +// We use strings instead of []runes as input mainly because you can't use []rune as a map key. +func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + short, long := text1, text2 + if len(short) > len(long) { + short, long = long, short + } + for i, r := range short { + if r != long[i] { + return i + } + } + return len(short) +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. 
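+// For example (illustrative): commonSuffixLength([]rune("abcXYZ"), []rune("defXYZ")) == 3.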
+func commonSuffixLength(text1, text2 []rune) int { + n := min(len(text1), len(text2)) + for i := 0; i < n; i++ { + if text1[len(text1)-i-1] != text2[len(text2)-i-1] { + return i + } + } + return n + + // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + /* + pointermin := 0 + pointermax := math.Min(len(text1), len(text2)) + pointermid := pointermax + pointerend := 0 + for pointermin < pointermid { + if text1[len(text1)-pointermid:len(text1)-pointerend] == + text2[len(text2)-pointermid:len(text2)-pointerend] { + pointermin = pointermid + pointerend = pointermin + } else { + pointermax = pointermid + } + pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) + } + return pointermid + */ +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. + hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. 
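+ // Index 4 of each candidate holds the common middle, so the longer middle wins.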
+ if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. + if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. +func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + + equalities = &equality{ + data: pointer, + next: equalities, + } + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += len(diffs[pointer].Text) + } else { + lengthDeletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. + difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= difference1) && + (len(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities.data + diffs = append( + diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. 
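+ // The previous equality is popped as well below, since it needs to be reevaluated.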
+ equalities = equalities.next + + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: <del>abcxxx</del><ins>xxxdef</ins> + // -> <del>abc</del>xxx<ins>def</ins> + // e.g: <del>xxxabc</del><ins>defxxx</ins> + // -> <ins>def</ins>xxx<del>abc</del> + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(len(deletion))/2 || + float64(overlapLength1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = append( + diffs[:pointer], + append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...) + + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(len(deletion))/2 || + float64(overlapLength2) >= float64(len(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = append( + diffs[:pointer], + append([]Diff{overlap}, diffs[pointer:]...)...) + + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. +func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. + return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. 
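+ // Look at the characters on either side of the split: the last rune of 'one' and the first rune of 'two'.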
+ rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. + if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. 
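+// An equality shorter than DiffEditCost that is surrounded by insertions and deletions
+// costs more to keep as a separate operation than to fold into the neighbouring edits,
+// so it is re-expressed as a paired deletion and insertion.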
+func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del> + // <ins>A</ins>X<ins>C</ins><del>D</del> + // <ins>A</ins><del>B</del>X<ins>C</ins> + // <ins>A</del>X<ins>C</ins><del>D</del> + // <ins>A</ins><del>B</del>X<del>C</del> + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = append(diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. 
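+ // A prefix shared by the pending insertions and deletions can be folded into the preceding equality.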
+ commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. + commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. + if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. + diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffXIndex returns the equivalent location in s2. 
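+// For example (illustrative): given the diffs of "abcd" and "abXcd", source location 2 (the 'c') maps to destination location 3.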
+func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + lastChars1 := 0 + lastChars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. + chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + lastChars1 = chars1 + lastChars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return lastChars2 + } + // Add the remaining character length. + return lastChars2 + (loc - lastChars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶<br>", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("<ins style=\"background:#e6ffe6;\">") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("</ins>") + case DiffDelete: + _, _ = buff.WriteString("<del style=\"background:#ffe6e6;\">") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("</del>") + case DiffEqual: + _, _ = buff.WriteString("<span>") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("</span>") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. +func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += len(aDiff.Text) + case DiffDelete: + deletions += len(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. 
=3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. + delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. +func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + // Remember that string slicing is by byte - we want by rune here. + text := string([]rune(text1)[i : i+int(n)]) + i += int(n) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. + return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len([]rune(text1)) { + return nil, fmt.Errorf("Delta length (%v) smaller than source text length (%v)", i, len(text1)) + } + + return diffs, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 0000000000000000000000000000000000000000..d3acc32ce13a0439319b69db12c6f74d9854e4f1 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. 
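+//
+// A minimal illustrative example (editorial addition, not part of the upstream package documentation):
+//
+//	dmp := New()
+//	diffs := dmp.DiffMain("good dog", "bad dog", false)
+//	fmt.Println(dmp.DiffPrettyText(diffs))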
+package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 0000000000000000000000000000000000000000..17374e109fef2d55431fe4a5e08a8b5259d203a7 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. + return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? 
(speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. + charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing match. But check anyway. + if score <= scoreThreshold { + // Told you so. + scoreThreshold = score + bestLoc = j - 1 + if bestLoc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-bestLoc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { + // No hope for a (better) match at greater error levels. + break + } + lastRd = rd + } + return bestLoc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. +func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { + accuracy := float64(e) / float64(len(pattern)) + proximity := math.Abs(float64(loc - x)) + if dmp.MatchDistance == 0 { + // Dodge divide by zero error. + if proximity == 0 { + return accuracy + } + + return 1.0 + } + return accuracy + (proximity / float64(dmp.MatchDistance)) +} + +// MatchAlphabet initialises the alphabet for the Bitap algorithm. +func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { + s := map[byte]int{} + charPattern := []byte(pattern) + for _, c := range charPattern { + _, ok := s[c] + if !ok { + s[c] = 0 + } + } + i := 0 + + for _, c := range charPattern { + value := s[c] | int(uint(1)<<uint((len(pattern)-i-1))) + s[c] = value + i++ + } + return s +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go new file mode 100644 index 0000000000000000000000000000000000000000..aed242bc6e763e7bcea7bd139700e82753fc719d --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go @@ -0,0 +1,23 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. 
+// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go new file mode 100644 index 0000000000000000000000000000000000000000..116c0434811208176b7af7658beef5b3cc06957f --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go @@ -0,0 +1,556 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "math" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Patch represents one patch operation. +type Patch struct { + diffs []Diff + start1 int + start2 int + length1 int + length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indicies are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.length1 == 0 { + coords1 = strconv.Itoa(p.start1) + ",0" + } else if p.length1 == 1 { + coords1 = strconv.Itoa(p.start1 + 1) + } else { + coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1) + } + + if p.length2 == 0 { + coords2 = strconv.Itoa(p.start2) + ",0" + } else if p.length2 == 1 { + coords2 = strconv.Itoa(p.start2 + 1) + } else { + coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2) + } + + var text bytes.Buffer + _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. + for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. +func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.start2 : patch.start2+patch.length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.start2-padding) + minEnd := min(len(text), patch.start2+patch.length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.start2-padding):patch.start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) 
+ } + // Add the suffix. + suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.start1 -= len(prefix) + patch.start2 -= len(prefix) + // Extend the lengths. + patch.length1 += len(prefix) + len(suffix) + patch.length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. + patch.start1 = charCount1 + patch.start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. + patch.diffs = append(patch.diffs, aDiff) + patch.length1 += len(aDiff.Text) + patch.length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. 
+ if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.start1 = aPatch.start1 + patchCopy.start2 = aPatch.start2 + patchCopy.length1 = aPatch.length1 + patchCopy.length2 = aPatch.length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. + startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.length2 - aPatch.length1 + } else { + // Found a match. :) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. 
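+ // More than PatchDeleteThreshold of the matched region differs, so the patch is dropped.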
+ results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].start1 += paddingLength + patches[i].start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].start1 -= paddingLength // Should be 0. + patches[0].start2 -= paddingLength // Should be 0. + patches[0].length1 += paddingLength + patches[0].length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].start1 -= extraLength + patches[0].start2 -= extraLength + patches[0].length1 += extraLength + patches[0].length2 += extraLength + } + + // Add some padding on end of last diff. + last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].length1 += paddingLength + patches[last].length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. + lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].length1 += extraLength + patches[last].length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + start1 := bigpatch.start1 + start2 := bigpatch.start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. 
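+ // Each smaller patch carries up to PatchMargin characters of context from its neighbours.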
+ patch := Patch{} + empty := true + patch.start1 = start1 - len(precontext) + patch.start2 = start2 - len(precontext) + if len(precontext) != 0 { + patch.length1 = len(precontext) + patch.length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.length2 += len(diffText) + start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.length1 += len(diffText) + start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)] + + patch.length1 += len(diffText) + start1 += len(diffText) + if diffType == DiffEqual { + patch.length2 += len(diffText) + start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. + if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.length1 += len(postcontext) + patch.length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. +func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. 
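+// The serialized form is unified-diff-like: a header such as "@@ -382,8 +481,9 @@" (matched by the regexp below) is followed by lines whose first character is ' ' (context), '-' (deletion) or '+' (insertion), with the remainder percent-encoded (url.QueryUnescape is applied while reading).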
+func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.start1-- + patch.length1 = 1 + } else if m[2] == "0" { + patch.length1 = 0 + } else { + patch.start1-- + patch.length1, _ = strconv.Atoi(m[2]) + } + + patch.start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.start2-- + patch.length2 = 1 + } else if m[4] == "0" { + patch.length2 = 0 + } else { + patch.start2-- + patch.length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 0000000000000000000000000000000000000000..265f29cc7e595ef097d6733587cc49636f637ba2 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. +var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. 
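+// A start index past the end of str yields -1; a start index of zero or less searches from the beginning of str.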
+func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} diff --git a/vendor/github.com/src-d/gcfg/LICENSE b/vendor/github.com/src-d/gcfg/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..87a5cede33929e3d2aedebbe7a2497b16c14f23b --- /dev/null +++ b/vendor/github.com/src-d/gcfg/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/src-d/gcfg/README b/vendor/github.com/src-d/gcfg/README new file mode 100644 index 0000000000000000000000000000000000000000..1ff233a529dd25eb6c6870b4287f05d477cd683b --- /dev/null +++ b/vendor/github.com/src-d/gcfg/README @@ -0,0 +1,4 @@ +Gcfg reads INI-style configuration files into Go structs; +supports user-defined types and subsections. 
+ +Package docs: https://godoc.org/gopkg.in/gcfg.v1 diff --git a/vendor/github.com/src-d/gcfg/doc.go b/vendor/github.com/src-d/gcfg/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..2edcb41a0818504624f1f7cf4d16fc40e96cc121 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/doc.go @@ -0,0 +1,145 @@ +// Package gcfg reads "INI-style" text-based configuration files with +// "name=value" pairs grouped into sections (gcfg files). +// +// This package is still a work in progress; see the sections below for planned +// changes. +// +// Syntax +// +// The syntax is based on that used by git config: +// http://git-scm.com/docs/git-config#_syntax . +// There are some (planned) differences compared to the git config format: +// - improve data portability: +// - must be encoded in UTF-8 (for now) and must not contain the 0 byte +// - include and "path" type is not supported +// (path type may be implementable as a user-defined type) +// - internationalization +// - section and variable names can contain unicode letters, unicode digits +// (as defined in http://golang.org/ref/spec#Characters ) and hyphens +// (U+002D), starting with a unicode letter +// - disallow potentially ambiguous or misleading definitions: +// - `[sec.sub]` format is not allowed (deprecated in gitconfig) +// - `[sec ""]` is not allowed +// - use `[sec]` for section name "sec" and empty subsection name +// - (planned) within a single file, definitions must be contiguous for each: +// - section: '[secA]' -> '[secB]' -> '[secA]' is an error +// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error +// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error +// +// Data structure +// +// The functions in this package read values into a user-defined struct. +// Each section corresponds to a struct field in the config struct, and each +// variable in a section corresponds to a data field in the section struct. +// The mapping of each section or variable name to fields is done either based +// on the "gcfg" struct tag or by matching the name of the section or variable, +// ignoring case. In the latter case, hyphens '-' in section and variable names +// correspond to underscores '_' in field names. +// Fields must be exported; to use a section or variable name starting with a +// letter that is neither upper- or lower-case, prefix the field name with 'X'. +// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .) +// +// For sections with subsections, the corresponding field in config must be a +// map, rather than a struct, with string keys and pointer-to-struct values. +// Values for subsection variables are stored in the map with the subsection +// name used as the map key. +// (Note that unlike section and variable names, subsection names are case +// sensitive.) +// When using a map, and there is a section with the same section name but +// without a subsection name, its values are stored with the empty string used +// as the key. +// It is possible to provide default values for subsections in the section +// "default-<sectionname>" (or by setting values in the corresponding struct +// field "Default_<sectionname>"). +// +// The functions in this package panic if config is not a pointer to a struct, +// or when a field is not of a suitable type (either a struct or a map with +// string keys and pointer-to-struct values). +// +// Parsing of values +// +// The section structs in the config struct may contain single-valued or +// multi-valued variables. 
Variables of unnamed slice type (that is, a type
+// starting with `[]`) are treated as multi-value; all others (including named
+// slice types) are treated as single-valued variables.
+//
+// Single-valued variables are handled based on the type as follows.
+// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
+// and if necessary, a new instance is allocated.
+//
+// For types implementing the encoding.TextUnmarshaler interface, the
+// UnmarshalText method is used to set the value. Implementing this method is
+// the recommended way for parsing user-defined types.
+//
+// For fields of string kind, the value string is assigned to the field, after
+// unquoting and unescaping as needed.
+// For fields of bool kind, the field is set to true if the value is "true",
+// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
+// "0", ignoring case. In addition, single-valued bool fields can be specified
+// with a "blank" value (variable name without equals sign and value); in such
+// case the value is set to true.
+//
+// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
+// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
+// unintuitively handling zero-padded numbers as octal.) Other types having
+// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
+// decimal, hexadecimal, or octal values.
+// Parsing mode for integer types can be overridden using the struct tag option
+// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
+// (each standing for decimal, hexadecimal, and octal, respectively.)
+//
+// All other types are parsed using fmt.Sscanf with the "%v" verb.
+//
+// For multi-valued variables, each individual value is parsed as above and
+// appended to the slice. If the first value is specified as a "blank" value
+// (variable name without equals sign and value), a new slice is allocated;
+// that is, any values previously set in the slice will be ignored.
+//
+// The types subpackage provides helpers for parsing "enum-like" and integer
+// types.
+//
+// Error handling
+//
+// There are 3 types of errors:
+//
+// - programmer errors / panics:
+// - invalid configuration structure
+// - data errors:
+// - fatal errors:
+// - invalid configuration syntax
+// - warnings:
+// - data that doesn't belong to any part of the config structure
+//
+// Programmer errors trigger panics. These should be fixed by the programmer
+// before releasing code that uses gcfg.
+//
+// Data errors cause gcfg to return a non-nil error value. This includes the
+// case when there are extra unknown key-value definitions in the configuration
+// data (extra data).
+// However, on some occasions it is desirable to be able to proceed in
+// situations when the only data error is that of extra data.
+// These errors are handled at a different (warning) priority and can be
+// filtered out programmatically. To ignore extra data warnings, wrap the
+// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
+//
+// TODO
+//
+// The following is a list of changes under consideration:
+// - documentation
+// - self-contained syntax documentation
+// - more practical examples
+// - move TODOs to issue tracker (eventually)
+// - syntax
+// - reconsider valid escape sequences
+// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
+// - reading / parsing gcfg files +// - define internal representation structure +// - support multiple inputs (readers, strings, files) +// - support declaring encoding (?) +// - support varying fields sets for subsections (?) +// - writing gcfg files +// - error handling +// - make error context accessible programmatically? +// - limit input size? +// +package gcfg // import "github.com/src-d/gcfg" diff --git a/vendor/github.com/src-d/gcfg/errors.go b/vendor/github.com/src-d/gcfg/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..853c76021de43507c71475802de5836533773bde --- /dev/null +++ b/vendor/github.com/src-d/gcfg/errors.go @@ -0,0 +1,41 @@ +package gcfg + +import ( + "gopkg.in/warnings.v0" +) + +// FatalOnly filters the results of a Read*Into invocation and returns only +// fatal errors. That is, errors (warnings) indicating data for unknown +// sections / variables is ignored. Example invocation: +// +// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile)) +// if err != nil { +// ... +// +func FatalOnly(err error) error { + return warnings.FatalOnly(err) +} + +func isFatal(err error) bool { + _, ok := err.(extraData) + return !ok +} + +type extraData struct { + section string + subsection *string + variable *string +} + +func (e extraData) Error() string { + s := "can't store data at section \"" + e.section + "\"" + if e.subsection != nil { + s += ", subsection \"" + *e.subsection + "\"" + } + if e.variable != nil { + s += ", variable \"" + *e.variable + "\"" + } + return s +} + +var _ error = extraData{} diff --git a/vendor/github.com/src-d/gcfg/go1_0.go b/vendor/github.com/src-d/gcfg/go1_0.go new file mode 100644 index 0000000000000000000000000000000000000000..6670210791dd3bcdcc07aea49f5c2b6f734c4e1f --- /dev/null +++ b/vendor/github.com/src-d/gcfg/go1_0.go @@ -0,0 +1,7 @@ +// +build !go1.2 + +package gcfg + +type textUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/src-d/gcfg/go1_2.go b/vendor/github.com/src-d/gcfg/go1_2.go new file mode 100644 index 0000000000000000000000000000000000000000..6f5843bc7cd963d17786081a91ff2f23957135aa --- /dev/null +++ b/vendor/github.com/src-d/gcfg/go1_2.go @@ -0,0 +1,9 @@ +// +build go1.2 + +package gcfg + +import ( + "encoding" +) + +type textUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/src-d/gcfg/read.go b/vendor/github.com/src-d/gcfg/read.go new file mode 100644 index 0000000000000000000000000000000000000000..ea014f1b3392223761d9b303788c396845f63e7e --- /dev/null +++ b/vendor/github.com/src-d/gcfg/read.go @@ -0,0 +1,273 @@ +package gcfg + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/src-d/gcfg/scanner" + "github.com/src-d/gcfg/token" + "gopkg.in/warnings.v0" +) + +var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'} + +// no error: invalid literals should be caught by scanner +func unquote(s string) string { + u, q, esc := make([]rune, 0, len(s)), false, false + for _, c := range s { + if esc { + uc, ok := unescape[c] + switch { + case ok: + u = append(u, uc) + fallthrough + case !q && c == '\n': + esc = false + continue + } + panic("invalid escape sequence") + } + switch c { + case '"': + q = !q + case '\\': + esc = true + default: + u = append(u, c) + } + } + if q { + panic("missing end quote") + } + if esc { + panic("invalid escape sequence") + } + return string(u) +} + +func read(c *warnings.Collector, callback func(string,string,string,string,bool)error, + fset *token.FileSet, file 
*token.File, src []byte) error { + // + var s scanner.Scanner + var errs scanner.ErrorList + s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) + sect, sectsub := "", "" + pos, tok, lit := s.Scan() + errfn := func(msg string) error { + return fmt.Errorf("%s: %s", fset.Position(pos), msg) + } + for { + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + switch tok { + case token.EOF: + return nil + case token.EOL, token.COMMENT: + pos, tok, lit = s.Scan() + case token.LBRACK: + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.IDENT { + if err := c.Collect(errfn("expected section name")); err != nil { + return err + } + } + sect, sectsub = lit, "" + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok == token.STRING { + sectsub = unquote(lit) + if sectsub == "" { + if err := c.Collect(errfn("empty subsection name")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + } + if tok != token.RBRACK { + if sectsub == "" { + if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected right bracket")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + // If a section/subsection header was found, ensure a + // container object is created, even if there are no + // variables further down. 
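+			// The empty name and value tell the callback that only the section/subsection container itself needs to exist.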
+ err := c.Collect(callback(sect, sectsub, "", "", true)) + if err != nil { + return err + } + case token.IDENT: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + n := lit + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + return errs.Err() + } + blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, "" + if !blank { + if tok != token.ASSIGN { + if err := c.Collect(errfn("expected '='")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.STRING { + if err := c.Collect(errfn("expected value")); err != nil { + return err + } + } + v = unquote(lit) + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.EOL && tok != token.EOF && tok != token.COMMENT { + if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil { + return err + } + } + } + err := c.Collect(callback(sect, sectsub, n, v, blank)) + if err != nil { + return err + } + default: + if sect == "" { + if err := c.Collect(errfn("expected section header")); err != nil { + return err + } + } + if err := c.Collect(errfn("expected section header or variable declaration")); err != nil { + return err + } + } + } + panic("never reached") +} + +func readInto(config interface{}, fset *token.FileSet, file *token.File, + src []byte) error { + // + c := warnings.NewCollector(isFatal) + firstPassCallback := func(s string, ss string, k string, v string, bv bool) error { + return set(c, config, s, ss, k, v, bv, false) + } + err := read(c, firstPassCallback, fset, file, src) + if err != nil { + return err + } + secondPassCallback := func(s string, ss string, k string, v string, bv bool) error { + return set(c, config, s, ss, k, v, bv, true) + } + err = read(c, secondPassCallback, fset, file, src) + if err != nil { + return err + } + return c.Done() +} + +// ReadWithCallback reads gcfg formatted data from reader and calls +// callback with each section and option found. +// +// Callback is called with section, subsection, option key, option value +// and blank value flag as arguments. +// +// When a section is found, callback is called with nil subsection, option key +// and option value. +// +// When a subsection is found, callback is called with nil option key and +// option value. +// +// If blank value flag is true, it means that the value was not set for an option +// (as opposed to set to empty string). +// +// If callback returns an error, ReadWithCallback terminates with an error too. +func ReadWithCallback(reader io.Reader, callback func(string,string,string,string,bool)error) error { + src, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + c := warnings.NewCollector(isFatal) + + return read(c, callback, fset, file, src) +} + +// ReadInto reads gcfg formatted data from reader and sets the values into the +// corresponding fields in config. +func ReadInto(config interface{}, reader io.Reader) error { + src, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + return readInto(config, fset, file, src) +} + +// ReadStringInto reads gcfg formatted data from str and sets the values into +// the corresponding fields in config. 
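+// A minimal usage sketch (the struct layout and input below are only an
+// illustration, not part of the package API):
+//
+//	var cfg struct {
+//		Section struct{ Name string }
+//	}
+//	err := gcfg.ReadStringInto(&cfg, "[section]\nname = value")
+//	// on success, cfg.Section.Name == "value"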
+func ReadStringInto(config interface{}, str string) error { + r := strings.NewReader(str) + return ReadInto(config, r) +} + +// ReadFileInto reads gcfg formatted data from the file filename and sets the +// values into the corresponding fields in config. +func ReadFileInto(config interface{}, filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + src, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fset := token.NewFileSet() + file := fset.AddFile(filename, fset.Base(), len(src)) + return readInto(config, fset, file, src) +} diff --git a/vendor/github.com/src-d/gcfg/scanner/errors.go b/vendor/github.com/src-d/gcfg/scanner/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..f3fcecacbb6150353a3c5401e0351dadc188fd00 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/scanner/errors.go @@ -0,0 +1,121 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scanner + +import ( + "fmt" + "io" + "sort" +) + +import ( + "github.com/src-d/gcfg/token" +) + +// In an ErrorList, an error is represented by an *Error. +// The position Pos, if valid, points to the beginning of +// the offending token, and the error condition is described +// by Msg. +// +type Error struct { + Pos token.Position + Msg string +} + +// Error implements the error interface. +func (e Error) Error() string { + if e.Pos.Filename != "" || e.Pos.IsValid() { + // don't print "<unknown position>" + // TODO(gri) reconsider the semantics of Position.IsValid + return e.Pos.String() + ": " + e.Msg + } + return e.Msg +} + +// ErrorList is a list of *Errors. +// The zero value for an ErrorList is an empty ErrorList ready to use. +// +type ErrorList []*Error + +// Add adds an Error with given position and error message to an ErrorList. +func (p *ErrorList) Add(pos token.Position, msg string) { + *p = append(*p, &Error{pos, msg}) +} + +// Reset resets an ErrorList to no errors. +func (p *ErrorList) Reset() { *p = (*p)[0:0] } + +// ErrorList implements the sort Interface. +func (p ErrorList) Len() int { return len(p) } +func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p ErrorList) Less(i, j int) bool { + e := &p[i].Pos + f := &p[j].Pos + if e.Filename < f.Filename { + return true + } + if e.Filename == f.Filename { + return e.Offset < f.Offset + } + return false +} + +// Sort sorts an ErrorList. *Error entries are sorted by position, +// other errors are sorted by error message, and before any *Error +// entry. +// +func (p ErrorList) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an ErrorList and removes all but the first error per line. +func (p *ErrorList) RemoveMultiples() { + sort.Sort(p) + var last token.Position // initial last.Line is != any legal error line + i := 0 + for _, e := range *p { + if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { + last = e.Pos + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An ErrorList implements the error interface. +func (p ErrorList) Error() string { + switch len(p) { + case 0: + return "no errors" + case 1: + return p[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. 
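+// This lets callers finish error collection with a plain "return errs.Err()" and receive nil when nothing was recorded.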
+func (p ErrorList) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// PrintError is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an ErrorList. Otherwise +// it prints the err string. +// +func PrintError(w io.Writer, err error) { + if list, ok := err.(ErrorList); ok { + for _, e := range list { + fmt.Fprintf(w, "%s\n", e) + } + } else if err != nil { + fmt.Fprintf(w, "%s\n", err) + } +} diff --git a/vendor/github.com/src-d/gcfg/scanner/scanner.go b/vendor/github.com/src-d/gcfg/scanner/scanner.go new file mode 100644 index 0000000000000000000000000000000000000000..f15867648c59df14c5a467ab17b7248b10a74bfa --- /dev/null +++ b/vendor/github.com/src-d/gcfg/scanner/scanner.go @@ -0,0 +1,342 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scanner implements a scanner for gcfg configuration text. +// It takes a []byte as source which can then be tokenized +// through repeated calls to the Scan method. +// +// Note that the API for the scanner package may change to accommodate new +// features or implementation changes in gcfg. +// +package scanner + +import ( + "fmt" + "path/filepath" + "unicode" + "unicode/utf8" +) + +import ( + "github.com/src-d/gcfg/token" +) + +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is +// encountered and a handler was installed, the handler is called with a +// position and an error message. The position points to the beginning of +// the offending token. +// +type ErrorHandler func(pos token.Position, msg string) + +// A Scanner holds the scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. +// +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + err ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + nextVal bool // next token is expected to be a value + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. +// +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.error(s.offset, "illegal character NUL") + case r >= 0x80: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.error(s.offset, "illegal UTF-8 encoding") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A mode value is a set of flags (or 0). +// They control scanner behavior. +// +type Mode uint + +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. 
+// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +// +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. + if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.err = err + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.ErrorCount = 0 + s.nextVal = false + + s.next() +} + +func (s *Scanner) error(offs int, msg string) { + if s.err != nil { + s.err(s.file.Position(s.file.Pos(offs)), msg) + } + s.ErrorCount++ +} + +func (s *Scanner) scanComment() string { + // initial [;#] already consumed + offs := s.offset - 1 // position of initial [;#] + + for s.ch != '\n' && s.ch >= 0 { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanEscape(val bool) { + offs := s.offset + ch := s.ch + s.next() // always make progress + switch ch { + case '\\', '"': + // ok + case 'n', 't': + if val { + break // ok + } + fallthrough + default: + s.error(offs, "unknown escape sequence") + } +} + +func (s *Scanner) scanString() string { + // '"' opening already consumed + offs := s.offset - 1 + + for s.ch != '"' { + ch := s.ch + s.next() + if ch == '\n' || ch < 0 { + s.error(offs, "string not terminated") + break + } + if ch == '\\' { + s.scanEscape(false) + } + } + + s.next() + + return string(s.src[offs:s.offset]) +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +func (s *Scanner) scanValString() string { + offs := s.offset + + hasCR := false + end := offs + inQuote := false +loop: + for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { + ch := s.ch + s.next() + switch { + case inQuote && ch == '\\': + s.scanEscape(true) + case !inQuote && ch == '\\': + if s.ch == '\r' { + hasCR = true + s.next() + } + if s.ch != '\n' { + s.error(offs, "unquoted '\\' must be followed by new line") + break loop + } + s.next() + case ch == '"': + inQuote = !inQuote + case ch == '\r': + hasCR = true + case ch < 0 || inQuote && ch == '\n': + s.error(offs, "string not terminated") + break loop + } + if inQuote || !isWhiteSpace(ch) { + end = s.offset + } + } + + lit := s.src[offs:end] + if hasCR { + lit = stripCR(lit) + } + + return string(lit) +} + +func isWhiteSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\r' +} + +func (s *Scanner) skipWhitespace() { + for isWhiteSpace(s.ch) 
{ + s.next() + } +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// token.EOF. +// +// If the returned token is a literal (token.IDENT, token.STRING) or +// token.COMMENT, the literal string has the corresponding value. +// +// If the returned token is token.ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. +// +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace() + + // current token start + pos = s.file.Pos(s.offset) + + // determine token value + switch ch := s.ch; { + case s.nextVal: + lit = s.scanValString() + tok = token.STRING + s.nextVal = false + case isLetter(ch): + lit = s.scanIdentifier() + tok = token.IDENT + default: + s.next() // always make progress + switch ch { + case -1: + tok = token.EOF + case '\n': + tok = token.EOL + case '"': + tok = token.STRING + lit = s.scanString() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case ';', '#': + // comment + lit = s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + goto scanAgain + } + tok = token.COMMENT + case '=': + tok = token.ASSIGN + s.nextVal = true + default: + s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) + tok = token.ILLEGAL + lit = string(ch) + } + } + + return +} diff --git a/vendor/github.com/src-d/gcfg/set.go b/vendor/github.com/src-d/gcfg/set.go new file mode 100644 index 0000000000000000000000000000000000000000..771258f0ef2fed18037aa2978bc4b98de7129cb2 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/set.go @@ -0,0 +1,332 @@ +package gcfg + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/src-d/gcfg/types" + "gopkg.in/warnings.v0" +) + +type tag struct { + ident string + intMode string +} + +func newTag(ts string) tag { + t := tag{} + s := strings.Split(ts, ",") + t.ident = s[0] + for _, tse := range s[1:] { + if strings.HasPrefix(tse, "int=") { + t.intMode = tse[len("int="):] + } + } + return t +} + +func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { + var n string + r0, _ := utf8.DecodeRuneInString(name) + if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { + n = "X" + } + n += strings.Replace(name, "-", "_", -1) + f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { + if !v.FieldByName(fieldName).CanSet() { + return false + } + f, _ := v.Type().FieldByName(fieldName) + t := newTag(f.Tag.Get("gcfg")) + if t.ident != "" { + return strings.EqualFold(t.ident, name) + } + return strings.EqualFold(n, fieldName) + }) + if !ok { + return reflect.Value{}, tag{} + } + return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) +} + +type setter func(destp interface{}, blank bool, val string, t tag) error + +var errUnsupportedType = fmt.Errorf("unsupported type") +var 
errBlankUnsupported = fmt.Errorf("blank value not supported for type") + +var setters = []setter{ + typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, +} + +func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { + dtu, ok := d.(textUnmarshaler) + if !ok { + return errUnsupportedType + } + if blank { + return errBlankUnsupported + } + return dtu.UnmarshalText([]byte(val)) +} + +func boolSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) + return nil + } + b, err := types.ParseBool(val) + if err == nil { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) + } + return err +} + +func intMode(mode string) types.IntMode { + var m types.IntMode + if strings.ContainsAny(mode, "dD") { + m |= types.Dec + } + if strings.ContainsAny(mode, "hH") { + m |= types.Hex + } + if strings.ContainsAny(mode, "oO") { + m |= types.Oct + } + return m +} + +var typeModes = map[reflect.Type]types.IntMode{ + reflect.TypeOf(int(0)): types.Dec | types.Hex, + reflect.TypeOf(int8(0)): types.Dec | types.Hex, + reflect.TypeOf(int16(0)): types.Dec | types.Hex, + reflect.TypeOf(int32(0)): types.Dec | types.Hex, + reflect.TypeOf(int64(0)): types.Dec | types.Hex, + reflect.TypeOf(uint(0)): types.Dec | types.Hex, + reflect.TypeOf(uint8(0)): types.Dec | types.Hex, + reflect.TypeOf(uint16(0)): types.Dec | types.Hex, + reflect.TypeOf(uint32(0)): types.Dec | types.Hex, + reflect.TypeOf(uint64(0)): types.Dec | types.Hex, + // use default mode (allow dec/hex/oct) for uintptr type + reflect.TypeOf(big.Int{}): types.Dec | types.Hex, +} + +func intModeDefault(t reflect.Type) types.IntMode { + m, ok := typeModes[t] + if !ok { + m = types.Dec | types.Hex | types.Oct + } + return m +} + +func intSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + mode := intMode(t.intMode) + if mode == 0 { + mode = intModeDefault(reflect.TypeOf(d).Elem()) + } + return types.ParseInt(d, val, mode) +} + +func stringSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + dsp, ok := d.(*string) + if !ok { + return errUnsupportedType + } + *dsp = val + return nil +} + +var kindSetters = map[reflect.Kind]setter{ + reflect.String: stringSetter, + reflect.Bool: boolSetter, + reflect.Int: intSetter, + reflect.Int8: intSetter, + reflect.Int16: intSetter, + reflect.Int32: intSetter, + reflect.Int64: intSetter, + reflect.Uint: intSetter, + reflect.Uint8: intSetter, + reflect.Uint16: intSetter, + reflect.Uint32: intSetter, + reflect.Uint64: intSetter, + reflect.Uintptr: intSetter, +} + +var typeSetters = map[reflect.Type]setter{ + reflect.TypeOf(big.Int{}): intSetter, +} + +func typeSetter(d interface{}, blank bool, val string, tt tag) error { + t := reflect.ValueOf(d).Type().Elem() + setter, ok := typeSetters[t] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func kindSetter(d interface{}, blank bool, val string, tt tag) error { + k := reflect.ValueOf(d).Type().Elem().Kind() + setter, ok := kindSetters[k] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func scanSetter(d interface{}, blank bool, val string, tt tag) error { + if blank { + return errBlankUnsupported + } + return types.ScanFully(d, val, 'v') +} + +func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, + vType reflect.Type) (reflect.Value, error) { + // + pv := reflect.New(vType) + dfltName := 
"default-" + sect + dfltField, _ := fieldFold(vCfg, dfltName) + var err error + if dfltField.IsValid() { + b := bytes.NewBuffer(nil) + ge := gob.NewEncoder(b) + if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { + return pv, err + } + gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) + if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { + return pv, err + } + } + return pv, nil +} + +func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, + value string, blankValue bool, subsectPass bool) error { + // + vPCfg := reflect.ValueOf(cfg) + if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("config must be a pointer to a struct")) + } + vCfg := vPCfg.Elem() + vSect, _ := fieldFold(vCfg, sect) + if !vSect.IsValid() { + err := extraData{section: sect} + return c.Collect(err) + } + isSubsect := vSect.Kind() == reflect.Map + if subsectPass != isSubsect { + return nil + } + if isSubsect { + vst := vSect.Type() + if vst.Key().Kind() != reflect.String || + vst.Elem().Kind() != reflect.Ptr || + vst.Elem().Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("map field for section must have string keys and "+ + " pointer-to-struct values: section %q", sect)) + } + if vSect.IsNil() { + vSect.Set(reflect.MakeMap(vst)) + } + k := reflect.ValueOf(sub) + pv := vSect.MapIndex(k) + if !pv.IsValid() { + vType := vSect.Type().Elem().Elem() + var err error + if pv, err = newValue(c, sect, vCfg, vType); err != nil { + return err + } + vSect.SetMapIndex(k, pv) + } + vSect = pv.Elem() + } else if vSect.Kind() != reflect.Struct { + panic(fmt.Errorf("field for section must be a map or a struct: "+ + "section %q", sect)) + } else if sub != "" { + err := extraData{section: sect, subsection: &sub} + return c.Collect(err) + } + // Empty name is a special value, meaning that only the + // section/subsection object is to be created, with no values set. 
+ if name == "" { + return nil + } + vVar, t := fieldFold(vSect, name) + if !vVar.IsValid() { + var err error + if isSubsect { + err = extraData{section: sect, subsection: &sub, variable: &name} + } else { + err = extraData{section: sect, variable: &name} + } + return c.Collect(err) + } + // vVal is either single-valued var, or newly allocated value within multi-valued var + var vVal reflect.Value + // multi-value if unnamed slice type + isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || + vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice + if isMulti && vVar.Kind() == reflect.Ptr { + if vVar.IsNil() { + vVar.Set(reflect.New(vVar.Type().Elem())) + } + vVar = vVar.Elem() + } + if isMulti && blankValue { + vVar.Set(reflect.Zero(vVar.Type())) + return nil + } + if isMulti { + vVal = reflect.New(vVar.Type().Elem()).Elem() + } else { + vVal = vVar + } + isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr + isNew := isDeref && vVal.IsNil() + // vAddr is address of value to set (dereferenced & allocated as needed) + var vAddr reflect.Value + switch { + case isNew: + vAddr = reflect.New(vVal.Type().Elem()) + case isDeref && !isNew: + vAddr = vVal + default: + vAddr = vVal.Addr() + } + vAddrI := vAddr.Interface() + err, ok := error(nil), false + for _, s := range setters { + err = s(vAddrI, blankValue, value, t) + if err == nil { + ok = true + break + } + if err != errUnsupportedType { + return err + } + } + if !ok { + // in case all setters returned errUnsupportedType + return err + } + if isNew { // set reference if it was dereferenced and newly allocated + vVal.Set(vAddr) + } + if isMulti { // append if multi-valued + vVar.Set(reflect.Append(vVar, vVal)) + } + return nil +} diff --git a/vendor/github.com/src-d/gcfg/token/position.go b/vendor/github.com/src-d/gcfg/token/position.go new file mode 100644 index 0000000000000000000000000000000000000000..fc45c1e7693efd56fc38784556f4c7e27d6602a3 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/token/position.go @@ -0,0 +1,435 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri) consider making this a separate package outside the go directory. + +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. +// +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file set. 
+// It can be converted into a Position for a more convenient, but much +// larger, representation. +// +// The Pos value for a given file is a number in the range [base, base+size], +// where base and size are specified when adding the file to the file set via +// AddFile. +// +// To create the Pos value for a specific source offset, first add +// the respective file to the current file set (via FileSet.AddFile) +// and then call File.Pos(offset) for that file. Given a Pos value p +// for a specific file set fset, the corresponding Position value is +// obtained by calling fset.Position(p). +// +// Pos values can be compared directly with the usual comparison operators: +// If two Pos values p and q are in the same file, comparing p and q is +// equivalent to comparing the respective source file offsets. If p and q +// are in different files, p < q is true if the file implied by p was added +// to the respective file set before the file implied by q. +// +type Pos int + +// The zero value for Pos is NoPos; there is no file and line information +// associated with it, and NoPos().IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +// +const NoPos Pos = 0 + +// IsValid returns true if the position is valid. +func (p Pos) IsValid() bool { + return p != NoPos +} + +// ----------------------------------------------------------------------------- +// File + +// A File is a handle for a file belonging to a FileSet. +// A File has a name, size, and line offset table. +// +type File struct { + set *FileSet + name string // file name as provided to AddFile + base int // Pos value range for this file is [base...base+size] + size int // file size as provided to AddFile + + // lines and infos are protected by set.mutex + lines []int + infos []lineInfo +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return f.base +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return f.size +} + +// LineCount returns the number of lines in file f. +func (f *File) LineCount() int { + f.set.mutex.RLock() + n := len(f.lines) + f.set.mutex.RUnlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. +// +func (f *File) AddLine(offset int) { + f.set.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { + f.lines = append(f.lines, offset) + } + f.set.mutex.Unlock() +} + +// SetLines sets the line offsets for a file and returns true if successful. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. 
+// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= offset { + return false + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + f.set.mutex.Lock() + if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.set.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int) Pos { + if offset > f.size { + panic("illegal file offset") + } + return Pos(f.base + offset) +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + return int(p) - f.base +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Line(p Pos) int { + // TODO(gri) this can be implemented much more efficiently + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// info returns the file name, line, and column number for a file offset. +func (f *File) info(offset int) (filename string, line, column int) { + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = i+1, offset-f.lines[i]+1 + } + if len(f.infos) > 0 { + // almost no files have extra line infos + if i := searchLineInfos(f.infos, offset); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, alt.Offset); i >= 0 { + line += alt.Line - i - 1 + } + } + } + return +} + +func (f *File) position(p Pos) (pos Position) { + offset := int(p) - f.base + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.info(offset) + return +} + +// Position returns the Position value for the given file position p; +// p must be a Pos value in that file or NoPos. 
+// +func (f *File) Position(p Pos) (pos Position) { + if p != NoPos { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p) + } + return +} + +// ----------------------------------------------------------------------------- +// FileSet + +// A FileSet represents a set of source files. +// Methods of file sets are synchronized; multiple goroutines +// may invoke them concurrently. +// +type FileSet struct { + mutex sync.RWMutex // protects the file set + base int // base offset for the next file + files []*File // list of files in the order added to the set + last *File // cache of last file looked up +} + +// NewFileSet creates a new file set. +func NewFileSet() *FileSet { + s := new(FileSet) + s.base = 1 // 0 == NoPos + return s +} + +// Base returns the minimum base offset that must be provided to +// AddFile when adding the next file. +// +func (s *FileSet) Base() int { + s.mutex.RLock() + b := s.base + s.mutex.RUnlock() + return b + +} + +// AddFile adds a new file with a given filename, base offset, and file size +// to the file set s and returns the file. Multiple files may have the same +// name. The base offset must not be smaller than the FileSet's Base(), and +// size must not be negative. +// +// Adding the file will set the file set's Base() value to base + size + 1 +// as the minimum base value for the next file. The following relationship +// exists between a Pos value p for a given file offset offs: +// +// int(p) = base + offs +// +// with offs in the range [0, size] and thus p in the range [base, base+size]. +// For convenience, File.Pos may be used to create file-specific position +// values from a file offset. +// +func (s *FileSet) AddFile(filename string, base, size int) *File { + s.mutex.Lock() + defer s.mutex.Unlock() + if base < s.base || size < 0 { + panic("illegal base or size") + } + // base >= s.base && size >= 0 + f := &File{s, filename, base, size, []int{0}, nil} + base += size + 1 // +1 because EOF also has a position + if base < 0 { + panic("token.Pos offset overflow (> 2G of source code in file set)") + } + // add the file to the file set + s.base = base + s.files = append(s.files, f) + s.last = f + return f +} + +// Iterate calls f for the files in the file set in the order they were added +// until f returns false. +// +func (s *FileSet) Iterate(f func(*File) bool) { + for i := 0; ; i++ { + var file *File + s.mutex.RLock() + if i < len(s.files) { + file = s.files[i] + } + s.mutex.RUnlock() + if file == nil || !f(file) { + break + } + } +} + +func searchFiles(a []*File, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 +} + +func (s *FileSet) file(p Pos) *File { + // common case: p is in last file + if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { + return f + } + // p is not in last file - search all files + if i := searchFiles(s.files, int(p)); i >= 0 { + f := s.files[i] + // f.base <= int(p) by definition of searchFiles + if int(p) <= f.base+f.size { + s.last = f + return f + } + } + return nil +} + +// File returns the file that contains the position p. +// If no such file is found (for instance for p == NoPos), +// the result is nil. +// +func (s *FileSet) File(p Pos) (f *File) { + if p != NoPos { + s.mutex.RLock() + f = s.file(p) + s.mutex.RUnlock() + } + return +} + +// Position converts a Pos in the fileset into a general Position. 
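+// It is the FileSet-level counterpart of File.Position; for NoPos, or for a Pos that belongs to no file in the set, the zero Position is returned.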
+func (s *FileSet) Position(p Pos) (pos Position) { + if p != NoPos { + s.mutex.RLock() + if f := s.file(p); f != nil { + pos = f.position(p) + } + s.mutex.RUnlock() + } + return +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []int, x int) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/github.com/src-d/gcfg/token/serialize.go b/vendor/github.com/src-d/gcfg/token/serialize.go new file mode 100644 index 0000000000000000000000000000000000000000..4adc8f9e33422f656039c98a50c65713b3734698 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/token/serialize.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +type serializedFile struct { + // fields correspond 1:1 to fields with same (lower-case) name in File + Name string + Base int + Size int + Lines []int + Infos []lineInfo +} + +type serializedFileSet struct { + Base int + Files []serializedFile +} + +// Read calls decode to deserialize a file set into s; s must not be nil. +func (s *FileSet) Read(decode func(interface{}) error) error { + var ss serializedFileSet + if err := decode(&ss); err != nil { + return err + } + + s.mutex.Lock() + s.base = ss.Base + files := make([]*File, len(ss.Files)) + for i := 0; i < len(ss.Files); i++ { + f := &ss.Files[i] + files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} + } + s.files = files + s.last = nil + s.mutex.Unlock() + + return nil +} + +// Write calls encode to serialize the file set s. +func (s *FileSet) Write(encode func(interface{}) error) error { + var ss serializedFileSet + + s.mutex.Lock() + ss.Base = s.base + files := make([]serializedFile, len(s.files)) + for i, f := range s.files { + files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} + } + ss.Files = files + s.mutex.Unlock() + + return encode(ss) +} diff --git a/vendor/github.com/src-d/gcfg/token/token.go b/vendor/github.com/src-d/gcfg/token/token.go new file mode 100644 index 0000000000000000000000000000000000000000..b3c7c83fa9e6c67f59bc4ddfbf66164e9ba3b819 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/token/token.go @@ -0,0 +1,83 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package token defines constants representing the lexical tokens of the gcfg +// configuration syntax and basic operations on tokens (printing, predicates). +// +// Note that the API for the token package may change to accommodate new +// features or implementation changes in gcfg. +// +package token + +import "strconv" + +// Token is the set of lexical tokens of the gcfg configuration syntax. +type Token int + +// The list of tokens. 
+const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + + literal_beg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // section-name, variable-name + STRING // "subsection-name", variable value + literal_end + + operator_beg + // Operators and delimiters + ASSIGN // = + LBRACK // [ + RBRACK // ] + EOL // \n + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + STRING: "STRING", + + ASSIGN: "=", + LBRACK: "[", + RBRACK: "]", + EOL: "\n", +} + +// String returns the string corresponding to the token tok. +// For operators and delimiters, the string is the actual token character +// sequence (e.g., for the token ASSIGN, the string is "="). For all other +// tokens the string corresponds to the token constant name (e.g. for the +// token IDENT, the string is "IDENT"). +// +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// Predicates + +// IsLiteral returns true for tokens corresponding to identifiers +// and basic type literals; it returns false otherwise. +// +func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +// +func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/vendor/github.com/src-d/gcfg/types/bool.go b/vendor/github.com/src-d/gcfg/types/bool.go new file mode 100644 index 0000000000000000000000000000000000000000..8dcae0d8cfd1dfc115e9234d87ea7f7c5246ba71 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/types/bool.go @@ -0,0 +1,23 @@ +package types + +// BoolValues defines the name and value mappings for ParseBool. +var BoolValues = map[string]interface{}{ + "true": true, "yes": true, "on": true, "1": true, + "false": false, "no": false, "off": false, "0": false, +} + +var boolParser = func() *EnumParser { + ep := &EnumParser{} + ep.AddVals(BoolValues) + return ep +}() + +// ParseBool parses bool values according to the definitions in BoolValues. +// Parsing is case-insensitive. +func ParseBool(s string) (bool, error) { + v, err := boolParser.Parse(s) + if err != nil { + return false, err + } + return v.(bool), nil +} diff --git a/vendor/github.com/src-d/gcfg/types/doc.go b/vendor/github.com/src-d/gcfg/types/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..9f9c345f6eab92e782476eef2a0319e7fd85710f --- /dev/null +++ b/vendor/github.com/src-d/gcfg/types/doc.go @@ -0,0 +1,4 @@ +// Package types defines helpers for type conversions. +// +// The API for this package is not finalized yet. +package types diff --git a/vendor/github.com/src-d/gcfg/types/enum.go b/vendor/github.com/src-d/gcfg/types/enum.go new file mode 100644 index 0000000000000000000000000000000000000000..1a0c7ef453dbacf08ddfa05f898c3149734da1ea --- /dev/null +++ b/vendor/github.com/src-d/gcfg/types/enum.go @@ -0,0 +1,44 @@ +package types + +import ( + "fmt" + "reflect" + "strings" +) + +// EnumParser parses "enum" values; i.e. a predefined set of strings to +// predefined values. 
+type EnumParser struct { + Type string // type name; if not set, use type of first value added + CaseMatch bool // if true, matching of strings is case-sensitive + // PrefixMatch bool + vals map[string]interface{} +} + +// AddVals adds strings and values to an EnumParser. +func (ep *EnumParser) AddVals(vals map[string]interface{}) { + if ep.vals == nil { + ep.vals = make(map[string]interface{}) + } + for k, v := range vals { + if ep.Type == "" { + ep.Type = reflect.TypeOf(v).Name() + } + if !ep.CaseMatch { + k = strings.ToLower(k) + } + ep.vals[k] = v + } +} + +// Parse parses the string and returns the value or an error. +func (ep EnumParser) Parse(s string) (interface{}, error) { + if !ep.CaseMatch { + s = strings.ToLower(s) + } + v, ok := ep.vals[s] + if !ok { + return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s) + } + return v, nil +} diff --git a/vendor/github.com/src-d/gcfg/types/int.go b/vendor/github.com/src-d/gcfg/types/int.go new file mode 100644 index 0000000000000000000000000000000000000000..af7e75c1250c5a15c5b7dc0eaa4b330c3d0cb1b7 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/types/int.go @@ -0,0 +1,86 @@ +package types + +import ( + "fmt" + "strings" +) + +// An IntMode is a mode for parsing integer values, representing a set of +// accepted bases. +type IntMode uint8 + +// IntMode values for ParseInt; can be combined using binary or. +const ( + Dec IntMode = 1 << iota + Hex + Oct +) + +// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`. +func (m IntMode) String() string { + var modes []string + if m&Dec != 0 { + modes = append(modes, "Dec") + } + if m&Hex != 0 { + modes = append(modes, "Hex") + } + if m&Oct != 0 { + modes = append(modes, "Oct") + } + return "IntMode(" + strings.Join(modes, "|") + ")" +} + +var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix") + +func prefix0(val string) bool { + return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0") +} + +func prefix0x(val string) bool { + return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x") +} + +// ParseInt parses val using mode into intptr, which must be a pointer to an +// integer kind type. Non-decimal value require prefix `0` or `0x` in the cases +// when mode permits ambiguity of base; otherwise the prefix can be omitted. +func ParseInt(intptr interface{}, val string, mode IntMode) error { + val = strings.TrimSpace(val) + verb := byte(0) + switch mode { + case Dec: + verb = 'd' + case Dec + Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Oct: + if prefix0(val) && !prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Hex + Oct: + verb = 'v' + case Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'x' + } + case Oct: + verb = 'o' + case Hex + Oct: + if prefix0(val) { + verb = 'v' + } else { + return errIntAmbig + } + } + if verb == 0 { + panic("unsupported mode") + } + return ScanFully(intptr, val, verb) +} diff --git a/vendor/github.com/src-d/gcfg/types/scan.go b/vendor/github.com/src-d/gcfg/types/scan.go new file mode 100644 index 0000000000000000000000000000000000000000..db2f6ed3caf622ce811c5d31b64b73d3bfe2f4c3 --- /dev/null +++ b/vendor/github.com/src-d/gcfg/types/scan.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "io" + "reflect" +) + +// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. 
+func ScanFully(ptr interface{}, val string, verb byte) error { + t := reflect.ValueOf(ptr).Elem().Type() + // attempt to read extra bytes to make sure the value is consumed + var b []byte + n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) + switch { + case n < 1 || n == 1 && err != io.EOF: + return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) + case n > 1: + return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) + } + // n == 1 && err == io.EOF + return nil +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6a66aea5eafe0ca6a688840c47219556c552488e --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000000000000000000000000000000000000..733099041f84fa1e58611ab2e11af51c1f26d1d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h new file mode 100644 index 0000000000000000000000000000000000000000..80ad2220fde81cee134b1a018b6ffcc90868d183 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h @@ -0,0 +1,8 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +#define REDMASK51 0x0007FFFFFFFFFFFF diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..0ad539885b7bad7cc5b3c70fdeaf5e344d1e65d8 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// These constants cannot be encoded in non-MOVQ immediates. +// We access them directly from memory instead. + +DATA ·_121666_213(SB)/8, $996687872 +GLOBL ·_121666_213(SB), 8, $8 + +DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA +GLOBL ·_2P0(SB), 8, $8 + +DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE +GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..45484d1b596f07092c1db2f3b33bdb13dd95551d --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s @@ -0,0 +1,88 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +// func cswap(inout *[5]uint64, v uint64) +TEXT ·cswap(SB),7,$0 + MOVQ inout+0(FP),DI + MOVQ v+8(FP),SI + + CMPQ SI,$1 + MOVQ 0(DI),SI + MOVQ 80(DI),DX + MOVQ 8(DI),CX + MOVQ 88(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,0(DI) + MOVQ DX,80(DI) + MOVQ CX,8(DI) + MOVQ R8,88(DI) + MOVQ 16(DI),SI + MOVQ 96(DI),DX + MOVQ 24(DI),CX + MOVQ 104(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,16(DI) + MOVQ DX,96(DI) + MOVQ CX,24(DI) + MOVQ R8,104(DI) + MOVQ 32(DI),SI + MOVQ 112(DI),DX + MOVQ 40(DI),CX + MOVQ 120(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,32(DI) + MOVQ DX,112(DI) + MOVQ CX,40(DI) + MOVQ R8,120(DI) + MOVQ 48(DI),SI + MOVQ 128(DI),DX + MOVQ 56(DI),CX + MOVQ 136(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,48(DI) + MOVQ DX,128(DI) + MOVQ CX,56(DI) + MOVQ R8,136(DI) + MOVQ 64(DI),SI + MOVQ 144(DI),DX + MOVQ 72(DI),CX + MOVQ 152(DI),R8 + MOVQ SI,R9 + CMOVQEQ DX,SI + CMOVQEQ R9,DX + MOVQ CX,R9 + CMOVQEQ R8,CX + CMOVQEQ R9,R8 + MOVQ SI,64(DI) + MOVQ DX,144(DI) + MOVQ CX,72(DI) + MOVQ R8,152(DI) + MOVQ DI,AX + MOVQ SI,DX + RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go new file mode 100644 index 0000000000000000000000000000000000000000..6918c47fc2eceeaf07a08dc0251efcb2e9af5958 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -0,0 +1,841 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// We have a implementation in amd64 assembly so this code is only run on +// non-amd64 platforms. The amd64 assembly does not support gccgo. +// +build !amd64 gccgo appengine + +package curve25519 + +// This code is a port of the public domain, "ref10" implementation of +// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. + +// fieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type fieldElement [10]int32 + +func feZero(fe *fieldElement) { + for i := range fe { + fe[i] = 0 + } +} + +func feOne(fe *fieldElement) { + feZero(fe) + fe[0] = 1 +} + +func feAdd(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] + b[i] + } +} + +func feSub(dst, a, b *fieldElement) { + for i := range dst { + dst[i] = a[i] - b[i] + } +} + +func feCopy(dst, src *fieldElement) { + for i := range dst { + dst[i] = src[i] + } +} + +// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func feCSwap(f, g *fieldElement, b int32) { + var x fieldElement + b = -b + for i := range x { + x[i] = b & (f[i] ^ g[i]) + } + + for i := range f { + f[i] ^= x[i] + } + for i := range g { + g[i] ^= x[i] + } +} + +// load3 reads a 24-bit, little-endian value from in. 
+func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +// load4 reads a 32-bit, little-endian value from in. +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func feFromBytes(dst *fieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := load3(src[29:]) << 2 + + var carry [10]int64 + carry[9] = (h9 + 1<<24) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + 1<<24) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + 1<<24) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + 1<<24) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + 1<<24) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + 1<<25) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + 1<<25) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + 1<<25) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + 1<<25) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + 1<<25) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + dst[0] = int32(h0) + dst[1] = int32(h1) + dst[2] = int32(h2) + dst[3] = int32(h3) + dst[4] = int32(h4) + dst[5] = int32(h5) + dst[6] = int32(h6) + dst[7] = int32(h7) + dst[8] = int32(h8) + dst[9] = int32(h9) +} + +// feToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func feToBytes(s *[32]byte, h *fieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +// feMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs can squeeze carries into int32. 
+func feMul(h, f, g *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + g0 := g[0] + g1 := g[1] + g2 := g[2] + g3 := g[3] + g4 := g[4] + g5 := g[5] + g6 := g[6] + g7 := g[7] + g8 := g[8] + g9 := g[9] + g1_19 := 19 * g1 // 1.4*2^29 + g2_19 := 19 * g2 // 1.4*2^30; still ok + g3_19 := 19 * g3 + g4_19 := 19 * g4 + g5_19 := 19 * g5 + g6_19 := 19 * g6 + g7_19 := 19 * g7 + g8_19 := 19 * g8 + g9_19 := 19 * g9 + f1_2 := 2 * f1 + f3_2 := 2 * f3 + f5_2 := 2 * f5 + f7_2 := 2 * f7 + f9_2 := 2 * f9 + f0g0 := int64(f0) * int64(g0) + f0g1 := int64(f0) * int64(g1) + f0g2 := int64(f0) * int64(g2) + f0g3 := int64(f0) * int64(g3) + f0g4 := int64(f0) * int64(g4) + f0g5 := int64(f0) * int64(g5) + f0g6 := int64(f0) * int64(g6) + f0g7 := int64(f0) * int64(g7) + f0g8 := int64(f0) * int64(g8) + f0g9 := int64(f0) * int64(g9) + f1g0 := int64(f1) * int64(g0) + f1g1_2 := int64(f1_2) * int64(g1) + f1g2 := int64(f1) * int64(g2) + f1g3_2 := int64(f1_2) * int64(g3) + f1g4 := int64(f1) * int64(g4) + f1g5_2 := int64(f1_2) * int64(g5) + f1g6 := int64(f1) * int64(g6) + f1g7_2 := int64(f1_2) * int64(g7) + f1g8 := int64(f1) * int64(g8) + f1g9_38 := int64(f1_2) * int64(g9_19) + f2g0 := int64(f2) * int64(g0) + f2g1 := int64(f2) * int64(g1) + f2g2 := int64(f2) * int64(g2) + f2g3 := int64(f2) * int64(g3) + f2g4 := int64(f2) * int64(g4) + f2g5 := int64(f2) * int64(g5) + f2g6 := int64(f2) * int64(g6) + f2g7 := int64(f2) * int64(g7) + f2g8_19 := int64(f2) * int64(g8_19) + f2g9_19 := int64(f2) * int64(g9_19) + f3g0 := int64(f3) * int64(g0) + f3g1_2 := int64(f3_2) * int64(g1) + f3g2 := int64(f3) * int64(g2) + f3g3_2 := int64(f3_2) * int64(g3) + f3g4 := int64(f3) * int64(g4) + f3g5_2 := int64(f3_2) * int64(g5) + f3g6 := int64(f3) * int64(g6) + f3g7_38 := int64(f3_2) * int64(g7_19) + f3g8_19 := int64(f3) * int64(g8_19) + f3g9_38 := int64(f3_2) * int64(g9_19) + f4g0 := int64(f4) * int64(g0) + f4g1 := int64(f4) * int64(g1) + f4g2 := int64(f4) * int64(g2) + f4g3 := int64(f4) * int64(g3) + f4g4 := int64(f4) * int64(g4) + f4g5 := int64(f4) * int64(g5) + f4g6_19 := int64(f4) * int64(g6_19) + f4g7_19 := int64(f4) * int64(g7_19) + f4g8_19 := int64(f4) * int64(g8_19) + f4g9_19 := int64(f4) * int64(g9_19) + f5g0 := int64(f5) * int64(g0) + f5g1_2 := int64(f5_2) * int64(g1) + f5g2 := int64(f5) * int64(g2) + f5g3_2 := int64(f5_2) * int64(g3) + f5g4 := int64(f5) * int64(g4) + f5g5_38 := int64(f5_2) * int64(g5_19) + f5g6_19 := int64(f5) * int64(g6_19) + f5g7_38 := int64(f5_2) * int64(g7_19) + f5g8_19 := int64(f5) * int64(g8_19) + f5g9_38 := int64(f5_2) * int64(g9_19) + f6g0 := int64(f6) * int64(g0) + f6g1 := int64(f6) * int64(g1) + f6g2 := int64(f6) * int64(g2) + f6g3 := int64(f6) * int64(g3) + f6g4_19 := int64(f6) * int64(g4_19) + f6g5_19 := int64(f6) * int64(g5_19) + f6g6_19 := int64(f6) * int64(g6_19) + f6g7_19 := int64(f6) * int64(g7_19) + f6g8_19 := int64(f6) * int64(g8_19) + f6g9_19 := int64(f6) * int64(g9_19) + f7g0 := int64(f7) * int64(g0) + f7g1_2 := int64(f7_2) * int64(g1) + f7g2 := int64(f7) * int64(g2) + f7g3_38 := int64(f7_2) * int64(g3_19) + f7g4_19 := int64(f7) * int64(g4_19) + f7g5_38 := int64(f7_2) * int64(g5_19) + f7g6_19 := int64(f7) * int64(g6_19) + f7g7_38 := int64(f7_2) * int64(g7_19) + f7g8_19 := int64(f7) * int64(g8_19) + f7g9_38 := int64(f7_2) * int64(g9_19) + f8g0 := int64(f8) * int64(g0) + f8g1 := int64(f8) * int64(g1) + f8g2_19 := int64(f8) * int64(g2_19) + f8g3_19 := int64(f8) * int64(g3_19) + f8g4_19 := int64(f8) * int64(g4_19) + 
f8g5_19 := int64(f8) * int64(g5_19) + f8g6_19 := int64(f8) * int64(g6_19) + f8g7_19 := int64(f8) * int64(g7_19) + f8g8_19 := int64(f8) * int64(g8_19) + f8g9_19 := int64(f8) * int64(g9_19) + f9g0 := int64(f9) * int64(g0) + f9g1_38 := int64(f9_2) * int64(g1_19) + f9g2_19 := int64(f9) * int64(g2_19) + f9g3_38 := int64(f9_2) * int64(g3_19) + f9g4_19 := int64(f9) * int64(g4_19) + f9g5_38 := int64(f9_2) * int64(g5_19) + f9g6_19 := int64(f9) * int64(g6_19) + f9g7_38 := int64(f9_2) * int64(g7_19) + f9g8_19 := int64(f9) * int64(g8_19) + f9g9_38 := int64(f9_2) * int64(g9_19) + h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 + h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 + h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 + h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 + h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 + h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 + h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 + h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 + h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 + h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 + var carry [10]int64 + + // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + // |h0| <= 2^25 + // |h4| <= 2^25 + // |h1| <= 1.51*2^58 + // |h5| <= 1.51*2^58 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + // |h1| <= 2^24; from now on fits into int32 + // |h5| <= 2^24; from now on fits into int32 + // |h2| <= 1.21*2^59 + // |h6| <= 1.21*2^59 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + // |h2| <= 2^25; from now on fits into int32 unchanged + // |h6| <= 2^25; from now on fits into int32 unchanged + // |h3| <= 1.51*2^58 + // |h7| <= 1.51*2^58 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + // |h3| <= 2^24; from now on fits into int32 unchanged + // |h7| <= 2^24; from now on fits into int32 unchanged + // |h4| <= 1.52*2^33 + // |h8| <= 1.52*2^33 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + // |h4| <= 2^25; from now on fits into int32 unchanged + // |h8| <= 2^25; from now on fits into int32 unchanged + // |h5| <= 1.01*2^24 + // |h9| <= 1.51*2^58 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + // |h9| <= 2^24; from now on fits into int32 unchanged + // |h0| <= 1.8*2^37 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + // |h0| <= 2^25; from now on fits into int32 unchanged + // |h1| <= 
1.01*2^24 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feSquare(h, f *fieldElement) { + f0 := f[0] + f1 := f[1] + f2 := f[2] + f3 := f[3] + f4 := f[4] + f5 := f[5] + f6 := f[6] + f7 := f[7] + f8 := f[8] + f9 := f[9] + f0_2 := 2 * f0 + f1_2 := 2 * f1 + f2_2 := 2 * f2 + f3_2 := 2 * f3 + f4_2 := 2 * f4 + f5_2 := 2 * f5 + f6_2 := 2 * f6 + f7_2 := 2 * f7 + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + f0f0 := int64(f0) * int64(f0) + f0f1_2 := int64(f0_2) * int64(f1) + f0f2_2 := int64(f0_2) * int64(f2) + f0f3_2 := int64(f0_2) * int64(f3) + f0f4_2 := int64(f0_2) * int64(f4) + f0f5_2 := int64(f0_2) * int64(f5) + f0f6_2 := int64(f0_2) * int64(f6) + f0f7_2 := int64(f0_2) * int64(f7) + f0f8_2 := int64(f0_2) * int64(f8) + f0f9_2 := int64(f0_2) * int64(f9) + f1f1_2 := int64(f1_2) * int64(f1) + f1f2_2 := int64(f1_2) * int64(f2) + f1f3_4 := int64(f1_2) * int64(f3_2) + f1f4_2 := int64(f1_2) * int64(f4) + f1f5_4 := int64(f1_2) * int64(f5_2) + f1f6_2 := int64(f1_2) * int64(f6) + f1f7_4 := int64(f1_2) * int64(f7_2) + f1f8_2 := int64(f1_2) * int64(f8) + f1f9_76 := int64(f1_2) * int64(f9_38) + f2f2 := int64(f2) * int64(f2) + f2f3_2 := int64(f2_2) * int64(f3) + f2f4_2 := int64(f2_2) * int64(f4) + f2f5_2 := int64(f2_2) * int64(f5) + f2f6_2 := int64(f2_2) * int64(f6) + f2f7_2 := int64(f2_2) * int64(f7) + f2f8_38 := int64(f2_2) * int64(f8_19) + f2f9_38 := int64(f2) * int64(f9_38) + f3f3_2 := int64(f3_2) * int64(f3) + f3f4_2 := int64(f3_2) * int64(f4) + f3f5_4 := int64(f3_2) * int64(f5_2) + f3f6_2 := int64(f3_2) * int64(f6) + f3f7_76 := int64(f3_2) * int64(f7_38) + f3f8_38 := int64(f3_2) * int64(f8_19) + f3f9_76 := int64(f3_2) * int64(f9_38) + f4f4 := int64(f4) * int64(f4) + f4f5_2 := int64(f4_2) * int64(f5) + f4f6_38 := int64(f4_2) * int64(f6_19) + f4f7_38 := int64(f4) * int64(f7_38) + f4f8_38 := int64(f4_2) * int64(f8_19) + f4f9_38 := int64(f4) * int64(f9_38) + f5f5_38 := int64(f5) * int64(f5_38) + f5f6_38 := int64(f5_2) * int64(f6_19) + f5f7_76 := int64(f5_2) * int64(f7_38) + f5f8_38 := int64(f5_2) * int64(f8_19) + f5f9_76 := int64(f5_2) * int64(f9_38) + f6f6_19 := int64(f6) * int64(f6_19) + f6f7_38 := int64(f6) * int64(f7_38) + f6f8_38 := int64(f6_2) * int64(f8_19) + f6f9_38 := int64(f6) * int64(f9_38) + f7f7_38 := int64(f7) * int64(f7_38) + f7f8_38 := int64(f7_2) * int64(f8_19) + f7f9_76 := int64(f7_2) * int64(f9_38) + f8f8_19 := int64(f8) * int64(f8_19) + f8f9_38 := int64(f8) * int64(f9_38) + f9f9_38 := int64(f9) * int64(f9_38) + h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 + h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 + h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 + h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 + h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 + h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 + h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 + h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 + h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 + h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 + var carry [10]int64 + + carry[0] 
= (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feMul121666 calculates h = f * 121666. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func feMul121666(h, f *fieldElement) { + h0 := int64(f[0]) * 121666 + h1 := int64(f[1]) * 121666 + h2 := int64(f[2]) * 121666 + h3 := int64(f[3]) * 121666 + h4 := int64(f[4]) * 121666 + h5 := int64(f[5]) * 121666 + h6 := int64(f[6]) * 121666 + h7 := int64(f[7]) * 121666 + h8 := int64(f[8]) * 121666 + h9 := int64(f[9]) * 121666 + var carry [10]int64 + + carry[9] = (h9 + (1 << 24)) >> 25 + h0 += carry[9] * 19 + h9 -= carry[9] << 25 + carry[1] = (h1 + (1 << 24)) >> 25 + h2 += carry[1] + h1 -= carry[1] << 25 + carry[3] = (h3 + (1 << 24)) >> 25 + h4 += carry[3] + h3 -= carry[3] << 25 + carry[5] = (h5 + (1 << 24)) >> 25 + h6 += carry[5] + h5 -= carry[5] << 25 + carry[7] = (h7 + (1 << 24)) >> 25 + h8 += carry[7] + h7 -= carry[7] << 25 + + carry[0] = (h0 + (1 << 25)) >> 26 + h1 += carry[0] + h0 -= carry[0] << 26 + carry[2] = (h2 + (1 << 25)) >> 26 + h3 += carry[2] + h2 -= carry[2] << 26 + carry[4] = (h4 + (1 << 25)) >> 26 + h5 += carry[4] + h4 -= carry[4] << 26 + carry[6] = (h6 + (1 << 25)) >> 26 + h7 += carry[6] + h6 -= carry[6] << 26 + carry[8] = (h8 + (1 << 25)) >> 26 + h9 += carry[8] + h8 -= carry[8] << 26 + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// feInvert sets out = z^-1. 
+func feInvert(out, z *fieldElement) { + var t0, t1, t2, t3 fieldElement + var i int + + feSquare(&t0, z) + for i = 1; i < 1; i++ { + feSquare(&t0, &t0) + } + feSquare(&t1, &t0) + for i = 1; i < 2; i++ { + feSquare(&t1, &t1) + } + feMul(&t1, z, &t1) + feMul(&t0, &t0, &t1) + feSquare(&t2, &t0) + for i = 1; i < 1; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t1, &t2) + feSquare(&t2, &t1) + for i = 1; i < 5; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 20; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 10; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t2, &t1) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t2, &t2, &t1) + feSquare(&t3, &t2) + for i = 1; i < 100; i++ { + feSquare(&t3, &t3) + } + feMul(&t2, &t3, &t2) + feSquare(&t2, &t2) + for i = 1; i < 50; i++ { + feSquare(&t2, &t2) + } + feMul(&t1, &t2, &t1) + feSquare(&t1, &t1) + for i = 1; i < 5; i++ { + feSquare(&t1, &t1) + } + feMul(out, &t1, &t0) +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + + copy(e[:], in[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement + feFromBytes(&x1, base) + feOne(&x2) + feCopy(&x3, &x1) + feOne(&z3) + + swap := int32(0) + for pos := 254; pos >= 0; pos-- { + b := e[pos/8] >> uint(pos&7) + b &= 1 + swap ^= int32(b) + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + swap = int32(b) + + feSub(&tmp0, &x3, &z3) + feSub(&tmp1, &x2, &z2) + feAdd(&x2, &x2, &z2) + feAdd(&z2, &x3, &z3) + feMul(&z3, &tmp0, &x2) + feMul(&z2, &z2, &tmp1) + feSquare(&tmp0, &tmp1) + feSquare(&tmp1, &x2) + feAdd(&x3, &z3, &z2) + feSub(&z2, &z3, &z2) + feMul(&x2, &tmp1, &tmp0) + feSub(&tmp1, &tmp1, &tmp0) + feSquare(&z2, &z2) + feMul121666(&z3, &tmp1) + feSquare(&x3, &x3) + feAdd(&tmp0, &tmp0, &z3) + feMul(&z3, &x1, &z2) + feMul(&z2, &tmp1, &tmp0) + } + + feCSwap(&x2, &x3, swap) + feCSwap(&z2, &z3, swap) + + feInvert(&z2, &z2) + feMul(&x2, &x2, &z2) + feToBytes(out, &x2) +} diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..ebeea3c2d6a58be6357daa32d8f54a0cae76cd88 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/doc.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package curve25519 provides an implementation of scalar multiplication on +// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html +package curve25519 // import "golang.org/x/crypto/curve25519" + +// basePoint is the x coordinate of the generator of the curve. +var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + +// ScalarMult sets dst to the product in*base where dst and base are the x +// coordinates of group points and all values are in little-endian form. +func ScalarMult(dst, in, base *[32]byte) { + scalarMult(dst, in, base) +} + +// ScalarBaseMult sets dst to the product in*base where dst and base are the x +// coordinates of group points, base is the standard generator and all values +// are in little-endian form. 
+func ScalarBaseMult(dst, in *[32]byte) { + ScalarMult(dst, in, &basePoint) +} diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..536479bf626c488fe51eda21e672532fbc56aca1 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func freeze(inout *[5]uint64) +TEXT ·freeze(SB),7,$0-8 + MOVQ inout+0(FP), DI + + MOVQ 0(DI),SI + MOVQ 8(DI),DX + MOVQ 16(DI),CX + MOVQ 24(DI),R8 + MOVQ 32(DI),R9 + MOVQ $REDMASK51,AX + MOVQ AX,R10 + SUBQ $18,R10 + MOVQ $3,R11 +REDUCELOOP: + MOVQ SI,R12 + SHRQ $51,R12 + ANDQ AX,SI + ADDQ R12,DX + MOVQ DX,R12 + SHRQ $51,R12 + ANDQ AX,DX + ADDQ R12,CX + MOVQ CX,R12 + SHRQ $51,R12 + ANDQ AX,CX + ADDQ R12,R8 + MOVQ R8,R12 + SHRQ $51,R12 + ANDQ AX,R8 + ADDQ R12,R9 + MOVQ R9,R12 + SHRQ $51,R12 + ANDQ AX,R9 + IMUL3Q $19,R12,R12 + ADDQ R12,SI + SUBQ $1,R11 + JA REDUCELOOP + MOVQ $1,R12 + CMPQ R10,SI + CMOVQLT R11,R12 + CMPQ AX,DX + CMOVQNE R11,R12 + CMPQ AX,CX + CMOVQNE R11,R12 + CMPQ AX,R8 + CMOVQNE R11,R12 + CMPQ AX,R9 + CMOVQNE R11,R12 + NEGQ R12 + ANDQ R12,AX + ANDQ R12,R10 + SUBQ R10,SI + SUBQ AX,DX + SUBQ AX,CX + SUBQ AX,R8 + SUBQ AX,R9 + MOVQ SI,0(DI) + MOVQ DX,8(DI) + MOVQ CX,16(DI) + MOVQ R8,24(DI) + MOVQ R9,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..7074e5cd9dcd4b1d01c3906b5459f6b042237445 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s @@ -0,0 +1,1377 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func ladderstep(inout *[5][5]uint64) +TEXT ·ladderstep(SB),0,$296-8 + MOVQ inout+0(FP),DI + + MOVQ 40(DI),SI + MOVQ 48(DI),DX + MOVQ 56(DI),CX + MOVQ 64(DI),R8 + MOVQ 72(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 80(DI),SI + ADDQ 88(DI),DX + ADDQ 96(DI),CX + ADDQ 104(DI),R8 + ADDQ 112(DI),R9 + SUBQ 80(DI),AX + SUBQ 88(DI),R10 + SUBQ 96(DI),R11 + SUBQ 104(DI),R12 + SUBQ 112(DI),R13 + MOVQ SI,0(SP) + MOVQ DX,8(SP) + MOVQ CX,16(SP) + MOVQ R8,24(SP) + MOVQ R9,32(SP) + MOVQ AX,40(SP) + MOVQ R10,48(SP) + MOVQ R11,56(SP) + MOVQ R12,64(SP) + MOVQ R13,72(SP) + MOVQ 40(SP),AX + MULQ 40(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 40(SP),AX + SHLQ $1,AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 48(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 48(SP),AX + SHLQ $1,AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 48(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 56(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 64(SP),DX + IMUL3Q $38,DX,AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 72(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(SP) + MOVQ R8,88(SP) + MOVQ R9,96(SP) + MOVQ AX,104(SP) + MOVQ R10,112(SP) + MOVQ 0(SP),AX + MULQ 0(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SP),AX + SHLQ $1,AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 8(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + SHLQ $1,AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 16(SP),DX + IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 24(SP),DX 
+ IMUL3Q $38,DX,AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 32(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(SP) + MOVQ R8,128(SP) + MOVQ R9,136(SP) + MOVQ AX,144(SP) + MOVQ R10,152(SP) + MOVQ SI,SI + MOVQ R8,DX + MOVQ R9,CX + MOVQ AX,R8 + MOVQ R10,R9 + ADDQ ·_2P0(SB),SI + ADDQ ·_2P1234(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R8 + ADDQ ·_2P1234(SB),R9 + SUBQ 80(SP),SI + SUBQ 88(SP),DX + SUBQ 96(SP),CX + SUBQ 104(SP),R8 + SUBQ 112(SP),R9 + MOVQ SI,160(SP) + MOVQ DX,168(SP) + MOVQ CX,176(SP) + MOVQ R8,184(SP) + MOVQ R9,192(SP) + MOVQ 120(DI),SI + MOVQ 128(DI),DX + MOVQ 136(DI),CX + MOVQ 144(DI),R8 + MOVQ 152(DI),R9 + MOVQ SI,AX + MOVQ DX,R10 + MOVQ CX,R11 + MOVQ R8,R12 + MOVQ R9,R13 + ADDQ ·_2P0(SB),AX + ADDQ ·_2P1234(SB),R10 + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 160(DI),SI + ADDQ 168(DI),DX + ADDQ 176(DI),CX + ADDQ 184(DI),R8 + ADDQ 192(DI),R9 + SUBQ 160(DI),AX + SUBQ 168(DI),R10 + SUBQ 176(DI),R11 + SUBQ 184(DI),R12 + SUBQ 192(DI),R13 + MOVQ SI,200(SP) + MOVQ DX,208(SP) + MOVQ CX,216(SP) + MOVQ R8,224(SP) + MOVQ R9,232(SP) + MOVQ AX,240(SP) + MOVQ R10,248(SP) + MOVQ R11,256(SP) + MOVQ R12,264(SP) + MOVQ R13,272(SP) + MOVQ 224(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,280(SP) + MULQ 56(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 232(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,288(SP) + MULQ 48(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 40(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 200(SP),AX + MULQ 48(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 200(SP),AX + MULQ 56(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 200(SP),AX + MULQ 64(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 200(SP),AX + MULQ 72(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 208(SP),AX + MULQ 40(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 48(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 56(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 208(SP),AX + MULQ 64(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),AX + MULQ 40(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 216(SP),AX + MULQ 48(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 216(SP),AX + MULQ 56(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 64(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 216(SP),DX + IMUL3Q $19,DX,AX + MULQ 72(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 224(SP),AX + MULQ 40(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 224(SP),AX + MULQ 48(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 280(SP),AX + MULQ 64(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 280(SP),AX + MULQ 72(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 232(SP),AX + MULQ 40(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 288(SP),AX + MULQ 56(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 288(SP),AX + MULQ 64(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 288(SP),AX + MULQ 72(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ 
$13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(SP) + MOVQ R8,48(SP) + MOVQ R9,56(SP) + MOVQ AX,64(SP) + MOVQ R10,72(SP) + MOVQ 264(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,200(SP) + MULQ 16(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 272(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,208(SP) + MULQ 8(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 0(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 240(SP),AX + MULQ 8(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 240(SP),AX + MULQ 16(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 240(SP),AX + MULQ 24(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 240(SP),AX + MULQ 32(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 248(SP),AX + MULQ 0(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 248(SP),AX + MULQ 8(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 248(SP),AX + MULQ 16(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 248(SP),AX + MULQ 24(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 248(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),AX + MULQ 0(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 256(SP),AX + MULQ 8(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 256(SP),AX + MULQ 16(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 24(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 256(SP),DX + IMUL3Q $19,DX,AX + MULQ 32(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 264(SP),AX + MULQ 0(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 264(SP),AX + MULQ 8(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 200(SP),AX + MULQ 24(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 200(SP),AX + MULQ 32(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 272(SP),AX + MULQ 0(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 208(SP),AX + MULQ 16(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 208(SP),AX + MULQ 24(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 208(SP),AX + MULQ 32(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,DX + MOVQ R8,CX + MOVQ R9,R11 + MOVQ AX,R12 + MOVQ R10,R13 + ADDQ ·_2P0(SB),DX + ADDQ ·_2P1234(SB),CX + ADDQ ·_2P1234(SB),R11 + ADDQ ·_2P1234(SB),R12 + ADDQ ·_2P1234(SB),R13 + ADDQ 40(SP),SI + ADDQ 48(SP),R8 + ADDQ 56(SP),R9 + ADDQ 64(SP),AX + ADDQ 72(SP),R10 + SUBQ 40(SP),DX + SUBQ 48(SP),CX + SUBQ 56(SP),R11 + SUBQ 64(SP),R12 + SUBQ 72(SP),R13 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ DX,160(DI) + MOVQ CX,168(DI) + MOVQ R11,176(DI) + MOVQ R12,184(DI) + MOVQ R13,192(DI) + MOVQ 120(DI),AX + MULQ 120(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 128(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 136(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 144(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(DI),AX + SHLQ $1,AX + MULQ 152(DI) + MOVQ AX,R14 + MOVQ DX,R15 + 
MOVQ 128(DI),AX + MULQ 128(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 136(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(DI),AX + SHLQ $1,AX + MULQ 144(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),AX + MULQ 136(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 144(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $19,DX,AX + MULQ 144(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(DI),DX + IMUL3Q $38,DX,AX + MULQ 152(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(DI),DX + IMUL3Q $19,DX,AX + MULQ 152(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,120(DI) + MOVQ R8,128(DI) + MOVQ R9,136(DI) + MOVQ AX,144(DI) + MOVQ R10,152(DI) + MOVQ 160(DI),AX + MULQ 160(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 168(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 176(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 184(DI) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 160(DI),AX + SHLQ $1,AX + MULQ 192(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 168(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 176(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + SHLQ $1,AX + MULQ 184(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 176(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 184(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),DX + IMUL3Q $38,DX,AX + MULQ 192(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + ANDQ DX,SI + MOVQ CX,R8 + SHRQ $51,CX + ADDQ R10,CX + ANDQ DX,R8 + MOVQ CX,R9 + SHRQ $51,CX + ADDQ R12,CX + ANDQ DX,R9 + MOVQ CX,AX + SHRQ $51,CX + ADDQ R14,CX + ANDQ DX,AX + MOVQ CX,R10 + SHRQ $51,CX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 184(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 16(DI) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 192(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 0(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 160(DI),AX + MULQ 8(DI) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 160(DI),AX + MULQ 16(DI) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 160(DI),AX + MULQ 24(DI) + MOVQ AX,R12 + MOVQ DX,R13 
+ MOVQ 160(DI),AX + MULQ 32(DI) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 168(DI),AX + MULQ 0(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 168(DI),AX + MULQ 8(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 168(DI),AX + MULQ 16(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 168(DI),AX + MULQ 24(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 168(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),AX + MULQ 0(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 176(DI),AX + MULQ 8(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 176(DI),AX + MULQ 16(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 24(DI) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 176(DI),DX + IMUL3Q $19,DX,AX + MULQ 32(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 184(DI),AX + MULQ 0(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 184(DI),AX + MULQ 8(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 24(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 32(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 192(DI),AX + MULQ 0(DI) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 16(DI) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 24(DI) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 32(DI) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,160(DI) + MOVQ R8,168(DI) + MOVQ R9,176(DI) + MOVQ AX,184(DI) + MOVQ R10,192(DI) + MOVQ 144(SP),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 96(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 152(SP),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 88(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 80(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 120(SP),AX + MULQ 88(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 120(SP),AX + MULQ 96(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 120(SP),AX + MULQ 104(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 120(SP),AX + MULQ 112(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 128(SP),AX + MULQ 80(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 128(SP),AX + MULQ 88(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 128(SP),AX + MULQ 96(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 128(SP),AX + MULQ 104(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 128(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),AX + MULQ 80(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 136(SP),AX + MULQ 88(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 136(SP),AX + MULQ 96(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 104(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 136(SP),DX + IMUL3Q $19,DX,AX + MULQ 112(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 144(SP),AX + MULQ 80(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 144(SP),AX + MULQ 88(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 104(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 112(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 152(SP),AX + MULQ 80(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 96(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 104(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 112(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + 
ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,40(DI) + MOVQ R8,48(DI) + MOVQ R9,56(DI) + MOVQ AX,64(DI) + MOVQ R10,72(DI) + MOVQ 160(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + MOVQ AX,SI + MOVQ DX,CX + MOVQ 168(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,CX + MOVQ DX,R8 + MOVQ 176(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R8 + MOVQ DX,R9 + MOVQ 184(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R9 + MOVQ DX,R10 + MOVQ 192(SP),AX + MULQ ·_121666_213(SB) + SHRQ $13,AX + ADDQ AX,R10 + IMUL3Q $19,DX,DX + ADDQ DX,SI + ADDQ 80(SP),SI + ADDQ 88(SP),CX + ADDQ 96(SP),R8 + ADDQ 104(SP),R9 + ADDQ 112(SP),R10 + MOVQ SI,80(DI) + MOVQ CX,88(DI) + MOVQ R8,96(DI) + MOVQ R9,104(DI) + MOVQ R10,112(DI) + MOVQ 104(DI),SI + IMUL3Q $19,SI,AX + MOVQ AX,0(SP) + MULQ 176(SP) + MOVQ AX,SI + MOVQ DX,CX + MOVQ 112(DI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 168(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 160(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 80(DI),AX + MULQ 168(SP) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 80(DI),AX + MULQ 176(SP) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 80(DI),AX + MULQ 184(SP) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 80(DI),AX + MULQ 192(SP) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 88(DI),AX + MULQ 160(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 88(DI),AX + MULQ 168(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 88(DI),AX + MULQ 176(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 88(DI),AX + MULQ 184(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 88(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),AX + MULQ 160(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 96(DI),AX + MULQ 168(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 96(DI),AX + MULQ 176(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 184(SP) + ADDQ AX,SI + ADCQ DX,CX + MOVQ 96(DI),DX + IMUL3Q $19,DX,AX + MULQ 192(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 104(DI),AX + MULQ 160(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 104(DI),AX + MULQ 168(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 0(SP),AX + MULQ 184(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SP),AX + MULQ 192(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 112(DI),AX + MULQ 160(SP) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SP),AX + MULQ 176(SP) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 8(SP),AX + MULQ 184(SP) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 192(SP) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ $REDMASK51,DX + SHLQ $13,CX:SI + ANDQ DX,SI + SHLQ $13,R9:R8 + ANDQ DX,R8 + ADDQ CX,R8 + SHLQ $13,R11:R10 + ANDQ DX,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ DX,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ DX,R14 + ADDQ R13,R14 + IMUL3Q $19,R15,CX + ADDQ CX,SI + MOVQ SI,CX + SHRQ $51,CX + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $51,CX + ANDQ DX,SI + ADDQ R10,CX + MOVQ CX,R9 + SHRQ $51,CX + ANDQ DX,R8 + ADDQ R12,CX + MOVQ CX,AX + SHRQ $51,CX + ANDQ DX,R9 + ADDQ R14,CX + MOVQ CX,R10 + SHRQ $51,CX + ANDQ DX,AX + IMUL3Q $19,CX,CX + ADDQ CX,SI + ANDQ DX,R10 + MOVQ SI,80(DI) + MOVQ R8,88(DI) + MOVQ R9,96(DI) + MOVQ AX,104(DI) + MOVQ R10,112(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go new file mode 100644 index 
0000000000000000000000000000000000000000..5822bd53383495f484075e14cdc5c18dc658101a --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package curve25519 + +// These functions are implemented in the .s files. The names of the functions +// in the rest of the file are also taken from the SUPERCOP sources to help +// people following along. + +//go:noescape + +func cswap(inout *[5]uint64, v uint64) + +//go:noescape + +func ladderstep(inout *[5][5]uint64) + +//go:noescape + +func freeze(inout *[5]uint64) + +//go:noescape + +func mul(dest, a, b *[5]uint64) + +//go:noescape + +func square(out, in *[5]uint64) + +// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. +func mladder(xr, zr *[5]uint64, s *[32]byte) { + var work [5][5]uint64 + + work[0] = *xr + setint(&work[1], 1) + setint(&work[2], 0) + work[3] = *xr + setint(&work[4], 1) + + j := uint(6) + var prevbit byte + + for i := 31; i >= 0; i-- { + for j < 8 { + bit := ((*s)[i] >> j) & 1 + swap := bit ^ prevbit + prevbit = bit + cswap(&work[1], uint64(swap)) + ladderstep(&work) + j-- + } + j = 7 + } + + *xr = work[1] + *zr = work[2] +} + +func scalarMult(out, in, base *[32]byte) { + var e [32]byte + copy(e[:], (*in)[:]) + e[0] &= 248 + e[31] &= 127 + e[31] |= 64 + + var t, z [5]uint64 + unpack(&t, base) + mladder(&t, &z, &e) + invert(&z, &z) + mul(&t, &t, &z) + pack(out, &t) +} + +func setint(r *[5]uint64, v uint64) { + r[0] = v + r[1] = 0 + r[2] = 0 + r[3] = 0 + r[4] = 0 +} + +// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian +// order. +func unpack(r *[5]uint64, x *[32]byte) { + r[0] = uint64(x[0]) | + uint64(x[1])<<8 | + uint64(x[2])<<16 | + uint64(x[3])<<24 | + uint64(x[4])<<32 | + uint64(x[5])<<40 | + uint64(x[6]&7)<<48 + + r[1] = uint64(x[6])>>3 | + uint64(x[7])<<5 | + uint64(x[8])<<13 | + uint64(x[9])<<21 | + uint64(x[10])<<29 | + uint64(x[11])<<37 | + uint64(x[12]&63)<<45 + + r[2] = uint64(x[12])>>6 | + uint64(x[13])<<2 | + uint64(x[14])<<10 | + uint64(x[15])<<18 | + uint64(x[16])<<26 | + uint64(x[17])<<34 | + uint64(x[18])<<42 | + uint64(x[19]&1)<<50 + + r[3] = uint64(x[19])>>1 | + uint64(x[20])<<7 | + uint64(x[21])<<15 | + uint64(x[22])<<23 | + uint64(x[23])<<31 | + uint64(x[24])<<39 | + uint64(x[25]&15)<<47 + + r[4] = uint64(x[25])>>4 | + uint64(x[26])<<4 | + uint64(x[27])<<12 | + uint64(x[28])<<20 | + uint64(x[29])<<28 | + uint64(x[30])<<36 | + uint64(x[31]&127)<<44 +} + +// pack sets out = x where out is the usual, little-endian form of the 5, +// 51-bit limbs in x. 
+func pack(out *[32]byte, x *[5]uint64) { + t := *x + freeze(&t) + + out[0] = byte(t[0]) + out[1] = byte(t[0] >> 8) + out[2] = byte(t[0] >> 16) + out[3] = byte(t[0] >> 24) + out[4] = byte(t[0] >> 32) + out[5] = byte(t[0] >> 40) + out[6] = byte(t[0] >> 48) + + out[6] ^= byte(t[1]<<3) & 0xf8 + out[7] = byte(t[1] >> 5) + out[8] = byte(t[1] >> 13) + out[9] = byte(t[1] >> 21) + out[10] = byte(t[1] >> 29) + out[11] = byte(t[1] >> 37) + out[12] = byte(t[1] >> 45) + + out[12] ^= byte(t[2]<<6) & 0xc0 + out[13] = byte(t[2] >> 2) + out[14] = byte(t[2] >> 10) + out[15] = byte(t[2] >> 18) + out[16] = byte(t[2] >> 26) + out[17] = byte(t[2] >> 34) + out[18] = byte(t[2] >> 42) + out[19] = byte(t[2] >> 50) + + out[19] ^= byte(t[3]<<1) & 0xfe + out[20] = byte(t[3] >> 7) + out[21] = byte(t[3] >> 15) + out[22] = byte(t[3] >> 23) + out[23] = byte(t[3] >> 31) + out[24] = byte(t[3] >> 39) + out[25] = byte(t[3] >> 47) + + out[25] ^= byte(t[4]<<4) & 0xf0 + out[26] = byte(t[4] >> 4) + out[27] = byte(t[4] >> 12) + out[28] = byte(t[4] >> 20) + out[29] = byte(t[4] >> 28) + out[30] = byte(t[4] >> 36) + out[31] = byte(t[4] >> 44) +} + +// invert calculates r = x^-1 mod p using Fermat's little theorem. +func invert(r *[5]uint64, x *[5]uint64) { + var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 + + square(&z2, x) /* 2 */ + square(&t, &z2) /* 4 */ + square(&t, &t) /* 8 */ + mul(&z9, &t, x) /* 9 */ + mul(&z11, &z9, &z2) /* 11 */ + square(&t, &z11) /* 22 */ + mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ + + square(&t, &z2_5_0) /* 2^6 - 2^1 */ + for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ + + square(&t, &z2_10_0) /* 2^11 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ + square(&t, &t) + } + mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ + + square(&t, &z2_20_0) /* 2^21 - 2^1 */ + for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ + square(&t, &t) + } + mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ + + square(&t, &t) /* 2^41 - 2^1 */ + for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ + square(&t, &t) + } + mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ + + square(&t, &z2_50_0) /* 2^51 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ + square(&t, &t) + } + mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ + + square(&t, &z2_100_0) /* 2^101 - 2^1 */ + for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ + square(&t, &t) + } + mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ + + square(&t, &t) /* 2^201 - 2^1 */ + for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ + square(&t, &t) + } + mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ + + square(&t, &t) /* 2^251 - 2^1 */ + square(&t, &t) /* 2^252 - 2^2 */ + square(&t, &t) /* 2^253 - 2^3 */ + + square(&t, &t) /* 2^254 - 2^4 */ + + square(&t, &t) /* 2^255 - 2^5 */ + mul(r, &t, &z11) /* 2^255 - 21 */ +} diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..b162e6515984ea83ebcdca8d96479fb9ab5780ad --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s @@ -0,0 +1,169 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
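The invert routine above builds x^(2^255 - 21) with a fixed chain of squarings and multiplications. Since the field prime is p = 2^255 - 19, that exponent is exactly p - 2, so by Fermat's little theorem the result is x^-1 mod p. A minimal math/big sketch of the same arithmetic, illustrative only and not part of the vendored sources:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)

	// p = 2^255 - 19, the field prime used by all of these routines.
	p := new(big.Int).Lsh(one, 255)
	p.Sub(p, big.NewInt(19))

	// invert's addition chain computes x^(2^255 - 21).
	e := new(big.Int).Lsh(one, 255)
	e.Sub(e, big.NewInt(21))

	// 2^255 - 21 is exactly p - 2, the Fermat inversion exponent.
	pm2 := new(big.Int).Sub(p, big.NewInt(2))
	fmt.Println("exponent == p-2:", e.Cmp(pm2) == 0)

	// Sanity check on a sample value: x * x^(p-2) must be 1 (mod p).
	x := big.NewInt(123456789)
	inv := new(big.Int).Exp(x, pm2, p)
	prod := new(big.Int).Mul(inv, x)
	prod.Mod(prod, p)
	fmt.Println("x * x^(p-2) mod p == 1:", prod.Cmp(one) == 0)
}

The fixed square-and-multiply chain is used instead of a generic modular exponentiation so the inversion runs in constant time, with no secret-dependent branching.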
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func mul(dest, a, b *[5]uint64) +TEXT ·mul(SB),0,$16-24 + MOVQ dest+0(FP), DI + MOVQ a+8(FP), SI + MOVQ b+16(FP), DX + + MOVQ DX,CX + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,0(SP) + MULQ 16(CX) + MOVQ AX,R8 + MOVQ DX,R9 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MOVQ AX,8(SP) + MULQ 8(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 0(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 0(SI),AX + MULQ 8(CX) + MOVQ AX,R10 + MOVQ DX,R11 + MOVQ 0(SI),AX + MULQ 16(CX) + MOVQ AX,R12 + MOVQ DX,R13 + MOVQ 0(SI),AX + MULQ 24(CX) + MOVQ AX,R14 + MOVQ DX,R15 + MOVQ 0(SI),AX + MULQ 32(CX) + MOVQ AX,BX + MOVQ DX,BP + MOVQ 8(SI),AX + MULQ 0(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SI),AX + MULQ 8(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SI),AX + MULQ 16(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 8(SI),AX + MULQ 24(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),AX + MULQ 0(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 16(SI),AX + MULQ 8(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 16(SI),AX + MULQ 16(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(CX) + ADDQ AX,R8 + ADCQ DX,R9 + MOVQ 16(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 24(SI),AX + MULQ 0(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ 24(SI),AX + MULQ 8(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 0(SP),AX + MULQ 24(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 0(SP),AX + MULQ 32(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 32(SI),AX + MULQ 0(CX) + ADDQ AX,BX + ADCQ DX,BP + MOVQ 8(SP),AX + MULQ 16(CX) + ADDQ AX,R10 + ADCQ DX,R11 + MOVQ 8(SP),AX + MULQ 24(CX) + ADDQ AX,R12 + ADCQ DX,R13 + MOVQ 8(SP),AX + MULQ 32(CX) + ADDQ AX,R14 + ADCQ DX,R15 + MOVQ $REDMASK51,SI + SHLQ $13,R9:R8 + ANDQ SI,R8 + SHLQ $13,R11:R10 + ANDQ SI,R10 + ADDQ R9,R10 + SHLQ $13,R13:R12 + ANDQ SI,R12 + ADDQ R11,R12 + SHLQ $13,R15:R14 + ANDQ SI,R14 + ADDQ R13,R14 + SHLQ $13,BP:BX + ANDQ SI,BX + ADDQ R15,BX + IMUL3Q $19,BP,DX + ADDQ DX,R8 + MOVQ R8,DX + SHRQ $51,DX + ADDQ R10,DX + MOVQ DX,CX + SHRQ $51,DX + ANDQ SI,R8 + ADDQ R12,DX + MOVQ DX,R9 + SHRQ $51,DX + ANDQ SI,CX + ADDQ R14,DX + MOVQ DX,AX + SHRQ $51,DX + ANDQ SI,R9 + ADDQ BX,DX + MOVQ DX,R10 + SHRQ $51,DX + ANDQ SI,AX + IMUL3Q $19,DX,DX + ADDQ DX,R8 + ANDQ SI,R10 + MOVQ R8,0(DI) + MOVQ CX,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s new file mode 100644 index 0000000000000000000000000000000000000000..4e864a83ef5ecd751c900f12fd74f9828b803804 --- /dev/null +++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s @@ -0,0 +1,132 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
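The mul routine above, like the squarings in the ladder code before it, ends with the same reduction pattern: each limb is masked with REDMASK51 (2^51 - 1) and the carry out of the top limb is folded back into the bottom limb multiplied by 19, using 2^255 ≡ 19 (mod 2^255 - 19). A pure-Go sketch of that carry step over the five 51-bit limbs described in mont25519_amd64.go, illustrative only and not part of the vendored files:

package main

import "fmt"

// carry propagates carries through five 51-bit limbs of a field element
// modulo 2^255 - 19: a carry out of the top limb has weight 2^255, and
// 2^255 ≡ 19 (mod 2^255 - 19), so it folds into the bottom limb times 19.
func carry(r *[5]uint64) {
	const mask51 = uint64(1)<<51 - 1 // same value as REDMASK51
	for i := 0; i < 4; i++ {
		c := r[i] >> 51
		r[i] &= mask51
		r[i+1] += c
	}
	c := r[4] >> 51
	r[4] &= mask51
	r[0] += 19 * c
}

func main() {
	// An over-full limb vector: limb 0 holds 2^52 + 3, the rest are zero.
	r := [5]uint64{1<<52 + 3}
	carry(&r)
	fmt.Println(r) // [3 2 0 0 0]
}

The 19 and 38 multipliers in the IMUL3Q instructions come from the same identity: limb products that would land at weight 2^255 or above are pre-multiplied by 19, and by 2*19 = 38 for the doubled cross terms in square.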
+ +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html + +// +build amd64,!gccgo,!appengine + +#include "const_amd64.h" + +// func square(out, in *[5]uint64) +TEXT ·square(SB),7,$0-16 + MOVQ out+0(FP), DI + MOVQ in+8(FP), SI + + MOVQ 0(SI),AX + MULQ 0(SI) + MOVQ AX,CX + MOVQ DX,R8 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 8(SI) + MOVQ AX,R9 + MOVQ DX,R10 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 16(SI) + MOVQ AX,R11 + MOVQ DX,R12 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 24(SI) + MOVQ AX,R13 + MOVQ DX,R14 + MOVQ 0(SI),AX + SHLQ $1,AX + MULQ 32(SI) + MOVQ AX,R15 + MOVQ DX,BX + MOVQ 8(SI),AX + MULQ 8(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 16(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ 8(SI),AX + SHLQ $1,AX + MULQ 24(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 8(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),AX + MULQ 16(SI) + ADDQ AX,R15 + ADCQ DX,BX + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 24(SI) + ADDQ AX,CX + ADCQ DX,R8 + MOVQ 16(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $19,DX,AX + MULQ 24(SI) + ADDQ AX,R9 + ADCQ DX,R10 + MOVQ 24(SI),DX + IMUL3Q $38,DX,AX + MULQ 32(SI) + ADDQ AX,R11 + ADCQ DX,R12 + MOVQ 32(SI),DX + IMUL3Q $19,DX,AX + MULQ 32(SI) + ADDQ AX,R13 + ADCQ DX,R14 + MOVQ $REDMASK51,SI + SHLQ $13,R8:CX + ANDQ SI,CX + SHLQ $13,R10:R9 + ANDQ SI,R9 + ADDQ R8,R9 + SHLQ $13,R12:R11 + ANDQ SI,R11 + ADDQ R10,R11 + SHLQ $13,R14:R13 + ANDQ SI,R13 + ADDQ R12,R13 + SHLQ $13,BX:R15 + ANDQ SI,R15 + ADDQ R14,R15 + IMUL3Q $19,BX,DX + ADDQ DX,CX + MOVQ CX,DX + SHRQ $51,DX + ADDQ R9,DX + ANDQ SI,CX + MOVQ DX,R8 + SHRQ $51,DX + ADDQ R11,DX + ANDQ SI,R8 + MOVQ DX,R9 + SHRQ $51,DX + ADDQ R13,DX + ANDQ SI,R9 + MOVQ DX,AX + SHRQ $51,DX + ADDQ R15,DX + ANDQ SI,AX + MOVQ DX,R10 + SHRQ $51,DX + IMUL3Q $19,DX,DX + ADDQ DX,CX + ANDQ SI,R10 + MOVQ CX,0(DI) + MOVQ R8,8(DI) + MOVQ R9,16(DI) + MOVQ AX,24(DI) + MOVQ R10,32(DI) + RET diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 0000000000000000000000000000000000000000..f1d95674ac3f7562ee613705d0ff281ac97e9772 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,181 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// http://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519†function defined in +// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05. +package ed25519 + +// This code is a port of the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +import ( + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "crypto/subtle" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. 
+type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) { + if rand == nil { + rand = cryptorand.Reader + } + + privateKey = make([]byte, PrivateKeySize) + publicKey = make([]byte, PublicKeySize) + _, err = io.ReadFull(rand, privateKey[:32]) + if err != nil { + return nil, nil, err + } + + digest := sha512.Sum512(privateKey[:32]) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + copy(privateKey[32:], publicKeyBytes[:]) + copy(publicKey, publicKeyBytes[:]) + + return publicKey, privateKey, nil +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. 
+func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var b [32]byte + copy(b[:], sig[32:]) + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) + + var checkR [32]byte + R.ToBytes(&checkR) + return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 0000000000000000000000000000000000000000..e39f086c1d87dbedcb0cd0ccb4d302bae2c04dbc --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
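For reference, a minimal usage sketch of the ed25519 API defined above (GenerateKey, Sign, Verify); this is illustrative only and not part of the vendored file or this diff:

package main

import (
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Passing nil makes GenerateKey fall back to crypto/rand.Reader.
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, ed25519")
	sig := ed25519.Sign(priv, msg) // 64-byte signature

	fmt.Println("signature valid:", ed25519.Verify(pub, msg, sig))
}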
+var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. 
+var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + { + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, 
+ { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 0000000000000000000000000000000000000000..5f8b9947872716c50edb0392432670240973ce3b --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1771 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package edwards25519 + +// This code is a port of the public domain, “ref10†implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). +// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). +// +// Proof: +// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. +// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. +// +// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). +// Then 0<y<1. +// +// Write r=h-pq. +// Have 0<=r<=p-1=2^255-20. +// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. +// +// Write x=r+19(2^-255)r+y. +// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. +// +// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) +// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. +func FeToBytes(s *[32]byte, h *FieldElement) { + var carry [10]int32 + + q := (19*h[9] + (1 << 24)) >> 25 + q = (h[0] + q) >> 26 + q = (h[1] + q) >> 25 + q = (h[2] + q) >> 26 + q = (h[3] + q) >> 25 + q = (h[4] + q) >> 26 + q = (h[5] + q) >> 25 + q = (h[6] + q) >> 26 + q = (h[7] + q) >> 25 + q = (h[8] + q) >> 26 + q = (h[9] + q) >> 25 + + // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. + h[0] += 19 * q + // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
+ + carry[0] = h[0] >> 26 + h[1] += carry[0] + h[0] -= carry[0] << 26 + carry[1] = h[1] >> 25 + h[2] += carry[1] + h[1] -= carry[1] << 25 + carry[2] = h[2] >> 26 + h[3] += carry[2] + h[2] -= carry[2] << 26 + carry[3] = h[3] >> 25 + h[4] += carry[3] + h[3] -= carry[3] << 25 + carry[4] = h[4] >> 26 + h[5] += carry[4] + h[4] -= carry[4] << 26 + carry[5] = h[5] >> 25 + h[6] += carry[5] + h[5] -= carry[5] << 25 + carry[6] = h[6] >> 26 + h[7] += carry[6] + h[6] -= carry[6] << 26 + carry[7] = h[7] >> 25 + h[8] += carry[7] + h[7] -= carry[7] << 25 + carry[8] = h[8] >> 26 + h[9] += carry[8] + h[8] -= carry[8] << 26 + carry[9] = h[9] >> 25 + h[9] -= carry[9] << 25 + // h10 = carry9 + + // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. + // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; + // evidently 2^255 h10-2^255 q = 0. + // Goal: Output h[0]+...+2^230 h[9]. + + s[0] = byte(h[0] >> 0) + s[1] = byte(h[0] >> 8) + s[2] = byte(h[0] >> 16) + s[3] = byte((h[0] >> 24) | (h[1] << 2)) + s[4] = byte(h[1] >> 6) + s[5] = byte(h[1] >> 14) + s[6] = byte((h[1] >> 22) | (h[2] << 3)) + s[7] = byte(h[2] >> 5) + s[8] = byte(h[2] >> 13) + s[9] = byte((h[2] >> 21) | (h[3] << 5)) + s[10] = byte(h[3] >> 3) + s[11] = byte(h[3] >> 11) + s[12] = byte((h[3] >> 19) | (h[4] << 6)) + s[13] = byte(h[4] >> 2) + s[14] = byte(h[4] >> 10) + s[15] = byte(h[4] >> 18) + s[16] = byte(h[5] >> 0) + s[17] = byte(h[5] >> 8) + s[18] = byte(h[5] >> 16) + s[19] = byte((h[5] >> 24) | (h[6] << 1)) + s[20] = byte(h[6] >> 7) + s[21] = byte(h[6] >> 15) + s[22] = byte((h[6] >> 23) | (h[7] << 3)) + s[23] = byte(h[7] >> 5) + s[24] = byte(h[7] >> 13) + s[25] = byte((h[7] >> 21) | (h[8] << 4)) + s[26] = byte(h[8] >> 4) + s[27] = byte(h[8] >> 12) + s[28] = byte((h[8] >> 20) | (h[9] << 6)) + s[29] = byte(h[9] >> 2) + s[30] = byte(h[9] >> 10) + s[31] = byte(h[9] >> 18) +} + +func FeIsNegative(f *FieldElement) byte { + var s [32]byte + FeToBytes(&s, f) + return s[0] & 1 +} + +func FeIsNonZero(f *FieldElement) int32 { + var s [32]byte + FeToBytes(&s, f) + var x uint8 + for _, b := range s { + x |= b + } + x |= x >> 4 + x |= x >> 2 + x |= x >> 1 + return int32(x & 1) +} + +// FeNeg sets h = -f +// +// Preconditions: +// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +func FeNeg(h, f *FieldElement) { + h[0] = -f[0] + h[1] = -f[1] + h[2] = -f[2] + h[3] = -f[3] + h[4] = -f[4] + h[5] = -f[5] + h[6] = -f[6] + h[7] = -f[7] + h[8] = -f[8] + h[9] = -f[9] +} + +func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 + + /* + |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) + i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 + |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) + i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
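FeMul below is the workhorse of the field arithmetic. As a quick illustration of the contract stated above (operands in the 10-limb radix-2^25.5 form, result reduced back into the usual bounds), here is a minimal hypothetical in-package test sketch — illustration only, not part of the upstream ref10 port — checking that multiplication by one round-trips through the byte encoding unchanged:

    package edwards25519

    import "testing"

    // TestFeMulIdentity is an illustrative sanity check, not upstream code:
    // multiplying a field element by 1 must leave its canonical encoding
    // unchanged.
    func TestFeMulIdentity(t *testing.T) {
        var in [32]byte
        in[0] = 9 // a small element, well below 2^255-19

        var x, one, out FieldElement
        FeFromBytes(&x, &in)
        FeOne(&one)
        FeMul(&out, &x, &one)

        var got [32]byte
        FeToBytes(&got, &out)
        if got != in {
            t.Fatalf("x*1 != x: got %v, want %v", got, in)
        }
    }
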
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
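FeSquare (further below) is a specialisation of FeMul that exploits the symmetry of f*f, so the two must always agree. A hypothetical consistency check in the same vein as the previous sketch, again not upstream code:

    package edwards25519

    import "testing"

    // TestFeSquareMatchesFeMul is an illustrative check, not upstream code:
    // squaring via FeSquare must equal the generic product FeMul(f, f).
    func TestFeSquareMatchesFeMul(t *testing.T) {
        var in [32]byte
        in[0], in[10], in[20] = 0x9d, 0x37, 0x5c // arbitrary test value

        var f, sq, prod FieldElement
        FeFromBytes(&f, &in)
        FeSquare(&sq, &f)
        FeMul(&prod, &f, &f)

        var a, b [32]byte
        FeToBytes(&a, &sq)
        FeToBytes(&b, &prod)
        if a != b {
            t.Fatalf("FeSquare and FeMul disagree: %x vs %x", a, b)
        }
    }
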
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise. 
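selectPoint (below) depends on equal and negative being branch-free 0/1 predicates, so table lookups into the precomputed base stay constant-time. A hypothetical in-package sketch of what those predicates guarantee — illustration only, not upstream code:

    package edwards25519

    import "testing"

    // TestBranchFreePredicates is an illustrative check, not upstream code.
    func TestBranchFreePredicates(t *testing.T) {
        if equal(5, 5) != 1 || equal(5, 6) != 0 {
            t.Fatal("equal must return 1 only when its operands match")
        }
        if negative(-3) != 1 || negative(0) != 0 || negative(7) != 0 {
            t.Fatal("negative must return 1 only for negative inputs")
        }
    }
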
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
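Because the scalar routines decompose each 32-byte scalar into 12 limbs of 21 bits, inputs far below the group order behave exactly like ordinary integers, which makes ScMulAdd easy to spot-check. A hypothetical in-package sketch, not upstream code:

    package edwards25519

    import "testing"

    // TestScMulAddSmall is an illustrative check, not upstream code: for
    // scalars far below the group order l, (a*b + c) mod l is plain integer
    // arithmetic on the little-endian encodings.
    func TestScMulAddSmall(t *testing.T) {
        var a, b, c, s [32]byte
        a[0], b[0], c[0] = 2, 3, 4

        ScMulAdd(&s, &a, &b, &c)

        var want [32]byte
        want[0] = 10 // 2*3 + 4
        if s != want {
            t.Fatalf("ScMulAdd: got %v, want %v", s, want)
        }
    }
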
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go new file mode 100644 index 0000000000000000000000000000000000000000..ecfd7c58d134f9d1e3feb34d1f050573d12cb2ea --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/client.go @@ -0,0 +1,659 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package agent implements the ssh-agent protocol, and provides both +// a client and a server. The client can talk to a standard ssh-agent +// that uses UNIX sockets, and one could implement an alternative +// ssh-agent process using the sample server. +// +// References: +// [PROTOCOL.agent]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.agent?rev=HEAD +package agent // import "golang.org/x/crypto/ssh/agent" + +import ( + "bytes" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "sync" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" +) + +// Agent represents the capabilities of an ssh-agent. +type Agent interface { + // List returns the identities known to the agent. + List() ([]*Key, error) + + // Sign has the agent sign the data using a protocol 2 key as defined + // in [PROTOCOL.agent] section 2.6.2. + Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) + + // Add adds a private key to the agent. + Add(key AddedKey) error + + // Remove removes all identities with the given public key. + Remove(key ssh.PublicKey) error + + // RemoveAll removes all identities. + RemoveAll() error + + // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list. + Lock(passphrase []byte) error + + // Unlock undoes the effect of Lock + Unlock(passphrase []byte) error + + // Signers returns signers for all the known keys. + Signers() ([]ssh.Signer, error) +} + +// AddedKey describes an SSH key to be added to an Agent. +type AddedKey struct { + // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey or + // *ecdsa.PrivateKey, which will be inserted into the agent. + PrivateKey interface{} + // Certificate, if not nil, is communicated to the agent and will be + // stored with the key. + Certificate *ssh.Certificate + // Comment is an optional, free-form string. + Comment string + // LifetimeSecs, if not zero, is the number of seconds that the + // agent will store the key for. + LifetimeSecs uint32 + // ConfirmBeforeUse, if true, requests that the agent confirm with the + // user before each use of this key. + ConfirmBeforeUse bool +} + +// See [PROTOCOL.agent], section 3. 
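The Agent interface above is most commonly backed by the ssh-agent already running on the developer's machine. A minimal hypothetical sketch of a caller outside this package; the socket path comes from $SSH_AUTH_SOCK and error handling is abbreviated:

    package main

    import (
        "fmt"
        "log"
        "net"
        "os"

        "golang.org/x/crypto/ssh/agent"
    )

    func main() {
        // Connect to the local ssh-agent over its UNIX socket.
        conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        ag := agent.NewClient(conn)
        keys, err := ag.List()
        if err != nil {
            log.Fatal(err)
        }
        for _, k := range keys {
            fmt.Println(k.String()) // e.g. "ssh-ed25519 AAAA... laptop"
        }
    }
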
+const ( + agentRequestV1Identities = 1 + agentRemoveAllV1Identities = 9 + + // 3.2 Requests from client to agent for protocol 2 key operations + agentAddIdentity = 17 + agentRemoveIdentity = 18 + agentRemoveAllIdentities = 19 + agentAddIdConstrained = 25 + + // 3.3 Key-type independent requests from client to agent + agentAddSmartcardKey = 20 + agentRemoveSmartcardKey = 21 + agentLock = 22 + agentUnlock = 23 + agentAddSmartcardKeyConstrained = 26 + + // 3.7 Key constraint identifiers + agentConstrainLifetime = 1 + agentConstrainConfirm = 2 +) + +// maxAgentResponseBytes is the maximum agent reply size that is accepted. This +// is a sanity check, not a limit in the spec. +const maxAgentResponseBytes = 16 << 20 + +// Agent messages: +// These structures mirror the wire format of the corresponding ssh agent +// messages found in [PROTOCOL.agent]. + +// 3.4 Generic replies from agent to client +const agentFailure = 5 + +type failureAgentMsg struct{} + +const agentSuccess = 6 + +type successAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentRequestIdentities = 11 + +type requestIdentitiesAgentMsg struct{} + +// See [PROTOCOL.agent], section 2.5.2. +const agentIdentitiesAnswer = 12 + +type identitiesAnswerAgentMsg struct { + NumKeys uint32 `sshtype:"12"` + Keys []byte `ssh:"rest"` +} + +// See [PROTOCOL.agent], section 2.6.2. +const agentSignRequest = 13 + +type signRequestAgentMsg struct { + KeyBlob []byte `sshtype:"13"` + Data []byte + Flags uint32 +} + +// See [PROTOCOL.agent], section 2.6.2. + +// 3.6 Replies from agent to client for protocol 2 key operations +const agentSignResponse = 14 + +type signResponseAgentMsg struct { + SigBlob []byte `sshtype:"14"` +} + +type publicKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +// Key represents a protocol 2 public key as defined in +// [PROTOCOL.agent], section 2.5.2. +type Key struct { + Format string + Blob []byte + Comment string +} + +func clientErr(err error) error { + return fmt.Errorf("agent: client error: %v", err) +} + +// String returns the storage form of an agent key with the format, base64 +// encoded serialized key, and the comment if it is not empty. +func (k *Key) String() string { + s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) + + if k.Comment != "" { + s += " " + k.Comment + } + + return s +} + +// Type returns the public key type. +func (k *Key) Type() string { + return k.Format +} + +// Marshal returns key blob to satisfy the ssh.PublicKey interface. +func (k *Key) Marshal() []byte { + return k.Blob +} + +// Verify satisfies the ssh.PublicKey interface. +func (k *Key) Verify(data []byte, sig *ssh.Signature) error { + pubKey, err := ssh.ParsePublicKey(k.Blob) + if err != nil { + return fmt.Errorf("agent: bad public key: %v", err) + } + return pubKey.Verify(data, sig) +} + +type wireKey struct { + Format string + Rest []byte `ssh:"rest"` +} + +func parseKey(in []byte) (out *Key, rest []byte, err error) { + var record struct { + Blob []byte + Comment string + Rest []byte `ssh:"rest"` + } + + if err := ssh.Unmarshal(in, &record); err != nil { + return nil, nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(record.Blob, &wk); err != nil { + return nil, nil, err + } + + return &Key{ + Format: wk.Format, + Blob: record.Blob, + Comment: record.Comment, + }, record.Rest, nil +} + +// client is a client for an ssh-agent process. 
+type client struct { + // conn is typically a *net.UnixConn + conn io.ReadWriter + // mu is used to prevent concurrent access to the agent + mu sync.Mutex +} + +// NewClient returns an Agent that talks to an ssh-agent process over +// the given connection. +func NewClient(rw io.ReadWriter) Agent { + return &client{conn: rw} +} + +// call sends an RPC to the agent. On success, the reply is +// unmarshaled into reply and replyType is set to the first byte of +// the reply, which contains the type of the message. +func (c *client) call(req []byte) (reply interface{}, err error) { + c.mu.Lock() + defer c.mu.Unlock() + + msg := make([]byte, 4+len(req)) + binary.BigEndian.PutUint32(msg, uint32(len(req))) + copy(msg[4:], req) + if _, err = c.conn.Write(msg); err != nil { + return nil, clientErr(err) + } + + var respSizeBuf [4]byte + if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { + return nil, clientErr(err) + } + respSize := binary.BigEndian.Uint32(respSizeBuf[:]) + if respSize > maxAgentResponseBytes { + return nil, clientErr(err) + } + + buf := make([]byte, respSize) + if _, err = io.ReadFull(c.conn, buf); err != nil { + return nil, clientErr(err) + } + reply, err = unmarshal(buf) + if err != nil { + return nil, clientErr(err) + } + return reply, err +} + +func (c *client) simpleCall(req []byte) error { + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +func (c *client) RemoveAll() error { + return c.simpleCall([]byte{agentRemoveAllIdentities}) +} + +func (c *client) Remove(key ssh.PublicKey) error { + req := ssh.Marshal(&agentRemoveIdentityMsg{ + KeyBlob: key.Marshal(), + }) + return c.simpleCall(req) +} + +func (c *client) Lock(passphrase []byte) error { + req := ssh.Marshal(&agentLockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +func (c *client) Unlock(passphrase []byte) error { + req := ssh.Marshal(&agentUnlockMsg{ + Passphrase: passphrase, + }) + return c.simpleCall(req) +} + +// List returns the identities known to the agent. +func (c *client) List() ([]*Key, error) { + // see [PROTOCOL.agent] section 2.5.2. + req := []byte{agentRequestIdentities} + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *identitiesAnswerAgentMsg: + if msg.NumKeys > maxAgentResponseBytes/8 { + return nil, errors.New("agent: too many keys in agent reply") + } + keys := make([]*Key, msg.NumKeys) + data := msg.Keys + for i := uint32(0); i < msg.NumKeys; i++ { + var key *Key + var err error + if key, data, err = parseKey(data); err != nil { + return nil, err + } + keys[i] = key + } + return keys, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to list keys") + } + panic("unreachable") +} + +// Sign has the agent sign the data using a protocol 2 key as defined +// in [PROTOCOL.agent] section 2.6.2. 
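Sign pairs naturally with Key.Verify: the agent keeps the private key, while the *Key values returned by List double as the ssh.PublicKey needed to check the result. A hypothetical helper sketching that round trip for any connected Agent (signAndVerify is an illustrative name, not part of this package):

    package main

    import (
        "errors"

        "golang.org/x/crypto/ssh/agent"
    )

    // signAndVerify is an illustrative helper: it signs data with the first
    // identity held by the agent and verifies the signature against that
    // same public key.
    func signAndVerify(ag agent.Agent, data []byte) error {
        keys, err := ag.List()
        if err != nil {
            return err
        }
        if len(keys) == 0 {
            return errors.New("agent holds no keys")
        }
        sig, err := ag.Sign(keys[0], data)
        if err != nil {
            return err
        }
        return keys[0].Verify(data, sig)
    }
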
+func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + req := ssh.Marshal(signRequestAgentMsg{ + KeyBlob: key.Marshal(), + Data: data, + }) + + msg, err := c.call(req) + if err != nil { + return nil, err + } + + switch msg := msg.(type) { + case *signResponseAgentMsg: + var sig ssh.Signature + if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { + return nil, err + } + + return &sig, nil + case *failureAgentMsg: + return nil, errors.New("agent: failed to sign challenge") + } + panic("unreachable") +} + +// unmarshal parses an agent message in packet, returning the parsed +// form and the message type of packet. +func unmarshal(packet []byte) (interface{}, error) { + if len(packet) < 1 { + return nil, errors.New("agent: empty packet") + } + var msg interface{} + switch packet[0] { + case agentFailure: + return new(failureAgentMsg), nil + case agentSuccess: + return new(successAgentMsg), nil + case agentIdentitiesAnswer: + msg = new(identitiesAnswerAgentMsg) + case agentSignResponse: + msg = new(signResponseAgentMsg) + case agentV1IdentitiesAnswer: + msg = new(agentV1IdentityMsg) + default: + return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) + } + if err := ssh.Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} + +type rsaKeyMsg struct { + Type string `sshtype:"17|25"` + N *big.Int + E *big.Int + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaKeyMsg struct { + Type string `sshtype:"17|25"` + P *big.Int + Q *big.Int + G *big.Int + Y *big.Int + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaKeyMsg struct { + Type string `sshtype:"17|25"` + Curve string + KeyBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ed25519KeyMsg struct { + Type string `sshtype:"17|25"` + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + +// Insert adds a private key to the agent. +func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaKeyMsg{ + Type: ssh.KeyAlgoRSA, + N: k.N, + E: big.NewInt(int64(k.E)), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaKeyMsg{ + Type: ssh.KeyAlgoDSA, + P: k.P, + Q: k.Q, + G: k.G, + Y: k.Y, + X: k.X, + Comments: comment, + Constraints: constraints, + }) + case *ecdsa.PrivateKey: + nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) + req = ssh.Marshal(ecdsaKeyMsg{ + Type: "ecdsa-sha2-" + nistID, + Curve: nistID, + KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519KeyMsg{ + Type: ssh.KeyAlgoED25519, + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. 
+ if len(constraints) != 0 { + req[0] = agentAddIdConstrained + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +type rsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + D *big.Int + Iqmp *big.Int // IQMP = Inverse Q Mod P + P *big.Int + Q *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type dsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + X *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ecdsaCertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + D *big.Int + Comments string + Constraints []byte `ssh:"rest"` +} + +type ed25519CertMsg struct { + Type string `sshtype:"17|25"` + CertBytes []byte + Pub []byte + Priv []byte + Comments string + Constraints []byte `ssh:"rest"` +} + +// Add adds a private key to the agent. If a certificate is given, +// that certificate is added instead as public key. +func (c *client) Add(key AddedKey) error { + var constraints []byte + + if secs := key.LifetimeSecs; secs != 0 { + constraints = append(constraints, agentConstrainLifetime) + + var secsBytes [4]byte + binary.BigEndian.PutUint32(secsBytes[:], secs) + constraints = append(constraints, secsBytes[:]...) + } + + if key.ConfirmBeforeUse { + constraints = append(constraints, agentConstrainConfirm) + } + + if cert := key.Certificate; cert == nil { + return c.insertKey(key.PrivateKey, key.Comment, constraints) + } else { + return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) + } +} + +func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { + var req []byte + switch k := s.(type) { + case *rsa.PrivateKey: + if len(k.Primes) != 2 { + return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) + } + k.Precompute() + req = ssh.Marshal(rsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Iqmp: k.Precomputed.Qinv, + P: k.Primes[0], + Q: k.Primes[1], + Comments: comment, + Constraints: constraints, + }) + case *dsa.PrivateKey: + req = ssh.Marshal(dsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + X: k.X, + Comments: comment, + Constraints: constraints, + }) + case *ecdsa.PrivateKey: + req = ssh.Marshal(ecdsaCertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + D: k.D, + Comments: comment, + Constraints: constraints, + }) + case *ed25519.PrivateKey: + req = ssh.Marshal(ed25519CertMsg{ + Type: cert.Type(), + CertBytes: cert.Marshal(), + Pub: []byte(*k)[32:], + Priv: []byte(*k), + Comments: comment, + Constraints: constraints, + }) + default: + return fmt.Errorf("agent: unsupported key type %T", s) + } + + // if constraints are present then the message type needs to be changed. + if len(constraints) != 0 { + req[0] = agentAddIdConstrained + } + + signer, err := ssh.NewSignerFromKey(s) + if err != nil { + return err + } + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return errors.New("agent: signer and cert have different public key") + } + + resp, err := c.call(req) + if err != nil { + return err + } + if _, ok := resp.(*successAgentMsg); ok { + return nil + } + return errors.New("agent: failure") +} + +// Signers provides a callback for client authentication. 
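The constraint handling above maps directly onto the AddedKey fields: a non-zero LifetimeSecs appends agentConstrainLifetime plus a big-endian uint32, and ConfirmBeforeUse appends agentConstrainConfirm, which is also what switches the request type to agentAddIdConstrained. A hypothetical caller-side sketch exercising both constraints (addTemporaryKey and its arguments are illustrative names):

    package main

    import (
        "golang.org/x/crypto/ed25519"
        "golang.org/x/crypto/ssh/agent"
    )

    // addTemporaryKey is an illustrative helper, not part of this package:
    // it lends an ed25519 key to the agent for ten minutes and asks the
    // agent to confirm every use.
    func addTemporaryKey(ag agent.Agent, priv ed25519.PrivateKey, comment string) error {
        return ag.Add(agent.AddedKey{
            PrivateKey:       &priv, // this vendored client expects *ed25519.PrivateKey
            Comment:          comment,
            LifetimeSecs:     600, // seconds
            ConfirmBeforeUse: true,
        })
    }
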
+func (c *client) Signers() ([]ssh.Signer, error) { + keys, err := c.List() + if err != nil { + return nil, err + } + + var result []ssh.Signer + for _, k := range keys { + result = append(result, &agentKeyringSigner{c, k}) + } + return result, nil +} + +type agentKeyringSigner struct { + agent *client + pub ssh.PublicKey +} + +func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { + return s.pub +} + +func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { + // The agent has its own entropy source, so the rand argument is ignored. + return s.agent.Sign(s.pub, data) +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go new file mode 100644 index 0000000000000000000000000000000000000000..fd24ba900d2541ff523678b62b424086d5ee853d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/forward.go @@ -0,0 +1,103 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "errors" + "io" + "net" + "sync" + + "golang.org/x/crypto/ssh" +) + +// RequestAgentForwarding sets up agent forwarding for the session. +// ForwardToAgent or ForwardToRemote should be called to route +// the authentication requests. +func RequestAgentForwarding(session *ssh.Session) error { + ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) + if err != nil { + return err + } + if !ok { + return errors.New("forwarding request denied") + } + return nil +} + +// ForwardToAgent routes authentication requests to the given keyring. +func ForwardToAgent(client *ssh.Client, keyring Agent) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go func() { + ServeAgent(keyring, channel) + channel.Close() + }() + } + }() + return nil +} + +const channelType = "auth-agent@openssh.com" + +// ForwardToRemote routes authentication requests to the ssh-agent +// process serving on the given unix socket. +func ForwardToRemote(client *ssh.Client, addr string) error { + channels := client.HandleChannelOpen(channelType) + if channels == nil { + return errors.New("agent: already have handler for " + channelType) + } + conn, err := net.Dial("unix", addr) + if err != nil { + return err + } + conn.Close() + + go func() { + for ch := range channels { + channel, reqs, err := ch.Accept() + if err != nil { + continue + } + go ssh.DiscardRequests(reqs) + go forwardUnixSocket(channel, addr) + } + }() + return nil +} + +func forwardUnixSocket(channel ssh.Channel, addr string) { + conn, err := net.Dial("unix", addr) + if err != nil { + return + } + + var wg sync.WaitGroup + wg.Add(2) + go func() { + io.Copy(conn, channel) + conn.(*net.UnixConn).CloseWrite() + wg.Done() + }() + go func() { + io.Copy(channel, conn) + channel.CloseWrite() + wg.Done() + }() + + wg.Wait() + conn.Close() + channel.Close() +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go new file mode 100644 index 0000000000000000000000000000000000000000..a6ba06ab3011916bee1737fe20fc8fc24e9b5382 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/keyring.go @@ -0,0 +1,215 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "bytes" + "crypto/rand" + "crypto/subtle" + "errors" + "fmt" + "sync" + "time" + + "golang.org/x/crypto/ssh" +) + +type privKey struct { + signer ssh.Signer + comment string + expire *time.Time +} + +type keyring struct { + mu sync.Mutex + keys []privKey + + locked bool + passphrase []byte +} + +var errLocked = errors.New("agent: locked") + +// NewKeyring returns an Agent that holds keys in memory. It is safe +// for concurrent use by multiple goroutines. +func NewKeyring() Agent { + return &keyring{} +} + +// RemoveAll removes all identities. +func (r *keyring) RemoveAll() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.keys = nil + return nil +} + +// removeLocked does the actual key removal. The caller must already be holding the +// keyring mutex. +func (r *keyring) removeLocked(want []byte) error { + found := false + for i := 0; i < len(r.keys); { + if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { + found = true + r.keys[i] = r.keys[len(r.keys)-1] + r.keys = r.keys[:len(r.keys)-1] + continue + } else { + i++ + } + } + + if !found { + return errors.New("agent: key not found") + } + return nil +} + +// Remove removes all identities with the given public key. +func (r *keyring) Remove(key ssh.PublicKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + return r.removeLocked(key.Marshal()) +} + +// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. +func (r *keyring) Lock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + + r.locked = true + r.passphrase = passphrase + return nil +} + +// Unlock undoes the effect of Lock +func (r *keyring) Unlock(passphrase []byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if !r.locked { + return errors.New("agent: not locked") + } + if len(passphrase) != len(r.passphrase) || 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { + return fmt.Errorf("agent: incorrect passphrase") + } + + r.locked = false + r.passphrase = nil + return nil +} + +// expireKeysLocked removes expired keys from the keyring. If a key was added +// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have +// ellapsed, it is removed. The caller *must* be holding the keyring mutex. +func (r *keyring) expireKeysLocked() { + for _, k := range r.keys { + if k.expire != nil && time.Now().After(*k.expire) { + r.removeLocked(k.signer.PublicKey().Marshal()) + } + } +} + +// List returns the identities known to the agent. +func (r *keyring) List() ([]*Key, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + // section 2.7: locked agents return empty. + return nil, nil + } + + r.expireKeysLocked() + var ids []*Key + for _, k := range r.keys { + pub := k.signer.PublicKey() + ids = append(ids, &Key{ + Format: pub.Type(), + Blob: pub.Marshal(), + Comment: k.comment}) + } + return ids, nil +} + +// Insert adds a private key to the keyring. If a certificate +// is given, that certificate is added as public key. Note that +// any constraints given are ignored. 
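
A sketch (not part of the patch) of the in-memory keyring defined above, exercising the lock semantics: while locked, List returns an empty answer and Sign/Remove fail; Unlock with the same passphrase restores access. The Add call uses the method declared just below.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	kr := agent.NewKeyring()

	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	if err := kr.Add(agent.AddedKey{PrivateKey: priv, Comment: "in-memory"}); err != nil {
		log.Fatal(err)
	}

	if err := kr.Lock([]byte("secret")); err != nil {
		log.Fatal(err)
	}
	locked, _ := kr.List()
	fmt.Println("identities while locked:", len(locked)) // 0, per PROTOCOL.agent section 2.7

	if err := kr.Unlock([]byte("secret")); err != nil {
		log.Fatal(err)
	}
	unlocked, _ := kr.List()
	fmt.Println("identities after unlock:", len(unlocked)) // 1
}
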
+func (r *keyring) Add(key AddedKey) error { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return errLocked + } + signer, err := ssh.NewSignerFromKey(key.PrivateKey) + + if err != nil { + return err + } + + if cert := key.Certificate; cert != nil { + signer, err = ssh.NewCertSigner(cert, signer) + if err != nil { + return err + } + } + + p := privKey{ + signer: signer, + comment: key.Comment, + } + + if key.LifetimeSecs > 0 { + t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second) + p.expire = &t + } + + r.keys = append(r.keys, p) + + return nil +} + +// Sign returns a signature for the data. +func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return nil, errLocked + } + + r.expireKeysLocked() + wanted := key.Marshal() + for _, k := range r.keys { + if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) { + return k.signer.Sign(rand.Reader, data) + } + } + return nil, errors.New("not found") +} + +// Signers returns signers for all the known keys. +func (r *keyring) Signers() ([]ssh.Signer, error) { + r.mu.Lock() + defer r.mu.Unlock() + if r.locked { + return nil, errLocked + } + + r.expireKeysLocked() + s := make([]ssh.Signer, 0, len(r.keys)) + for _, k := range r.keys { + s = append(s, k.signer) + } + return s, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go new file mode 100644 index 0000000000000000000000000000000000000000..68a333fa5d8b62a8a8d2f3d233b05bb32094bf4e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/agent/server.go @@ -0,0 +1,451 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package agent + +import ( + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "math/big" + + "golang.org/x/crypto/ed25519" + "golang.org/x/crypto/ssh" +) + +// Server wraps an Agent and uses it to implement the agent side of +// the SSH-agent, wire protocol. +type server struct { + agent Agent +} + +func (s *server) processRequestBytes(reqData []byte) []byte { + rep, err := s.processRequest(reqData) + if err != nil { + if err != errLocked { + // TODO(hanwen): provide better logging interface? + log.Printf("agent %d: %v", reqData[0], err) + } + return []byte{agentFailure} + } + + if err == nil && rep == nil { + return []byte{agentSuccess} + } + + return ssh.Marshal(rep) +} + +func marshalKey(k *Key) []byte { + var record struct { + Blob []byte + Comment string + } + record.Blob = k.Marshal() + record.Comment = k.Comment + + return ssh.Marshal(&record) +} + +// See [PROTOCOL.agent], section 2.5.1. 
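
For the server side that begins here, a hedged usage sketch (not part of the patch): exposing an in-memory keyring as an agent on a Unix socket through the ServeAgent entry point defined later in this file. The socket path is illustrative.

package main

import (
	"io"
	"log"
	"net"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	keyring := agent.NewKeyring()

	l, err := net.Listen("unix", "/tmp/example-agent.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Fatal(err)
		}
		go func(c net.Conn) {
			defer c.Close()
			// ServeAgent decodes length-prefixed requests, dispatches them
			// to the keyring and writes the replies until an I/O error.
			if err := agent.ServeAgent(keyring, c); err != nil && err != io.EOF {
				log.Print(err)
			}
		}(conn)
	}
}
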
+const agentV1IdentitiesAnswer = 2 + +type agentV1IdentityMsg struct { + Numkeys uint32 `sshtype:"2"` +} + +type agentRemoveIdentityMsg struct { + KeyBlob []byte `sshtype:"18"` +} + +type agentLockMsg struct { + Passphrase []byte `sshtype:"22"` +} + +type agentUnlockMsg struct { + Passphrase []byte `sshtype:"23"` +} + +func (s *server) processRequest(data []byte) (interface{}, error) { + switch data[0] { + case agentRequestV1Identities: + return &agentV1IdentityMsg{0}, nil + + case agentRemoveAllV1Identities: + return nil, nil + + case agentRemoveIdentity: + var req agentRemoveIdentityMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) + + case agentRemoveAllIdentities: + return nil, s.agent.RemoveAll() + + case agentLock: + var req agentLockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + return nil, s.agent.Lock(req.Passphrase) + + case agentUnlock: + var req agentLockMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + return nil, s.agent.Unlock(req.Passphrase) + + case agentSignRequest: + var req signRequestAgentMsg + if err := ssh.Unmarshal(data, &req); err != nil { + return nil, err + } + + var wk wireKey + if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { + return nil, err + } + + k := &Key{ + Format: wk.Format, + Blob: req.KeyBlob, + } + + sig, err := s.agent.Sign(k, req.Data) // TODO(hanwen): flags. + if err != nil { + return nil, err + } + return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil + + case agentRequestIdentities: + keys, err := s.agent.List() + if err != nil { + return nil, err + } + + rep := identitiesAnswerAgentMsg{ + NumKeys: uint32(len(keys)), + } + for _, k := range keys { + rep.Keys = append(rep.Keys, marshalKey(k)...) 
+ } + return rep, nil + + case agentAddIdConstrained, agentAddIdentity: + return nil, s.insertIdentity(data) + } + + return nil, fmt.Errorf("unknown opcode %d", data[0]) +} + +func parseRSAKey(req []byte) (*AddedKey, error) { + var k rsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + if k.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + priv := &rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(k.E.Int64()), + N: k.N, + }, + D: k.D, + Primes: []*big.Int{k.P, k.Q}, + } + priv.Precompute() + + return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil +} + +func parseEd25519Key(req []byte) (*AddedKey, error) { + var k ed25519KeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + return &AddedKey{PrivateKey: &priv, Comment: k.Comments}, nil +} + +func parseDSAKey(req []byte) (*AddedKey, error) { + var k dsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + + return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil +} + +func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { + priv = &ecdsa.PrivateKey{ + D: privScalar, + } + + switch curveName { + case "nistp256": + priv.Curve = elliptic.P256() + case "nistp384": + priv.Curve = elliptic.P384() + case "nistp521": + priv.Curve = elliptic.P521() + default: + return nil, fmt.Errorf("agent: unknown curve %q", curveName) + } + + priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) + if priv.X == nil || priv.Y == nil { + return nil, errors.New("agent: point not on curve") + } + + return priv, nil +} + +func parseEd25519Cert(req []byte) (*AddedKey, error) { + var k ed25519CertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + priv := ed25519.PrivateKey(k.Priv) + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ED25519 certificate") + } + return &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}, nil +} + +func parseECDSAKey(req []byte) (*AddedKey, error) { + var k ecdsaKeyMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) + if err != nil { + return nil, err + } + + return &AddedKey{PrivateKey: priv, Comment: k.Comments}, nil +} + +func parseRSACert(req []byte) (*AddedKey, error) { + var k rsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad RSA certificate") + } + + // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go + var rsaPub struct { + Name string + E *big.Int + N *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + if rsaPub.E.BitLen() > 30 { + return nil, errors.New("agent: RSA public exponent too large") + } + + priv := rsa.PrivateKey{ + PublicKey: rsa.PublicKey{ + E: int(rsaPub.E.Int64()), + N: rsaPub.N, + }, + D: k.D, + Primes: []*big.Int{k.Q, k.P}, + } + priv.Precompute() + + 
return &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments}, nil +} + +func parseDSACert(req []byte) (*AddedKey, error) { + var k dsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad DSA certificate") + } + + // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go + var w struct { + Name string + P, Q, G, Y *big.Int + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { + return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) + } + + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + }, + Y: w.Y, + }, + X: k.X, + } + + return &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}, nil +} + +func parseECDSACert(req []byte) (*AddedKey, error) { + var k ecdsaCertMsg + if err := ssh.Unmarshal(req, &k); err != nil { + return nil, err + } + + pubKey, err := ssh.ParsePublicKey(k.CertBytes) + if err != nil { + return nil, err + } + cert, ok := pubKey.(*ssh.Certificate) + if !ok { + return nil, errors.New("agent: bad ECDSA certificate") + } + + // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go + var ecdsaPub struct { + Name string + ID string + Key []byte + } + if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { + return nil, err + } + + priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) + if err != nil { + return nil, err + } + + return &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments}, nil +} + +func (s *server) insertIdentity(req []byte) error { + var record struct { + Type string `sshtype:"17|25"` + Rest []byte `ssh:"rest"` + } + + if err := ssh.Unmarshal(req, &record); err != nil { + return err + } + + var addedKey *AddedKey + var err error + + switch record.Type { + case ssh.KeyAlgoRSA: + addedKey, err = parseRSAKey(req) + case ssh.KeyAlgoDSA: + addedKey, err = parseDSAKey(req) + case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: + addedKey, err = parseECDSAKey(req) + case ssh.KeyAlgoED25519: + addedKey, err = parseEd25519Key(req) + case ssh.CertAlgoRSAv01: + addedKey, err = parseRSACert(req) + case ssh.CertAlgoDSAv01: + addedKey, err = parseDSACert(req) + case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: + addedKey, err = parseECDSACert(req) + case ssh.CertAlgoED25519v01: + addedKey, err = parseEd25519Cert(req) + default: + return fmt.Errorf("agent: not implemented: %q", record.Type) + } + + if err != nil { + return err + } + return s.agent.Add(*addedKey) +} + +// ServeAgent serves the agent protocol on the given connection. It +// returns when an I/O error occurs. +func ServeAgent(agent Agent, c io.ReadWriter) error { + s := &server{agent} + + var length [4]byte + for { + if _, err := io.ReadFull(c, length[:]); err != nil { + return err + } + l := binary.BigEndian.Uint32(length[:]) + if l > maxAgentResponseBytes { + // We also cap requests. 
+ return fmt.Errorf("agent: request too large: %d", l) + } + + req := make([]byte, l) + if _, err := io.ReadFull(c, req); err != nil { + return err + } + + repData := s.processRequestBytes(req) + if len(repData) > maxAgentResponseBytes { + return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) + } + + binary.BigEndian.PutUint32(length[:], uint32(len(repData))) + if _, err := c.Write(length[:]); err != nil { + return err + } + if _, err := c.Write(repData); err != nil { + return err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go new file mode 100644 index 0000000000000000000000000000000000000000..6931b5114fe8301e1ee955e6a2878ff10e43d4df --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/buffer.go @@ -0,0 +1,98 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "io" + "sync" +) + +// buffer provides a linked list buffer for data exchange +// between producer and consumer. Theoretically the buffer is +// of unlimited capacity as it does no allocation of its own. +type buffer struct { + // protects concurrent access to head, tail and closed + *sync.Cond + + head *element // the buffer that will be read first + tail *element // the buffer that will be read last + + closed bool +} + +// An element represents a single link in a linked list. +type element struct { + buf []byte + next *element +} + +// newBuffer returns an empty buffer that is not closed. +func newBuffer() *buffer { + e := new(element) + b := &buffer{ + Cond: newCond(), + head: e, + tail: e, + } + return b +} + +// write makes buf available for Read to receive. +// buf must not be modified after the call to write. +func (b *buffer) write(buf []byte) { + b.Cond.L.Lock() + e := &element{buf: buf} + b.tail.next = e + b.tail = e + b.Cond.Signal() + b.Cond.L.Unlock() +} + +// eof closes the buffer. Reads from the buffer once all +// the data has been consumed will receive os.EOF. +func (b *buffer) eof() error { + b.Cond.L.Lock() + b.closed = true + b.Cond.Signal() + b.Cond.L.Unlock() + return nil +} + +// Read reads data from the internal buffer in buf. Reads will block +// if no data is available, or until the buffer is closed. +func (b *buffer) Read(buf []byte) (n int, err error) { + b.Cond.L.Lock() + defer b.Cond.L.Unlock() + + for len(buf) > 0 { + // if there is data in b.head, copy it + if len(b.head.buf) > 0 { + r := copy(buf, b.head.buf) + buf, b.head.buf = buf[r:], b.head.buf[r:] + n += r + continue + } + // if there is a next buffer, make it the head + if len(b.head.buf) == 0 && b.head != b.tail { + b.head = b.head.next + continue + } + + // if at least one byte has been copied, return + if n > 0 { + break + } + + // if nothing was read, and there is nothing outstanding + // check to see if the buffer is closed. + if b.closed { + err = io.EOF + break + } + // out of buffers, wait for producer + b.Cond.Wait() + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go new file mode 100644 index 0000000000000000000000000000000000000000..6331c94d53bc991332064fb0cfbd62075337f6a5 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/certs.go @@ -0,0 +1,503 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "sort" + "time" +) + +// These constants from [PROTOCOL.certkeys] represent the algorithm names +// for certificate types supported by this package. +const ( + CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" + CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" + CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" + CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" + CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" + CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" +) + +// Certificate types distinguish between host and user +// certificates. The values can be set in the CertType field of +// Certificate. +const ( + UserCert = 1 + HostCert = 2 +) + +// Signature represents a cryptographic signature. +type Signature struct { + Format string + Blob []byte +} + +// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that +// a certificate does not expire. +const CertTimeInfinity = 1<<64 - 1 + +// An Certificate represents an OpenSSH certificate as defined in +// [PROTOCOL.certkeys]?rev=1.8. +type Certificate struct { + Nonce []byte + Key PublicKey + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []string + ValidAfter uint64 + ValidBefore uint64 + Permissions + Reserved []byte + SignatureKey PublicKey + Signature *Signature +} + +// genericCertData holds the key-independent part of the certificate data. +// Overall, certificates contain an nonce, public key fields and +// key-independent fields. +type genericCertData struct { + Serial uint64 + CertType uint32 + KeyId string + ValidPrincipals []byte + ValidAfter uint64 + ValidBefore uint64 + CriticalOptions []byte + Extensions []byte + Reserved []byte + SignatureKey []byte + Signature []byte +} + +func marshalStringList(namelist []string) []byte { + var to []byte + for _, name := range namelist { + s := struct{ N string }{name} + to = append(to, Marshal(&s)...) + } + return to +} + +type optionsTuple struct { + Key string + Value []byte +} + +type optionsTupleValue struct { + Value string +} + +// serialize a map of critical options or extensions +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty string value +func marshalTuples(tups map[string]string) []byte { + keys := make([]string, 0, len(tups)) + for key := range tups { + keys = append(keys, key) + } + sort.Strings(keys) + + var ret []byte + for _, key := range keys { + s := optionsTuple{Key: key} + if value := tups[key]; len(value) > 0 { + s.Value = Marshal(&optionsTupleValue{value}) + } + ret = append(ret, Marshal(&s)...) + } + return ret +} + +// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, +// we need two length prefixes for a non-empty option value +func parseTuples(in []byte) (map[string]string, error) { + tups := map[string]string{} + var lastKey string + var haveLastKey bool + + for len(in) > 0 { + var key, val, extra []byte + var ok bool + + if key, in, ok = parseString(in); !ok { + return nil, errShortRead + } + keyStr := string(key) + // according to [PROTOCOL.certkeys], the names must be in + // lexical order. 
+ if haveLastKey && keyStr <= lastKey { + return nil, fmt.Errorf("ssh: certificate options are not in lexical order") + } + lastKey, haveLastKey = keyStr, true + // the next field is a data field, which if non-empty has a string embedded + if val, in, ok = parseString(in); !ok { + return nil, errShortRead + } + if len(val) > 0 { + val, extra, ok = parseString(val) + if !ok { + return nil, errShortRead + } + if len(extra) > 0 { + return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") + } + tups[keyStr] = string(val) + } else { + tups[keyStr] = "" + } + } + return tups, nil +} + +func parseCert(in []byte, privAlgo string) (*Certificate, error) { + nonce, rest, ok := parseString(in) + if !ok { + return nil, errShortRead + } + + key, rest, err := parsePubKey(rest, privAlgo) + if err != nil { + return nil, err + } + + var g genericCertData + if err := Unmarshal(rest, &g); err != nil { + return nil, err + } + + c := &Certificate{ + Nonce: nonce, + Key: key, + Serial: g.Serial, + CertType: g.CertType, + KeyId: g.KeyId, + ValidAfter: g.ValidAfter, + ValidBefore: g.ValidBefore, + } + + for principals := g.ValidPrincipals; len(principals) > 0; { + principal, rest, ok := parseString(principals) + if !ok { + return nil, errShortRead + } + c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) + principals = rest + } + + c.CriticalOptions, err = parseTuples(g.CriticalOptions) + if err != nil { + return nil, err + } + c.Extensions, err = parseTuples(g.Extensions) + if err != nil { + return nil, err + } + c.Reserved = g.Reserved + k, err := ParsePublicKey(g.SignatureKey) + if err != nil { + return nil, err + } + + c.SignatureKey = k + c.Signature, rest, ok = parseSignatureBody(g.Signature) + if !ok || len(rest) > 0 { + return nil, errors.New("ssh: signature parse error") + } + + return c, nil +} + +type openSSHCertSigner struct { + pub *Certificate + signer Signer +} + +// NewCertSigner returns a Signer that signs with the given Certificate, whose +// private key is held by signer. It returns an error if the public key in cert +// doesn't match the key used by signer. +func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { + if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { + return nil, errors.New("ssh: signer and cert have different public key") + } + + return &openSSHCertSigner{cert, signer}, nil +} + +func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + return s.signer.Sign(rand, data) +} + +func (s *openSSHCertSigner) PublicKey() PublicKey { + return s.pub +} + +const sourceAddressCriticalOption = "source-address" + +// CertChecker does the work of verifying a certificate. Its methods +// can be plugged into ClientConfig.HostKeyCallback and +// ServerConfig.PublicKeyCallback. For the CertChecker to work, +// minimally, the IsAuthority callback should be set. +type CertChecker struct { + // SupportedCriticalOptions lists the CriticalOptions that the + // server application layer understands. These are only used + // for user certificates. + SupportedCriticalOptions []string + + // IsAuthority should return true if the key is recognized as + // an authority. This allows for certificates to be signed by other + // certificates. + IsAuthority func(auth PublicKey) bool + + // Clock is used for verifying time stamps. If nil, time.Now + // is used. + Clock func() time.Time + + // UserKeyFallback is called when CertChecker.Authenticate encounters a + // public key that is not a certificate. 
It must implement validation + // of user keys or else, if nil, all such keys are rejected. + UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // HostKeyFallback is called when CertChecker.CheckHostKey encounters a + // public key that is not a certificate. It must implement host key + // validation or else, if nil, all such keys are rejected. + HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error + + // IsRevoked is called for each certificate so that revocation checking + // can be implemented. It should return true if the given certificate + // is revoked and false otherwise. If nil, no certificates are + // considered to have been revoked. + IsRevoked func(cert *Certificate) bool +} + +// CheckHostKey checks a host key certificate. This method can be +// plugged into ClientConfig.HostKeyCallback. +func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { + cert, ok := key.(*Certificate) + if !ok { + if c.HostKeyFallback != nil { + return c.HostKeyFallback(addr, remote, key) + } + return errors.New("ssh: non-certificate host key") + } + if cert.CertType != HostCert { + return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) + } + + return c.CheckCert(addr, cert) +} + +// Authenticate checks a user certificate. Authenticate can be used as +// a value for ServerConfig.PublicKeyCallback. +func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { + cert, ok := pubKey.(*Certificate) + if !ok { + if c.UserKeyFallback != nil { + return c.UserKeyFallback(conn, pubKey) + } + return nil, errors.New("ssh: normal key pairs not accepted") + } + + if cert.CertType != UserCert { + return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) + } + + if err := c.CheckCert(conn.User(), cert); err != nil { + return nil, err + } + + return &cert.Permissions, nil +} + +// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and +// the signature of the certificate. +func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { + if c.IsRevoked != nil && c.IsRevoked(cert) { + return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial) + } + + for opt, _ := range cert.CriticalOptions { + // sourceAddressCriticalOption will be enforced by + // serverAuthenticate + if opt == sourceAddressCriticalOption { + continue + } + + found := false + for _, supp := range c.SupportedCriticalOptions { + if supp == opt { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) + } + } + + if len(cert.ValidPrincipals) > 0 { + // By default, certs are valid for all users/hosts. 
+ found := false + for _, p := range cert.ValidPrincipals { + if p == principal { + found = true + break + } + } + if !found { + return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) + } + } + + if !c.IsAuthority(cert.SignatureKey) { + return fmt.Errorf("ssh: certificate signed by unrecognized authority") + } + + clock := c.Clock + if clock == nil { + clock = time.Now + } + + unixNow := clock().Unix() + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { + return fmt.Errorf("ssh: certificate signature does not verify") + } + + return nil +} + +// SignCert sets c.SignatureKey to the authority's public key and stores a +// Signature, by authority, in the certificate. +func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { + c.Nonce = make([]byte, 32) + if _, err := io.ReadFull(rand, c.Nonce); err != nil { + return err + } + c.SignatureKey = authority.PublicKey() + + sig, err := authority.Sign(rand, c.bytesForSigning()) + if err != nil { + return err + } + c.Signature = sig + return nil +} + +var certAlgoNames = map[string]string{ + KeyAlgoRSA: CertAlgoRSAv01, + KeyAlgoDSA: CertAlgoDSAv01, + KeyAlgoECDSA256: CertAlgoECDSA256v01, + KeyAlgoECDSA384: CertAlgoECDSA384v01, + KeyAlgoECDSA521: CertAlgoECDSA521v01, + KeyAlgoED25519: CertAlgoED25519v01, +} + +// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. +// Panics if a non-certificate algorithm is passed. +func certToPrivAlgo(algo string) string { + for privAlgo, pubAlgo := range certAlgoNames { + if pubAlgo == algo { + return privAlgo + } + } + panic("unknown cert algorithm") +} + +func (cert *Certificate) bytesForSigning() []byte { + c2 := *cert + c2.Signature = nil + out := c2.Marshal() + // Drop trailing signature length. + return out[:len(out)-4] +} + +// Marshal serializes c into OpenSSH's wire format. It is part of the +// PublicKey interface. +func (c *Certificate) Marshal() []byte { + generic := genericCertData{ + Serial: c.Serial, + CertType: c.CertType, + KeyId: c.KeyId, + ValidPrincipals: marshalStringList(c.ValidPrincipals), + ValidAfter: uint64(c.ValidAfter), + ValidBefore: uint64(c.ValidBefore), + CriticalOptions: marshalTuples(c.CriticalOptions), + Extensions: marshalTuples(c.Extensions), + Reserved: c.Reserved, + SignatureKey: c.SignatureKey.Marshal(), + } + if c.Signature != nil { + generic.Signature = Marshal(c.Signature) + } + genericBytes := Marshal(&generic) + keyBytes := c.Key.Marshal() + _, keyBytes, _ = parseString(keyBytes) + prefix := Marshal(&struct { + Name string + Nonce []byte + Key []byte `ssh:"rest"` + }{c.Type(), c.Nonce, keyBytes}) + + result := make([]byte, 0, len(prefix)+len(genericBytes)) + result = append(result, prefix...) + result = append(result, genericBytes...) + return result +} + +// Type returns the key name. It is part of the PublicKey interface. +func (c *Certificate) Type() string { + algo, ok := certAlgoNames[c.Key.Type()] + if !ok { + panic("unknown cert key type " + c.Key.Type()) + } + return algo +} + +// Verify verifies a signature against the certificate's public +// key. It is part of the PublicKey interface. 
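
A sketch (not part of the patch) tying SignCert and CertChecker.CheckCert together: an authority signs a short-lived user certificate, and the checker validates it for the principal "alice". ssh.NewSignerFromKey and ssh.NewPublicKey are the constructors from this package's keys.go; the key sizes and lifetime are illustrative.

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"
	"time"

	"golang.org/x/crypto/ssh"
)

func main() {
	caKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	authority, err := ssh.NewSignerFromKey(caKey)
	if err != nil {
		log.Fatal(err)
	}

	userKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	userPub, err := ssh.NewPublicKey(&userKey.PublicKey)
	if err != nil {
		log.Fatal(err)
	}

	cert := &ssh.Certificate{
		Key:             userPub,
		KeyId:           "alice@example.com",
		CertType:        ssh.UserCert,
		ValidPrincipals: []string{"alice"},
		ValidAfter:      uint64(time.Now().Add(-time.Minute).Unix()),
		ValidBefore:     uint64(time.Now().Add(time.Hour).Unix()),
	}
	// SignCert fills in Nonce, SignatureKey and Signature.
	if err := cert.SignCert(rand.Reader, authority); err != nil {
		log.Fatal(err)
	}

	checker := &ssh.CertChecker{
		IsAuthority: func(auth ssh.PublicKey) bool {
			return bytes.Equal(auth.Marshal(), authority.PublicKey().Marshal())
		},
	}
	fmt.Println(checker.CheckCert("alice", cert)) // <nil> for a valid certificate
}
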
+func (c *Certificate) Verify(data []byte, sig *Signature) error { + return c.Key.Verify(data, sig) +} + +func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { + format, in, ok := parseString(in) + if !ok { + return + } + + out = &Signature{ + Format: string(format), + } + + if out.Blob, in, ok = parseString(in); !ok { + return + } + + return out, in, ok +} + +func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { + sigBytes, rest, ok := parseString(in) + if !ok { + return + } + + out, trailing, ok := parseSignatureBody(sigBytes) + if !ok || len(trailing) > 0 { + return nil, nil, false + } + return +} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go new file mode 100644 index 0000000000000000000000000000000000000000..195530ea0da2fc9b602f8a17c7b514cb98263166 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/channel.go @@ -0,0 +1,633 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "sync" +) + +const ( + minPacketLength = 9 + // channelMaxPacket contains the maximum number of bytes that will be + // sent in a single packet. As per RFC 4253, section 6.1, 32k is also + // the minimum. + channelMaxPacket = 1 << 15 + // We follow OpenSSH here. + channelWindowSize = 64 * channelMaxPacket +) + +// NewChannel represents an incoming request to a channel. It must either be +// accepted for use by calling Accept, or rejected by calling Reject. +type NewChannel interface { + // Accept accepts the channel creation request. It returns the Channel + // and a Go channel containing SSH requests. The Go channel must be + // serviced otherwise the Channel will hang. + Accept() (Channel, <-chan *Request, error) + + // Reject rejects the channel creation request. After calling + // this, no other methods on the Channel may be called. + Reject(reason RejectionReason, message string) error + + // ChannelType returns the type of the channel, as supplied by the + // client. + ChannelType() string + + // ExtraData returns the arbitrary payload for this channel, as supplied + // by the client. This data is specific to the channel type. + ExtraData() []byte +} + +// A Channel is an ordered, reliable, flow-controlled, duplex stream +// that is multiplexed over an SSH connection. +type Channel interface { + // Read reads up to len(data) bytes from the channel. + Read(data []byte) (int, error) + + // Write writes len(data) bytes to the channel. + Write(data []byte) (int, error) + + // Close signals end of channel use. No data may be sent after this + // call. + Close() error + + // CloseWrite signals the end of sending in-band + // data. Requests may still be sent, and the other side may + // still send data + CloseWrite() error + + // SendRequest sends a channel request. If wantReply is true, + // it will wait for a reply and return the result as a + // boolean, otherwise the return value will be false. Channel + // requests are out-of-band messages so they may be sent even + // if the data stream is closed or blocked by flow control. + // If the channel is closed before a reply is returned, io.EOF + // is returned. + SendRequest(name string, wantReply bool, payload []byte) (bool, error) + + // Stderr returns an io.ReadWriter that writes to this channel + // with the extended data type set to stderr. 
Stderr may + // safely be read and written from a different goroutine than + // Read and Write respectively. + Stderr() io.ReadWriter +} + +// Request is a request sent outside of the normal stream of +// data. Requests can either be specific to an SSH channel, or they +// can be global. +type Request struct { + Type string + WantReply bool + Payload []byte + + ch *channel + mux *mux +} + +// Reply sends a response to a request. It must be called for all requests +// where WantReply is true and is a no-op otherwise. The payload argument is +// ignored for replies to channel-specific requests. +func (r *Request) Reply(ok bool, payload []byte) error { + if !r.WantReply { + return nil + } + + if r.ch == nil { + return r.mux.ackRequest(ok, payload) + } + + return r.ch.ackRequest(ok) +} + +// RejectionReason is an enumeration used when rejecting channel creation +// requests. See RFC 4254, section 5.1. +type RejectionReason uint32 + +const ( + Prohibited RejectionReason = iota + 1 + ConnectionFailed + UnknownChannelType + ResourceShortage +) + +// String converts the rejection reason to human readable form. +func (r RejectionReason) String() string { + switch r { + case Prohibited: + return "administratively prohibited" + case ConnectionFailed: + return "connect failed" + case UnknownChannelType: + return "unknown channel type" + case ResourceShortage: + return "resource shortage" + } + return fmt.Sprintf("unknown reason %d", int(r)) +} + +func min(a uint32, b int) uint32 { + if a < uint32(b) { + return a + } + return uint32(b) +} + +type channelDirection uint8 + +const ( + channelInbound channelDirection = iota + channelOutbound +) + +// channel is an implementation of the Channel interface that works +// with the mux class. +type channel struct { + // R/O after creation + chanType string + extraData []byte + localId, remoteId uint32 + + // maxIncomingPayload and maxRemotePayload are the maximum + // payload sizes of normal and extended data packets for + // receiving and sending, respectively. The wire packet will + // be 9 or 13 bytes larger (excluding encryption overhead). + maxIncomingPayload uint32 + maxRemotePayload uint32 + + mux *mux + + // decided is set to true if an accept or reject message has been sent + // (for outbound channels) or received (for inbound channels). + decided bool + + // direction contains either channelOutbound, for channels created + // locally, or channelInbound, for channels created by the peer. + direction channelDirection + + // Pending internal channel messages. + msg chan interface{} + + // Since requests have no ID, there can be only one request + // with WantReply=true outstanding. This lock is held by a + // goroutine that has such an outgoing request pending. + sentRequestMu sync.Mutex + + incomingRequests chan *Request + + sentEOF bool + + // thread-safe data + remoteWin window + pending *buffer + extPending *buffer + + // windowMu protects myWindow, the flow-control window. + windowMu sync.Mutex + myWindow uint32 + + // writeMu serializes calls to mux.conn.writePacket() and + // protects sentClose and packetPool. This mutex must be + // different from windowMu, as writePacket can block if there + // is a key exchange pending. + writeMu sync.Mutex + sentClose bool + + // packetPool has a buffer for each extended channel ID to + // save allocations during writes. + packetPool map[uint32][]byte +} + +// writePacket sends a packet. If the packet is a channel close, it updates +// sentClose. This method takes the lock c.writeMu. 
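
A brief sketch (not part of the patch) of how the Request/Reply surface above is typically serviced on the receiving side; the accepted request types here are illustrative.

package sshexample

import "golang.org/x/crypto/ssh"

// handleRequests services the out-of-band requests for one channel,
// acknowledging only the request types the application understands.
func handleRequests(in <-chan *ssh.Request) {
	for req := range in {
		switch req.Type {
		case "shell", "pty-req":
			// Reply is a no-op when WantReply is false.
			req.Reply(true, nil)
		default:
			req.Reply(false, nil)
		}
	}
}
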
+func (c *channel) writePacket(packet []byte) error { + c.writeMu.Lock() + if c.sentClose { + c.writeMu.Unlock() + return io.EOF + } + c.sentClose = (packet[0] == msgChannelClose) + err := c.mux.conn.writePacket(packet) + c.writeMu.Unlock() + return err +} + +func (c *channel) sendMessage(msg interface{}) error { + if debugMux { + log.Printf("send(%d): %#v", c.mux.chanList.offset, msg) + } + + p := Marshal(msg) + binary.BigEndian.PutUint32(p[1:], c.remoteId) + return c.writePacket(p) +} + +// WriteExtended writes data to a specific extended stream. These streams are +// used, for example, for stderr. +func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { + if c.sentEOF { + return 0, io.EOF + } + // 1 byte message type, 4 bytes remoteId, 4 bytes data length + opCode := byte(msgChannelData) + headerLength := uint32(9) + if extendedCode > 0 { + headerLength += 4 + opCode = msgChannelExtendedData + } + + c.writeMu.Lock() + packet := c.packetPool[extendedCode] + // We don't remove the buffer from packetPool, so + // WriteExtended calls from different goroutines will be + // flagged as errors by the race detector. + c.writeMu.Unlock() + + for len(data) > 0 { + space := min(c.maxRemotePayload, len(data)) + if space, err = c.remoteWin.reserve(space); err != nil { + return n, err + } + if want := headerLength + space; uint32(cap(packet)) < want { + packet = make([]byte, want) + } else { + packet = packet[:want] + } + + todo := data[:space] + + packet[0] = opCode + binary.BigEndian.PutUint32(packet[1:], c.remoteId) + if extendedCode > 0 { + binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) + } + binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) + copy(packet[headerLength:], todo) + if err = c.writePacket(packet); err != nil { + return n, err + } + + n += len(todo) + data = data[len(todo):] + } + + c.writeMu.Lock() + c.packetPool[extendedCode] = packet + c.writeMu.Unlock() + + return n, err +} + +func (c *channel) handleData(packet []byte) error { + headerLen := 9 + isExtendedData := packet[0] == msgChannelExtendedData + if isExtendedData { + headerLen = 13 + } + if len(packet) < headerLen { + // malformed data packet + return parseError(packet[0]) + } + + var extended uint32 + if isExtendedData { + extended = binary.BigEndian.Uint32(packet[5:]) + } + + length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) + if length == 0 { + return nil + } + if length > c.maxIncomingPayload { + // TODO(hanwen): should send Disconnect? + return errors.New("ssh: incoming packet exceeds maximum payload size") + } + + data := packet[headerLen:] + if length != uint32(len(data)) { + return errors.New("ssh: wrong packet length") + } + + c.windowMu.Lock() + if c.myWindow < length { + c.windowMu.Unlock() + // TODO(hanwen): should send Disconnect with reason? + return errors.New("ssh: remote side wrote too much") + } + c.myWindow -= length + c.windowMu.Unlock() + + if extended == 1 { + c.extPending.write(data) + } else if extended > 0 { + // discard other extended data. + } else { + c.pending.write(data) + } + return nil +} + +func (c *channel) adjustWindow(n uint32) error { + c.windowMu.Lock() + // Since myWindow is managed on our side, and can never exceed + // the initial window setting, we don't worry about overflow. 
+ c.myWindow += uint32(n) + c.windowMu.Unlock() + return c.sendMessage(windowAdjustMsg{ + AdditionalBytes: uint32(n), + }) +} + +func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { + switch extended { + case 1: + n, err = c.extPending.Read(data) + case 0: + n, err = c.pending.Read(data) + default: + return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) + } + + if n > 0 { + err = c.adjustWindow(uint32(n)) + // sendWindowAdjust can return io.EOF if the remote + // peer has closed the connection, however we want to + // defer forwarding io.EOF to the caller of Read until + // the buffer has been drained. + if n > 0 && err == io.EOF { + err = nil + } + } + + return n, err +} + +func (c *channel) close() { + c.pending.eof() + c.extPending.eof() + close(c.msg) + close(c.incomingRequests) + c.writeMu.Lock() + // This is not necessary for a normal channel teardown, but if + // there was another error, it is. + c.sentClose = true + c.writeMu.Unlock() + // Unblock writers. + c.remoteWin.close() +} + +// responseMessageReceived is called when a success or failure message is +// received on a channel to check that such a message is reasonable for the +// given channel. +func (c *channel) responseMessageReceived() error { + if c.direction == channelInbound { + return errors.New("ssh: channel response message received on inbound channel") + } + if c.decided { + return errors.New("ssh: duplicate response received for channel") + } + c.decided = true + return nil +} + +func (c *channel) handlePacket(packet []byte) error { + switch packet[0] { + case msgChannelData, msgChannelExtendedData: + return c.handleData(packet) + case msgChannelClose: + c.sendMessage(channelCloseMsg{PeersId: c.remoteId}) + c.mux.chanList.remove(c.localId) + c.close() + return nil + case msgChannelEOF: + // RFC 4254 is mute on how EOF affects dataExt messages but + // it is logical to signal EOF at the same time. 
+ c.extPending.eof() + c.pending.eof() + return nil + } + + decoded, err := decode(packet) + if err != nil { + return err + } + + switch msg := decoded.(type) { + case *channelOpenFailureMsg: + if err := c.responseMessageReceived(); err != nil { + return err + } + c.mux.chanList.remove(msg.PeersId) + c.msg <- msg + case *channelOpenConfirmMsg: + if err := c.responseMessageReceived(); err != nil { + return err + } + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) + } + c.remoteId = msg.MyId + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.MyWindow) + c.msg <- msg + case *windowAdjustMsg: + if !c.remoteWin.add(msg.AdditionalBytes) { + return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) + } + case *channelRequestMsg: + req := Request{ + Type: msg.Request, + WantReply: msg.WantReply, + Payload: msg.RequestSpecificData, + ch: c, + } + + c.incomingRequests <- &req + default: + c.msg <- msg + } + return nil +} + +func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { + ch := &channel{ + remoteWin: window{Cond: newCond()}, + myWindow: channelWindowSize, + pending: newBuffer(), + extPending: newBuffer(), + direction: direction, + incomingRequests: make(chan *Request, chanSize), + msg: make(chan interface{}, chanSize), + chanType: chanType, + extraData: extraData, + mux: m, + packetPool: make(map[uint32][]byte), + } + ch.localId = m.chanList.add(ch) + return ch +} + +var errUndecided = errors.New("ssh: must Accept or Reject channel") +var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") + +type extChannel struct { + code uint32 + ch *channel +} + +func (e *extChannel) Write(data []byte) (n int, err error) { + return e.ch.WriteExtended(data, e.code) +} + +func (e *extChannel) Read(data []byte) (n int, err error) { + return e.ch.ReadExtended(data, e.code) +} + +func (c *channel) Accept() (Channel, <-chan *Request, error) { + if c.decided { + return nil, nil, errDecidedAlready + } + c.maxIncomingPayload = channelMaxPacket + confirm := channelOpenConfirmMsg{ + PeersId: c.remoteId, + MyId: c.localId, + MyWindow: c.myWindow, + MaxPacketSize: c.maxIncomingPayload, + } + c.decided = true + if err := c.sendMessage(confirm); err != nil { + return nil, nil, err + } + + return c, c.incomingRequests, nil +} + +func (ch *channel) Reject(reason RejectionReason, message string) error { + if ch.decided { + return errDecidedAlready + } + reject := channelOpenFailureMsg{ + PeersId: ch.remoteId, + Reason: reason, + Message: message, + Language: "en", + } + ch.decided = true + return ch.sendMessage(reject) +} + +func (ch *channel) Read(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.ReadExtended(data, 0) +} + +func (ch *channel) Write(data []byte) (int, error) { + if !ch.decided { + return 0, errUndecided + } + return ch.WriteExtended(data, 0) +} + +func (ch *channel) CloseWrite() error { + if !ch.decided { + return errUndecided + } + ch.sentEOF = true + return ch.sendMessage(channelEOFMsg{ + PeersId: ch.remoteId}) +} + +func (ch *channel) Close() error { + if !ch.decided { + return errUndecided + } + + return ch.sendMessage(channelCloseMsg{ + PeersId: ch.remoteId}) +} + +// Extended returns an io.ReadWriter that sends and receives data on the given, +// SSH extended stream. Such streams are used, for example, for stderr. 
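
A sketch (not part of the patch) of the Accept/Reject contract implemented above, seen from a server loop: each NewChannel must be decided exactly once, otherwise errUndecided or errDecidedAlready applies. The "session" type check and the echo behaviour are illustrative.

package sshexample

import (
	"io"

	"golang.org/x/crypto/ssh"
)

// serviceChannels accepts "session" channels and rejects everything else.
func serviceChannels(chans <-chan ssh.NewChannel) {
	for newCh := range chans {
		if newCh.ChannelType() != "session" {
			newCh.Reject(ssh.UnknownChannelType, "only session channels are supported")
			continue
		}
		ch, requests, err := newCh.Accept()
		if err != nil {
			continue
		}
		go ssh.DiscardRequests(requests)
		go func(c ssh.Channel) {
			defer c.Close()
			io.Copy(c, c) // echo the channel's data back to the peer
		}(ch)
	}
}
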
+func (ch *channel) Extended(code uint32) io.ReadWriter { + if !ch.decided { + return nil + } + return &extChannel{code, ch} +} + +func (ch *channel) Stderr() io.ReadWriter { + return ch.Extended(1) +} + +func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + if !ch.decided { + return false, errUndecided + } + + if wantReply { + ch.sentRequestMu.Lock() + defer ch.sentRequestMu.Unlock() + } + + msg := channelRequestMsg{ + PeersId: ch.remoteId, + Request: name, + WantReply: wantReply, + RequestSpecificData: payload, + } + + if err := ch.sendMessage(msg); err != nil { + return false, err + } + + if wantReply { + m, ok := (<-ch.msg) + if !ok { + return false, io.EOF + } + switch m.(type) { + case *channelRequestFailureMsg: + return false, nil + case *channelRequestSuccessMsg: + return true, nil + default: + return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) + } + } + + return false, nil +} + +// ackRequest either sends an ack or nack to the channel request. +func (ch *channel) ackRequest(ok bool) error { + if !ch.decided { + return errUndecided + } + + var msg interface{} + if !ok { + msg = channelRequestFailureMsg{ + PeersId: ch.remoteId, + } + } else { + msg = channelRequestSuccessMsg{ + PeersId: ch.remoteId, + } + } + return ch.sendMessage(msg) +} + +func (ch *channel) ChannelType() string { + return ch.chanType +} + +func (ch *channel) ExtraData() []byte { + return ch.extraData +} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go new file mode 100644 index 0000000000000000000000000000000000000000..13484ab4b3901905fa5add775dee8d4674199c1c --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/cipher.go @@ -0,0 +1,627 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/rc4" + "crypto/subtle" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" +) + +const ( + packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. + + // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations + // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC + // indicates implementations SHOULD be able to handle larger packet sizes, but then + // waffles on about reasonable limits. + // + // OpenSSH caps their maxPacket at 256kB so we choose to do + // the same. maxPacket is also used to ensure that uint32 + // length fields do not overflow, so it should remain well + // below 4G. + maxPacket = 256 * 1024 +) + +// noneCipher implements cipher.Stream and provides no encryption. It is used +// by the transport before the first key-exchange. 
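
A small sketch (not part of the patch) of SendRequest as defined above; the "exit-status" request name and its single uint32 payload come from RFC 4254 rather than from this file, so treat them as an assumption about the peer.

package sshexample

import "golang.org/x/crypto/ssh"

// sendExitStatus reports a command's exit code on a session channel.
// The request is fire-and-forget, so wantReply is false.
func sendExitStatus(ch ssh.Channel, code uint32) error {
	payload := ssh.Marshal(struct{ Status uint32 }{code})
	_, err := ch.SendRequest("exit-status", false, payload)
	return err
}
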
+type noneCipher struct{} + +func (c noneCipher) XORKeyStream(dst, src []byte) { + copy(dst, src) +} + +func newAESCTR(key, iv []byte) (cipher.Stream, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + return cipher.NewCTR(c, iv), nil +} + +func newRC4(key, iv []byte) (cipher.Stream, error) { + return rc4.NewCipher(key) +} + +type streamCipherMode struct { + keySize int + ivSize int + skip int + createFunc func(key, iv []byte) (cipher.Stream, error) +} + +func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) { + if len(key) < c.keySize { + panic("ssh: key length too small for cipher") + } + if len(iv) < c.ivSize { + panic("ssh: iv too small for cipher") + } + + stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize]) + if err != nil { + return nil, err + } + + var streamDump []byte + if c.skip > 0 { + streamDump = make([]byte, 512) + } + + for remainingToDump := c.skip; remainingToDump > 0; { + dumpThisTime := remainingToDump + if dumpThisTime > len(streamDump) { + dumpThisTime = len(streamDump) + } + stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) + remainingToDump -= dumpThisTime + } + + return stream, nil +} + +// cipherModes documents properties of supported ciphers. Ciphers not included +// are not supported and will not be negotiated, even if explicitly requested in +// ClientConfig.Crypto.Ciphers. +var cipherModes = map[string]*streamCipherMode{ + // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms + // are defined in the order specified in the RFC. + "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR}, + "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR}, + "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR}, + + // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. + // They are defined in the order specified in the RFC. + "arcfour128": {16, 0, 1536, newRC4}, + "arcfour256": {32, 0, 1536, newRC4}, + + // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. + // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and + // RC4) has problems with weak keys, and should be used with caution." + // RFC4345 introduces improved versions of Arcfour. + "arcfour": {16, 0, 0, newRC4}, + + // AES-GCM is not a stream cipher, so it is constructed with a + // special case. If we add any more non-stream ciphers, we + // should invest a cleaner way to do this. + gcmCipherID: {16, 12, 0, nil}, + + // CBC mode is insecure and so is not included in the default config. + // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely + // needed, it's possible to specify a custom Config to enable it. + // You should expect that an active attacker can recover plaintext if + // you do. + aes128cbcID: {16, aes.BlockSize, 0, nil}, + + // 3des-cbc is insecure and is disabled by default. + tripledescbcID: {24, des.BlockSize, 0, nil}, +} + +// prefixLen is the length of the packet prefix that contains the packet length +// and number of padding bytes. +const prefixLen = 5 + +// streamPacketCipher is a packetCipher using a stream cipher. +type streamPacketCipher struct { + mac hash.Hash + cipher cipher.Stream + etm bool + + // The following members are to avoid per-packet allocations. + prefix [prefixLen]byte + seqNumBytes [4]byte + padding [2 * packetSizeMultiple]byte + packetData []byte + macResult []byte +} + +// readPacket reads and decrypt a single packet from the reader argument. 
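
A sketch (not part of the patch) of what two of the cipherModes entries above expand to once createStream runs: an AES-CTR stream with no keystream skip, and RC4 with the first 1536 keystream bytes discarded as RFC 4345 requires. The key and IV slicing mirrors the keySize/ivSize fields.

package sshexample

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rc4"
)

// newAES128CTR corresponds to the "aes128-ctr" entry: 16-byte key,
// aes.BlockSize IV, no keystream skip.
func newAES128CTR(key, iv []byte) (cipher.Stream, error) {
	block, err := aes.NewCipher(key[:16])
	if err != nil {
		return nil, err
	}
	return cipher.NewCTR(block, iv[:aes.BlockSize]), nil
}

// newArcfour128 corresponds to the "arcfour128" entry: RC4 with a
// 1536-byte keystream skip and no IV.
func newArcfour128(key []byte) (cipher.Stream, error) {
	stream, err := rc4.NewCipher(key[:16])
	if err != nil {
		return nil, err
	}
	var discard [1536]byte
	stream.XORKeyStream(discard[:], discard[:])
	return stream, nil
}
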
+func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, s.prefix[:]); err != nil { + return nil, err + } + + var encryptedPaddingLength [1]byte + if s.mac != nil && s.etm { + copy(encryptedPaddingLength[:], s.prefix[4:5]) + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } else { + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + length := binary.BigEndian.Uint32(s.prefix[0:4]) + paddingLength := uint32(s.prefix[4]) + + var macSize uint32 + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + if s.etm { + s.mac.Write(s.prefix[:4]) + s.mac.Write(encryptedPaddingLength[:]) + } else { + s.mac.Write(s.prefix[:]) + } + macSize = uint32(s.mac.Size()) + } + + if length <= paddingLength+1 { + return nil, errors.New("ssh: invalid packet length, packet too small") + } + + if length > maxPacket { + return nil, errors.New("ssh: invalid packet length, packet too large") + } + + // the maxPacket check above ensures that length-1+macSize + // does not overflow. + if uint32(cap(s.packetData)) < length-1+macSize { + s.packetData = make([]byte, length-1+macSize) + } else { + s.packetData = s.packetData[:length-1+macSize] + } + + if _, err := io.ReadFull(r, s.packetData); err != nil { + return nil, err + } + mac := s.packetData[length-1:] + data := s.packetData[:length-1] + + if s.mac != nil && s.etm { + s.mac.Write(data) + } + + s.cipher.XORKeyStream(data, data) + + if s.mac != nil { + if !s.etm { + s.mac.Write(data) + } + s.macResult = s.mac.Sum(s.macResult[:0]) + if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { + return nil, errors.New("ssh: MAC failure") + } + } + + return s.packetData[:length-paddingLength-1], nil +} + +// writePacket encrypts and sends a packet of data to the writer argument +func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + if len(packet) > maxPacket { + return errors.New("ssh: packet too large") + } + + aadlen := 0 + if s.mac != nil && s.etm { + // packet length is not encrypted for EtM modes + aadlen = 4 + } + + paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple + if paddingLength < 4 { + paddingLength += packetSizeMultiple + } + + length := len(packet) + 1 + paddingLength + binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) + s.prefix[4] = byte(paddingLength) + padding := s.padding[:paddingLength] + if _, err := io.ReadFull(rand, padding); err != nil { + return err + } + + if s.mac != nil { + s.mac.Reset() + binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) + s.mac.Write(s.seqNumBytes[:]) + + if s.etm { + // For EtM algorithms, the packet length must stay unencrypted, + // but the following data (padding length) must be encrypted + s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) + } + + s.mac.Write(s.prefix[:]) + + if !s.etm { + // For non-EtM algorithms, the algorithm is applied on unencrypted data + s.mac.Write(packet) + s.mac.Write(padding) + } + } + + if !(s.mac != nil && s.etm) { + // For EtM algorithms, the padding length has already been encrypted + // and the packet length must remain unencrypted + s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) + } + + s.cipher.XORKeyStream(packet, packet) + s.cipher.XORKeyStream(padding, padding) + + if s.mac != nil && s.etm { + // For EtM algorithms, packet and padding must be encrypted + s.mac.Write(packet) + s.mac.Write(padding) + } + + if _, err := w.Write(s.prefix[:]); err != nil 
{ + return err + } + if _, err := w.Write(packet); err != nil { + return err + } + if _, err := w.Write(padding); err != nil { + return err + } + + if s.mac != nil { + s.macResult = s.mac.Sum(s.macResult[:0]) + if _, err := w.Write(s.macResult); err != nil { + return err + } + } + + return nil +} + +type gcmCipher struct { + aead cipher.AEAD + prefix [4]byte + iv []byte + buf []byte +} + +func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + aead, err := cipher.NewGCM(c) + if err != nil { + return nil, err + } + + return &gcmCipher{ + aead: aead, + iv: iv, + }, nil +} + +const gcmTagSize = 16 + +func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + // Pad out to multiple of 16 bytes. This is different from the + // stream cipher because that encrypts the length too. + padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) + if padding < 4 { + padding += packetSizeMultiple + } + + length := uint32(len(packet) + int(padding) + 1) + binary.BigEndian.PutUint32(c.prefix[:], length) + if _, err := w.Write(c.prefix[:]); err != nil { + return err + } + + if cap(c.buf) < int(length) { + c.buf = make([]byte, length) + } else { + c.buf = c.buf[:length] + } + + c.buf[0] = padding + copy(c.buf[1:], packet) + if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { + return err + } + c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if _, err := w.Write(c.buf); err != nil { + return err + } + c.incIV() + + return nil +} + +func (c *gcmCipher) incIV() { + for i := 4 + 7; i >= 4; i-- { + c.iv[i]++ + if c.iv[i] != 0 { + break + } + } +} + +func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + if _, err := io.ReadFull(r, c.prefix[:]); err != nil { + return nil, err + } + length := binary.BigEndian.Uint32(c.prefix[:]) + if length > maxPacket { + return nil, errors.New("ssh: max packet length exceeded.") + } + + if cap(c.buf) < int(length+gcmTagSize) { + c.buf = make([]byte, length+gcmTagSize) + } else { + c.buf = c.buf[:length+gcmTagSize] + } + + if _, err := io.ReadFull(r, c.buf); err != nil { + return nil, err + } + + plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) + if err != nil { + return nil, err + } + c.incIV() + + padding := plain[0] + if padding < 4 || padding >= 20 { + return nil, fmt.Errorf("ssh: illegal padding %d", padding) + } + + if int(padding+1) >= len(plain) { + return nil, fmt.Errorf("ssh: padding %d too large", padding) + } + plain = plain[1 : length-uint32(padding)] + return plain, nil +} + +// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 +type cbcCipher struct { + mac hash.Hash + macSize uint32 + decrypter cipher.BlockMode + encrypter cipher.BlockMode + + // The following members are to avoid per-packet allocations. + seqNumBytes [4]byte + packetData []byte + macResult []byte + + // Amount of data we should still read to hide which + // verification error triggered. 
+ oracleCamouflage uint32 +} + +func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + cbc := &cbcCipher{ + mac: macModes[algs.MAC].new(macKey), + decrypter: cipher.NewCBCDecrypter(c, iv), + encrypter: cipher.NewCBCEncrypter(c, iv), + packetData: make([]byte, 1024), + } + if cbc.mac != nil { + cbc.macSize = uint32(cbc.mac.Size()) + } + + return cbc, nil +} + +func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, iv, key, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { + c, err := des.NewTripleDESCipher(key) + if err != nil { + return nil, err + } + + cbc, err := newCBCCipher(c, iv, key, macKey, algs) + if err != nil { + return nil, err + } + + return cbc, nil +} + +func maxUInt32(a, b int) uint32 { + if a > b { + return uint32(a) + } + return uint32(b) +} + +const ( + cbcMinPacketSizeMultiple = 8 + cbcMinPacketSize = 16 + cbcMinPaddingSize = 4 +) + +// cbcError represents a verification error that may leak information. +type cbcError string + +func (e cbcError) Error() string { return string(e) } + +func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { + p, err := c.readPacketLeaky(seqNum, r) + if err != nil { + if _, ok := err.(cbcError); ok { + // Verification error: read a fixed amount of + // data, to make distinguishing between + // failing MAC and failing length check more + // difficult. + io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) + } + } + return p, err +} + +func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { + blockSize := c.decrypter.BlockSize() + + // Read the header, which will include some of the subsequent data in the + // case of block ciphers - this is copied back to the payload later. + // How many bytes of payload/padding will be read with this first read. + firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) + firstBlock := c.packetData[:firstBlockLength] + if _, err := io.ReadFull(r, firstBlock); err != nil { + return nil, err + } + + c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength + + c.decrypter.CryptBlocks(firstBlock, firstBlock) + length := binary.BigEndian.Uint32(firstBlock[:4]) + if length > maxPacket { + return nil, cbcError("ssh: packet too large") + } + if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { + // The minimum size of a packet is 16 (or the cipher block size, whichever + // is larger) bytes. + return nil, cbcError("ssh: packet too small") + } + // The length of the packet (including the length field but not the MAC) must + // be a multiple of the block size or 8, whichever is larger. + if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { + return nil, cbcError("ssh: invalid packet length multiple") + } + + paddingLength := uint32(firstBlock[4]) + if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { + return nil, cbcError("ssh: invalid packet length") + } + + // Positions within the c.packetData buffer: + macStart := 4 + length + paddingStart := macStart - paddingLength + + // Entire packet size, starting before length, ending at end of mac. + entirePacketSize := macStart + c.macSize + + // Ensure c.packetData is large enough for the entire packet data. 
+ if uint32(cap(c.packetData)) < entirePacketSize { + // Still need to upsize and copy, but this should be rare at runtime, only + // on upsizing the packetData buffer. + c.packetData = make([]byte, entirePacketSize) + copy(c.packetData, firstBlock) + } else { + c.packetData = c.packetData[:entirePacketSize] + } + + if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil { + return nil, err + } else { + c.oracleCamouflage -= uint32(n) + } + + remainingCrypted := c.packetData[firstBlockLength:macStart] + c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) + + mac := c.packetData[macStart:] + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData[:macStart]) + c.macResult = c.mac.Sum(c.macResult[:0]) + if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { + return nil, cbcError("ssh: MAC failure") + } + } + + return c.packetData[prefixLen:paddingStart], nil +} + +func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { + effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) + + // Length of encrypted portion of the packet (header, payload, padding). + // Enforce minimum padding and packet size. + encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) + // Enforce block size. + encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize + + length := encLength - 4 + paddingLength := int(length) - (1 + len(packet)) + + // Overall buffer contains: header, payload, padding, mac. + // Space for the MAC is reserved in the capacity but not the slice length. + bufferSize := encLength + c.macSize + if uint32(cap(c.packetData)) < bufferSize { + c.packetData = make([]byte, encLength, bufferSize) + } else { + c.packetData = c.packetData[:encLength] + } + + p := c.packetData + + // Packet header. + binary.BigEndian.PutUint32(p, length) + p = p[4:] + p[0] = byte(paddingLength) + + // Payload. + p = p[1:] + copy(p, packet) + + // Padding. + p = p[len(packet):] + if _, err := io.ReadFull(rand, p); err != nil { + return err + } + + if c.mac != nil { + c.mac.Reset() + binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) + c.mac.Write(c.seqNumBytes[:]) + c.mac.Write(c.packetData) + // The MAC is now appended into the capacity reserved for it earlier. + c.packetData = c.mac.Sum(c.packetData) + } + + c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) + + if _, err := w.Write(c.packetData); err != nil { + return err + } + + return nil +} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go new file mode 100644 index 0000000000000000000000000000000000000000..c97f2978e8f6c61a771d0e36ddc5ab8f9daf6758 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client.go @@ -0,0 +1,211 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "net" + "sync" + "time" +) + +// Client implements a traditional SSH client that supports shells, +// subprocesses, port forwarding and tunneled dialing. +type Client struct { + Conn + + forwards forwardList // forwarded tcpip connections from the remote side + mu sync.Mutex + channelHandlers map[string]chan NewChannel +} + +// HandleChannelOpen returns a channel on which NewChannel requests +// for the given type are sent. 
If the type already is being handled, +// nil is returned. The channel is closed when the connection is closed. +func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { + c.mu.Lock() + defer c.mu.Unlock() + if c.channelHandlers == nil { + // The SSH channel has been closed. + c := make(chan NewChannel) + close(c) + return c + } + + ch := c.channelHandlers[channelType] + if ch != nil { + return nil + } + + ch = make(chan NewChannel, chanSize) + c.channelHandlers[channelType] = ch + return ch +} + +// NewClient creates a Client on top of the given connection. +func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { + conn := &Client{ + Conn: c, + channelHandlers: make(map[string]chan NewChannel, 1), + } + + go conn.handleGlobalRequests(reqs) + go conn.handleChannelOpens(chans) + go func() { + conn.Wait() + conn.forwards.closeAll() + }() + go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip")) + return conn +} + +// NewClientConn establishes an authenticated SSH connection using c +// as the underlying transport. The Request and NewChannel channels +// must be serviced or the connection will hang. +func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + conn := &connection{ + sshConn: sshConn{conn: c}, + } + + if err := conn.clientHandshake(addr, &fullConf); err != nil { + c.Close() + return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) + } + conn.mux = newMux(conn.transport) + return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil +} + +// clientHandshake performs the client side key exchange. See RFC 4253 Section +// 7. +func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { + if config.ClientVersion != "" { + c.clientVersion = []byte(config.ClientVersion) + } else { + c.clientVersion = []byte(packageVersion) + } + var err error + c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) + if err != nil { + return err + } + + c.transport = newClientTransport( + newTransport(c.sshConn.conn, config.Rand, true /* is client */), + c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) + if err := c.transport.waitSession(); err != nil { + return err + } + + c.sessionID = c.transport.getSessionID() + return c.clientAuthenticate(config) +} + +// verifyHostKeySignature verifies the host key obtained in the key +// exchange. +func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { + sig, rest, ok := parseSignatureBody(result.Signature) + if len(rest) > 0 || !ok { + return errors.New("ssh: signature parse error") + } + + return hostKey.Verify(result.H, sig) +} + +// NewSession opens a new Session for this client. (A session is a remote +// execution of a program.) +func (c *Client) NewSession() (*Session, error) { + ch, in, err := c.OpenChannel("session", nil) + if err != nil { + return nil, err + } + return newSession(ch, in) +} + +func (c *Client) handleGlobalRequests(incoming <-chan *Request) { + for r := range incoming { + // This handles keepalive messages and matches + // the behaviour of OpenSSH. + r.Reply(false, nil) + } +} + +// handleChannelOpens channel open messages from the remote side. 
+func (c *Client) handleChannelOpens(in <-chan NewChannel) { + for ch := range in { + c.mu.Lock() + handler := c.channelHandlers[ch.ChannelType()] + c.mu.Unlock() + + if handler != nil { + handler <- ch + } else { + ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) + } + } + + c.mu.Lock() + for _, ch := range c.channelHandlers { + close(ch) + } + c.channelHandlers = nil + c.mu.Unlock() +} + +// Dial starts a client connection to the given SSH server. It is a +// convenience function that connects to the given network address, +// initiates the SSH handshake, and then sets up a Client. For access +// to incoming channels and requests, use net.Dial with NewClientConn +// instead. +func Dial(network, addr string, config *ClientConfig) (*Client, error) { + conn, err := net.DialTimeout(network, addr, config.Timeout) + if err != nil { + return nil, err + } + c, chans, reqs, err := NewClientConn(conn, addr, config) + if err != nil { + return nil, err + } + return NewClient(c, chans, reqs), nil +} + +// A ClientConfig structure is used to configure a Client. It must not be +// modified after having been passed to an SSH function. +type ClientConfig struct { + // Config contains configuration that is shared between clients and + // servers. + Config + + // User contains the username to authenticate as. + User string + + // Auth contains possible authentication methods to use with the + // server. Only the first instance of a particular RFC 4252 method will + // be used during authentication. + Auth []AuthMethod + + // HostKeyCallback, if not nil, is called during the cryptographic + // handshake to validate the server's host key. A nil HostKeyCallback + // implies that all host keys are accepted. + HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + + // ClientVersion contains the version identification string that will + // be used for the connection. If empty, a reasonable default is used. + ClientVersion string + + // HostKeyAlgorithms lists the key types that the client will + // accept from the server as host key, in order of + // preference. If empty, a reasonable default is used. Any + // string returned from PublicKey.Type method may be used, or + // any of the CertAlgoXxxx and KeyAlgoXxxx constants. + HostKeyAlgorithms []string + + // Timeout is the maximum amount of time for the TCP connection to establish. + // + // A Timeout of zero means no timeout. + Timeout time.Duration +} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go new file mode 100644 index 0000000000000000000000000000000000000000..fd1ec5dda6e6e06cdb46aee18e750358f1ca80aa --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/client_auth.go @@ -0,0 +1,475 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// clientAuthenticate authenticates with the remote server. See RFC 4252. 
+func (c *connection) clientAuthenticate(config *ClientConfig) error { + // initiate user auth session + if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { + return err + } + packet, err := c.transport.readPacket() + if err != nil { + return err + } + var serviceAccept serviceAcceptMsg + if err := Unmarshal(packet, &serviceAccept); err != nil { + return err + } + + // during the authentication phase the client first attempts the "none" method + // then any untried methods suggested by the server. + tried := make(map[string]bool) + var lastMethods []string + + sessionID := c.transport.getSessionID() + for auth := AuthMethod(new(noneAuth)); auth != nil; { + ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) + if err != nil { + return err + } + if ok { + // success + return nil + } + tried[auth.method()] = true + if methods == nil { + methods = lastMethods + } + lastMethods = methods + + auth = nil + + findNext: + for _, a := range config.Auth { + candidateMethod := a.method() + if tried[candidateMethod] { + continue + } + for _, meth := range methods { + if meth == candidateMethod { + auth = a + break findNext + } + } + } + } + return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) +} + +func keys(m map[string]bool) []string { + s := make([]string, 0, len(m)) + + for key := range m { + s = append(s, key) + } + return s +} + +// An AuthMethod represents an instance of an RFC 4252 authentication method. +type AuthMethod interface { + // auth authenticates user over transport t. + // Returns true if authentication is successful. + // If authentication is not successful, a []string of alternative + // method names is returned. If the slice is nil, it will be ignored + // and the previous set of possible methods will be reused. + auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error) + + // method returns the RFC 4252 method name. + method() string +} + +// "none" authentication, RFC 4252 section 5.2. +type noneAuth int + +func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + if err := c.writePacket(Marshal(&userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: "none", + })); err != nil { + return false, nil, err + } + + return handleAuthResponse(c) +} + +func (n *noneAuth) method() string { + return "none" +} + +// passwordCallback is an AuthMethod that fetches the password through +// a function call, e.g. by prompting the user. +type passwordCallback func() (password string, err error) + +func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + type passwordAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + Reply bool + Password string + } + + pw, err := cb() + // REVIEW NOTE: is there a need to support skipping a password attempt? + // The program may only find out that the user doesn't have a password + // when prompting. + if err != nil { + return false, nil, err + } + + if err := c.writePacket(Marshal(&passwordAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + Reply: false, + Password: pw, + })); err != nil { + return false, nil, err + } + + return handleAuthResponse(c) +} + +func (cb passwordCallback) method() string { + return "password" +} + +// Password returns an AuthMethod using the given password. 
+func Password(secret string) AuthMethod { + return passwordCallback(func() (string, error) { return secret, nil }) +} + +// PasswordCallback returns an AuthMethod that uses a callback for +// fetching a password. +func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { + return passwordCallback(prompt) +} + +type publickeyAuthMsg struct { + User string `sshtype:"50"` + Service string + Method string + // HasSig indicates to the receiver packet that the auth request is signed and + // should be used for authentication of the request. + HasSig bool + Algoname string + PubKey []byte + // Sig is tagged with "rest" so Marshal will exclude it during + // validateKey + Sig []byte `ssh:"rest"` +} + +// publicKeyCallback is an AuthMethod that uses a set of key +// pairs for authentication. +type publicKeyCallback func() ([]Signer, error) + +func (cb publicKeyCallback) method() string { + return "publickey" +} + +func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + // Authentication is performed in two stages. The first stage sends an + // enquiry to test if each key is acceptable to the remote. The second + // stage attempts to authenticate with the valid keys obtained in the + // first stage. + + signers, err := cb() + if err != nil { + return false, nil, err + } + var validKeys []Signer + for _, signer := range signers { + if ok, err := validateKey(signer.PublicKey(), user, c); ok { + validKeys = append(validKeys, signer) + } else { + if err != nil { + return false, nil, err + } + } + } + + // methods that may continue if this auth is not successful. + var methods []string + for _, signer := range validKeys { + pub := signer.PublicKey() + + pubKey := pub.Marshal() + sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + }, []byte(pub.Type()), pubKey)) + if err != nil { + return false, nil, err + } + + // manually wrap the serialized signature in a string + s := Marshal(sign) + sig := make([]byte, stringLength(len(s))) + marshalString(sig, s) + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: cb.method(), + HasSig: true, + Algoname: pub.Type(), + PubKey: pubKey, + Sig: sig, + } + p := Marshal(&msg) + if err := c.writePacket(p); err != nil { + return false, nil, err + } + var success bool + success, methods, err = handleAuthResponse(c) + if err != nil { + return false, nil, err + } + if success { + return success, methods, err + } + } + return false, methods, nil +} + +// validateKey validates the key provided is acceptable to the server. 
+func validateKey(key PublicKey, user string, c packetConn) (bool, error) { + pubKey := key.Marshal() + msg := publickeyAuthMsg{ + User: user, + Service: serviceSSH, + Method: "publickey", + HasSig: false, + Algoname: key.Type(), + PubKey: pubKey, + } + if err := c.writePacket(Marshal(&msg)); err != nil { + return false, err + } + + return confirmKeyAck(key, c) +} + +func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { + pubKey := key.Marshal() + algoname := key.Type() + + for { + packet, err := c.readPacket() + if err != nil { + return false, err + } + switch packet[0] { + case msgUserAuthBanner: + // TODO(gpaul): add callback to present the banner to the user + case msgUserAuthPubKeyOk: + var msg userAuthPubKeyOkMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, err + } + if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { + return false, nil + } + return true, nil + case msgUserAuthFailure: + return false, nil + default: + return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +// PublicKeys returns an AuthMethod that uses the given key +// pairs. +func PublicKeys(signers ...Signer) AuthMethod { + return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) +} + +// PublicKeysCallback returns an AuthMethod that runs the given +// function to obtain a list of key pairs. +func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { + return publicKeyCallback(getSigners) +} + +// handleAuthResponse returns whether the preceding authentication request succeeded +// along with a list of remaining authentication methods to try next and +// an error if an unexpected response was received. +func handleAuthResponse(c packetConn) (bool, []string, error) { + for { + packet, err := c.readPacket() + if err != nil { + return false, nil, err + } + + switch packet[0] { + case msgUserAuthBanner: + // TODO: add callback to present the banner to the user + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, nil, err + } + return false, msg.Methods, nil + case msgUserAuthSuccess: + return true, nil, nil + default: + return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) + } + } +} + +// KeyboardInteractiveChallenge should print questions, optionally +// disabling echoing (e.g. for passwords), and return all the answers. +// Challenge may be called multiple times in a single session. After +// successful authentication, the server may send a challenge with no +// questions, for which the user and instruction messages should be +// printed. RFC 4256 section 3.3 details how the UI should behave for +// both CLI and GUI environments. +type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) + +// KeyboardInteractive returns a AuthMethod using a prompt/response +// sequence controlled by the server. 
+func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { + return challenge +} + +func (cb KeyboardInteractiveChallenge) method() string { + return "keyboard-interactive" +} + +func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { + type initiateMsg struct { + User string `sshtype:"50"` + Service string + Method string + Language string + Submethods string + } + + if err := c.writePacket(Marshal(&initiateMsg{ + User: user, + Service: serviceSSH, + Method: "keyboard-interactive", + })); err != nil { + return false, nil, err + } + + for { + packet, err := c.readPacket() + if err != nil { + return false, nil, err + } + + // like handleAuthResponse, but with less options. + switch packet[0] { + case msgUserAuthBanner: + // TODO: Print banners during userauth. + continue + case msgUserAuthInfoRequest: + // OK + case msgUserAuthFailure: + var msg userAuthFailureMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, nil, err + } + return false, msg.Methods, nil + case msgUserAuthSuccess: + return true, nil, nil + default: + return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) + } + + var msg userAuthInfoRequestMsg + if err := Unmarshal(packet, &msg); err != nil { + return false, nil, err + } + + // Manually unpack the prompt/echo pairs. + rest := msg.Prompts + var prompts []string + var echos []bool + for i := 0; i < int(msg.NumPrompts); i++ { + prompt, r, ok := parseString(rest) + if !ok || len(r) == 0 { + return false, nil, errors.New("ssh: prompt format error") + } + prompts = append(prompts, string(prompt)) + echos = append(echos, r[0] != 0) + rest = r[1:] + } + + if len(rest) != 0 { + return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs") + } + + answers, err := cb(msg.User, msg.Instruction, prompts, echos) + if err != nil { + return false, nil, err + } + + if len(answers) != len(prompts) { + return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") + } + responseLength := 1 + 4 + for _, a := range answers { + responseLength += stringLength(len(a)) + } + serialized := make([]byte, responseLength) + p := serialized + p[0] = msgUserAuthInfoResponse + p = p[1:] + p = marshalUint32(p, uint32(len(answers))) + for _, a := range answers { + p = marshalString(p, []byte(a)) + } + + if err := c.writePacket(serialized); err != nil { + return false, nil, err + } + } +} + +type retryableAuthMethod struct { + authMethod AuthMethod + maxTries int +} + +func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) { + for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { + ok, methods, err = r.authMethod.auth(session, user, c, rand) + if ok || err != nil { // either success or error terminate + return ok, methods, err + } + } + return ok, methods, err +} + +func (r *retryableAuthMethod) method() string { + return r.authMethod.method() +} + +// RetryableAuthMethod is a decorator for other auth methods enabling them to +// be retried up to maxTries before considering that AuthMethod itself failed. +// If maxTries is <= 0, will retry indefinitely +// +// This is useful for interactive clients using challenge/response type +// authentication (e.g. 
Keyboard-Interactive, Password, etc) where the user +// could mistype their response resulting in the server issuing a +// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 +// [keyboard-interactive]); Without this decorator, the non-retryable +// AuthMethod would be removed from future consideration, and never tried again +// (and so the user would never be able to retry their entry). +func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { + return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} +} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go new file mode 100644 index 0000000000000000000000000000000000000000..8656d0f85d2cb9c2428a8a2306220ba55f9a6625 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/common.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/rand" + "fmt" + "io" + "sync" + + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" +) + +// These are string constants in the SSH protocol. +const ( + compressionNone = "none" + serviceUserAuth = "ssh-userauth" + serviceSSH = "ssh-connection" +) + +// supportedCiphers specifies the supported ciphers in preference order. +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", + "arcfour256", "arcfour128", +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA1, kexAlgoDH1SHA1, +} + +// supportedKexAlgos specifies the supported host-key algorithms (i.e. methods +// of authenticating servers) in preference order. +var supportedHostKeyAlgos = []string{ + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, + CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, + + KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, + KeyAlgoRSA, KeyAlgoDSA, + + KeyAlgoED25519, +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. +// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", +} + +var supportedCompressions = []string{compressionNone} + +// hashFuncs keeps the mapping of supported algorithms to their respective +// hashes needed for signature verification. +var hashFuncs = map[string]crypto.Hash{ + KeyAlgoRSA: crypto.SHA1, + KeyAlgoDSA: crypto.SHA1, + KeyAlgoECDSA256: crypto.SHA256, + KeyAlgoECDSA384: crypto.SHA384, + KeyAlgoECDSA521: crypto.SHA512, + CertAlgoRSAv01: crypto.SHA1, + CertAlgoDSAv01: crypto.SHA1, + CertAlgoECDSA256v01: crypto.SHA256, + CertAlgoECDSA384v01: crypto.SHA384, + CertAlgoECDSA521v01: crypto.SHA512, +} + +// unexpectedMessageError results when the SSH message that we received didn't +// match what we wanted. +func unexpectedMessageError(expected, got uint8) error { + return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) +} + +// parseError results from a malformed SSH message. 
+func parseError(tag uint8) error { + return fmt.Errorf("ssh: parse error in message type %d", tag) +} + +func findCommon(what string, client []string, server []string) (common string, err error) { + for _, c := range client { + for _, s := range server { + if c == s { + return c, nil + } + } + } + return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) +} + +type directionAlgorithms struct { + Cipher string + MAC string + Compression string +} + +// rekeyBytes returns a rekeying intervals in bytes. +func (a *directionAlgorithms) rekeyBytes() int64 { + // According to RFC4344 block ciphers should rekey after + // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is + // 128. + switch a.Cipher { + case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: + return 16 * (1 << 32) + + } + + // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. + return 1 << 30 +} + +type algorithms struct { + kex string + hostKey string + w directionAlgorithms + r directionAlgorithms +} + +func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { + result := &algorithms{} + + result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) + if err != nil { + return + } + + result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) + if err != nil { + return + } + + result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) + if err != nil { + return + } + + result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) + if err != nil { + return + } + + result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) + if err != nil { + return + } + + result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) + if err != nil { + return + } + + result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) + if err != nil { + return + } + + result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) + if err != nil { + return + } + + return result, nil +} + +// If rekeythreshold is too small, we can't make any progress sending +// stuff. +const minRekeyThreshold uint64 = 256 + +// Config contains configuration data common to both ServerConfig and +// ClientConfig. +type Config struct { + // Rand provides the source of entropy for cryptographic + // primitives. If Rand is nil, the cryptographic random reader + // in package crypto/rand will be used. + Rand io.Reader + + // The maximum number of bytes sent or received after which a + // new key is negotiated. It must be at least 256. If + // unspecified, 1 gigabyte is used. + RekeyThreshold uint64 + + // The allowed key exchanges algorithms. If unspecified then a + // default set of algorithms is used. + KeyExchanges []string + + // The allowed cipher algorithms. If unspecified then a sensible + // default is used. + Ciphers []string + + // The allowed MAC algorithms. If unspecified then a sensible default + // is used. + MACs []string +} + +// SetDefaults sets sensible values for unset fields in config. 
This is +// exported for testing: Configs passed to SSH functions are copied and have +// default values set automatically. +func (c *Config) SetDefaults() { + if c.Rand == nil { + c.Rand = rand.Reader + } + if c.Ciphers == nil { + c.Ciphers = supportedCiphers + } + var ciphers []string + for _, c := range c.Ciphers { + if cipherModes[c] != nil { + // reject the cipher if we have no cipherModes definition + ciphers = append(ciphers, c) + } + } + c.Ciphers = ciphers + + if c.KeyExchanges == nil { + c.KeyExchanges = supportedKexAlgos + } + + if c.MACs == nil { + c.MACs = supportedMACs + } + + if c.RekeyThreshold == 0 { + // RFC 4253, section 9 suggests rekeying after 1G. + c.RekeyThreshold = 1 << 30 + } + if c.RekeyThreshold < minRekeyThreshold { + c.RekeyThreshold = minRekeyThreshold + } +} + +// buildDataSignedForAuth returns the data that is signed in order to prove +// possession of a private key. See RFC 4252, section 7. +func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { + data := struct { + Session []byte + Type byte + User string + Service string + Method string + Sign bool + Algo []byte + PubKey []byte + }{ + sessionId, + msgUserAuthRequest, + req.User, + req.Service, + req.Method, + true, + algo, + pubKey, + } + return Marshal(data) +} + +func appendU16(buf []byte, n uint16) []byte { + return append(buf, byte(n>>8), byte(n)) +} + +func appendU32(buf []byte, n uint32) []byte { + return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendU64(buf []byte, n uint64) []byte { + return append(buf, + byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), + byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) +} + +func appendInt(buf []byte, n int) []byte { + return appendU32(buf, uint32(n)) +} + +func appendString(buf []byte, s string) []byte { + buf = appendU32(buf, uint32(len(s))) + buf = append(buf, s...) + return buf +} + +func appendBool(buf []byte, b bool) []byte { + if b { + return append(buf, 1) + } + return append(buf, 0) +} + +// newCond is a helper to hide the fact that there is no usable zero +// value for sync.Cond. +func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } + +// window represents the buffer available to clients +// wishing to write to a channel. +type window struct { + *sync.Cond + win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 + writeWaiters int + closed bool +} + +// add adds win to the amount of window available +// for consumers. +func (w *window) add(win uint32) bool { + // a zero sized window adjust is a noop. + if win == 0 { + return true + } + w.L.Lock() + if w.win+win < win { + w.L.Unlock() + return false + } + w.win += win + // It is unusual that multiple goroutines would be attempting to reserve + // window space, but not guaranteed. Use broadcast to notify all waiters + // that additional window is available. + w.Broadcast() + w.L.Unlock() + return true +} + +// close sets the window to closed, so all reservations fail +// immediately. +func (w *window) close() { + w.L.Lock() + w.closed = true + w.Broadcast() + w.L.Unlock() +} + +// reserve reserves win from the available window capacity. +// If no capacity remains, reserve will block. reserve may +// return less than requested. 
+func (w *window) reserve(win uint32) (uint32, error) { + var err error + w.L.Lock() + w.writeWaiters++ + w.Broadcast() + for w.win == 0 && !w.closed { + w.Wait() + } + w.writeWaiters-- + if w.win < win { + win = w.win + } + w.win -= win + if w.closed { + err = io.EOF + } + w.L.Unlock() + return win, err +} + +// waitWriterBlocked waits until some goroutine is blocked for further +// writes. It is used in tests only. +func (w *window) waitWriterBlocked() { + w.Cond.L.Lock() + for w.writeWaiters == 0 { + w.Cond.Wait() + } + w.Cond.L.Unlock() +} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go new file mode 100644 index 0000000000000000000000000000000000000000..e786f2f9a20562511a303d451aef07486c143569 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/connection.go @@ -0,0 +1,143 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "fmt" + "net" +) + +// OpenChannelError is returned if the other side rejects an +// OpenChannel request. +type OpenChannelError struct { + Reason RejectionReason + Message string +} + +func (e *OpenChannelError) Error() string { + return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) +} + +// ConnMetadata holds metadata for the connection. +type ConnMetadata interface { + // User returns the user ID for this connection. + User() string + + // SessionID returns the sesson hash, also denoted by H. + SessionID() []byte + + // ClientVersion returns the client's version string as hashed + // into the session ID. + ClientVersion() []byte + + // ServerVersion returns the server's version string as hashed + // into the session ID. + ServerVersion() []byte + + // RemoteAddr returns the remote address for this connection. + RemoteAddr() net.Addr + + // LocalAddr returns the local address for this connection. + LocalAddr() net.Addr +} + +// Conn represents an SSH connection for both server and client roles. +// Conn is the basis for implementing an application layer, such +// as ClientConn, which implements the traditional shell access for +// clients. +type Conn interface { + ConnMetadata + + // SendRequest sends a global request, and returns the + // reply. If wantReply is true, it returns the response status + // and payload. See also RFC4254, section 4. + SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) + + // OpenChannel tries to open an channel. If the request is + // rejected, it returns *OpenChannelError. On success it returns + // the SSH Channel and a Go channel for incoming, out-of-band + // requests. The Go channel must be serviced, or the + // connection will hang. + OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) + + // Close closes the underlying network connection + Close() error + + // Wait blocks until the connection has shut down, and returns the + // error causing the shutdown. + Wait() error + + // TODO(hanwen): consider exposing: + // RequestKeyChange + // Disconnect +} + +// DiscardRequests consumes and rejects all requests from the +// passed-in channel. +func DiscardRequests(in <-chan *Request) { + for req := range in { + if req.WantReply { + req.Reply(false, nil) + } + } +} + +// A connection represents an incoming connection. +type connection struct { + transport *handshakeTransport + sshConn + + // The connection protocol. 
+ *mux +} + +func (c *connection) Close() error { + return c.sshConn.conn.Close() +} + +// sshconn provides net.Conn metadata, but disallows direct reads and +// writes. +type sshConn struct { + conn net.Conn + + user string + sessionID []byte + clientVersion []byte + serverVersion []byte +} + +func dup(src []byte) []byte { + dst := make([]byte, len(src)) + copy(dst, src) + return dst +} + +func (c *sshConn) User() string { + return c.user +} + +func (c *sshConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *sshConn) Close() error { + return c.conn.Close() +} + +func (c *sshConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *sshConn) SessionID() []byte { + return dup(c.sessionID) +} + +func (c *sshConn) ClientVersion() []byte { + return dup(c.clientVersion) +} + +func (c *sshConn) ServerVersion() []byte { + return dup(c.serverVersion) +} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..d6be8946629210740906a38f1f00fd9bff358122 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/doc.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package ssh implements an SSH client and server. + +SSH is a transport security protocol, an authentication protocol and a +family of application protocols. The most typical application level +protocol is a remote shell and this is specifically implemented. However, +the multiplexed nature of SSH is exposed to users that wish to support +others. + +References: + [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD + [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 +*/ +package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go new file mode 100644 index 0000000000000000000000000000000000000000..8de650644a5ba818ae13fad5a01d6a62fe093e5d --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -0,0 +1,625 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto/rand" + "errors" + "fmt" + "io" + "log" + "net" + "sync" +) + +// debugHandshake, if set, prints messages sent and received. Key +// exchange messages are printed as if DH were used, so the debug +// messages are wrong when using ECDH. +const debugHandshake = false + +// chanSize sets the amount of buffering SSH connections. This is +// primarily for testing: setting chanSize=0 uncovers deadlocks more +// quickly. +const chanSize = 16 + +// keyingTransport is a packet based transport that supports key +// changes. It need not be thread-safe. It should pass through +// msgNewKeys in both directions. +type keyingTransport interface { + packetConn + + // prepareKeyChange sets up a key change. The key change for a + // direction will be effected if a msgNewKeys message is sent + // or received. + prepareKeyChange(*algorithms, *kexResult) error +} + +// handshakeTransport implements rekeying on top of a keyingTransport +// and offers a thread-safe writePacket() interface. 
+type handshakeTransport struct { + conn keyingTransport + config *Config + + serverVersion []byte + clientVersion []byte + + // hostKeys is non-empty if we are the server. In that case, + // it contains all host keys that can be used to sign the + // connection. + hostKeys []Signer + + // hostKeyAlgorithms is non-empty if we are the client. In that case, + // we accept these key types from the server as host key. + hostKeyAlgorithms []string + + // On read error, incoming is closed, and readError is set. + incoming chan []byte + readError error + + mu sync.Mutex + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + pendingPackets [][]byte // Used when a key exchange is in progress. + + // If the read loop wants to schedule a kex, it pings this + // channel, and the write loop will send out a kex + // message. + requestKex chan struct{} + + // If the other side requests or confirms a kex, its kexInit + // packet is sent here for the write loop to find it. + startKex chan *pendingKex + + // data for host key checking + hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error + dialAddress string + remoteAddr net.Addr + + // Algorithms agreed in the last key exchange. + algorithms *algorithms + + readPacketsLeft uint32 + readBytesLeft int64 + + writePacketsLeft uint32 + writeBytesLeft int64 + + // The session ID or nil if first kex did not complete yet. + sessionID []byte +} + +type pendingKex struct { + otherInit []byte + done chan error +} + +func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { + t := &handshakeTransport{ + conn: conn, + serverVersion: serverVersion, + clientVersion: clientVersion, + incoming: make(chan []byte, chanSize), + requestKex: make(chan struct{}, 1), + startKex: make(chan *pendingKex, 1), + + config: config, + } + + // We always start with a mandatory key exchange. + t.requestKex <- struct{}{} + return t +} + +func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.dialAddress = dialAddr + t.remoteAddr = addr + t.hostKeyCallback = config.HostKeyCallback + if config.HostKeyAlgorithms != nil { + t.hostKeyAlgorithms = config.HostKeyAlgorithms + } else { + t.hostKeyAlgorithms = supportedHostKeyAlgos + } + go t.readLoop() + go t.kexLoop() + return t +} + +func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { + t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) + t.hostKeys = config.hostKeys + go t.readLoop() + go t.kexLoop() + return t +} + +func (t *handshakeTransport) getSessionID() []byte { + return t.sessionID +} + +// waitSession waits for the session to be established. This should be +// the first thing to call after instantiating handshakeTransport. 
+func (t *handshakeTransport) waitSession() error { + p, err := t.readPacket() + if err != nil { + return err + } + if p[0] != msgNewKeys { + return fmt.Errorf("ssh: first packet should be msgNewKeys") + } + + return nil +} + +func (t *handshakeTransport) id() string { + if len(t.hostKeys) > 0 { + return "server" + } + return "client" +} + +func (t *handshakeTransport) printPacket(p []byte, write bool) { + action := "got" + if write { + action = "sent" + } + + if p[0] == msgChannelData || p[0] == msgChannelExtendedData { + log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) + } else { + msg, err := decode(p) + log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) + } +} + +func (t *handshakeTransport) readPacket() ([]byte, error) { + p, ok := <-t.incoming + if !ok { + return nil, t.readError + } + return p, nil +} + +func (t *handshakeTransport) readLoop() { + first := true + for { + p, err := t.readOnePacket(first) + first = false + if err != nil { + t.readError = err + close(t.incoming) + break + } + if p[0] == msgIgnore || p[0] == msgDebug { + continue + } + t.incoming <- p + } + + // Stop writers too. + t.recordWriteError(t.readError) + + // Unblock the writer should it wait for this. + close(t.startKex) + + // Don't close t.requestKex; it's also written to from writePacket. +} + +func (t *handshakeTransport) pushPacket(p []byte) error { + if debugHandshake { + t.printPacket(p, true) + } + return t.conn.writePacket(p) +} + +func (t *handshakeTransport) getWriteError() error { + t.mu.Lock() + defer t.mu.Unlock() + return t.writeError +} + +func (t *handshakeTransport) recordWriteError(err error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError == nil && err != nil { + t.writeError = err + } +} + +func (t *handshakeTransport) requestKeyExchange() { + select { + case t.requestKex <- struct{}{}: + default: + // something already requested a kex, so do nothing. + } +} + +func (t *handshakeTransport) kexLoop() { + +write: + for t.getWriteError() == nil { + var request *pendingKex + var sent bool + + for request == nil || !sent { + var ok bool + select { + case request, ok = <-t.startKex: + if !ok { + break write + } + case <-t.requestKex: + break + } + + if !sent { + if err := t.sendKexInit(); err != nil { + t.recordWriteError(err) + break + } + sent = true + } + } + + if err := t.getWriteError(); err != nil { + if request != nil { + request.done <- err + } + break + } + + // We're not servicing t.requestKex, but that is OK: + // we never block on sending to t.requestKex. + + // We're not servicing t.startKex, but the remote end + // has just sent us a kexInitMsg, so it can't send + // another key change request, until we close the done + // channel on the pendingKex request. + + err := t.enterKeyExchange(request.otherInit) + + t.mu.Lock() + t.writeError = err + t.sentInitPacket = nil + t.sentInitMsg = nil + t.writePacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.writeBytesLeft = int64(t.config.RekeyThreshold) + } else if t.algorithms != nil { + t.writeBytesLeft = t.algorithms.w.rekeyBytes() + } + + // we have completed the key exchange. Since the + // reader is still blocked, it is safe to clear out + // the requestKex channel. 
This avoids the situation + // where: 1) we consumed our own request for the + // initial kex, and 2) the kex from the remote side + // caused another send on the requestKex channel, + clear: + for { + select { + case <-t.requestKex: + // + default: + break clear + } + } + + request.done <- t.writeError + + // kex finished. Push packets that we received while + // the kex was in progress. Don't look at t.startKex + // and don't increment writtenSinceKex: if we trigger + // another kex while we are still busy with the last + // one, things will become very confusing. + for _, p := range t.pendingPackets { + t.writeError = t.pushPacket(p) + if t.writeError != nil { + break + } + } + t.pendingPackets = t.pendingPackets[:0] + t.mu.Unlock() + } + + // drain startKex channel. We don't service t.requestKex + // because nobody does blocking sends there. + go func() { + for init := range t.startKex { + init.done <- t.writeError + } + }() + + // Unblock reader. + t.conn.Close() +} + +// The protocol uses uint32 for packet counters, so we can't let them +// reach 1<<32. We will actually read and write more packets than +// this, though: the other side may send more packets, and after we +// hit this limit on writing we will send a few more packets for the +// key exchange itself. +const packetRekeyThreshold = (1 << 31) + +func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { + p, err := t.conn.readPacket() + if err != nil { + return nil, err + } + + if t.readPacketsLeft > 0 { + t.readPacketsLeft-- + } else { + t.requestKeyExchange() + } + + if t.readBytesLeft > 0 { + t.readBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if debugHandshake { + t.printPacket(p, false) + } + + if first && p[0] != msgKexInit { + return nil, fmt.Errorf("ssh: first packet should be msgKexInit") + } + + if p[0] != msgKexInit { + return p, nil + } + + firstKex := t.sessionID == nil + + kex := pendingKex{ + done: make(chan error, 1), + otherInit: p, + } + t.startKex <- &kex + err = <-kex.done + + if debugHandshake { + log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) + } + + if err != nil { + return nil, err + } + + t.readPacketsLeft = packetRekeyThreshold + if t.config.RekeyThreshold > 0 { + t.readBytesLeft = int64(t.config.RekeyThreshold) + } else { + t.readBytesLeft = t.algorithms.r.rekeyBytes() + } + + // By default, a key exchange is hidden from higher layers by + // translating it into msgIgnore. + successPacket := []byte{msgIgnore} + if firstKex { + // sendKexInit() for the first kex waits for + // msgNewKeys so the authentication process is + // guaranteed to happen over an encrypted transport. + successPacket = []byte{msgNewKeys} + } + + return successPacket, nil +} + +// sendKexInit sends a key change message. +func (t *handshakeTransport) sendKexInit() error { + t.mu.Lock() + defer t.mu.Unlock() + if t.sentInitMsg != nil { + // kexInits may be sent either in response to the other side, + // or because our side wants to initiate a key change, so we + // may have already sent a kexInit. In that case, don't send a + // second kexInit. 
+ return nil + } + + msg := &kexInitMsg{ + KexAlgos: t.config.KeyExchanges, + CiphersClientServer: t.config.Ciphers, + CiphersServerClient: t.config.Ciphers, + MACsClientServer: t.config.MACs, + MACsServerClient: t.config.MACs, + CompressionClientServer: supportedCompressions, + CompressionServerClient: supportedCompressions, + } + io.ReadFull(rand.Reader, msg.Cookie[:]) + + if len(t.hostKeys) > 0 { + for _, k := range t.hostKeys { + msg.ServerHostKeyAlgos = append( + msg.ServerHostKeyAlgos, k.PublicKey().Type()) + } + } else { + msg.ServerHostKeyAlgos = t.hostKeyAlgorithms + } + packet := Marshal(msg) + + // writePacket destroys the contents, so save a copy. + packetCopy := make([]byte, len(packet)) + copy(packetCopy, packet) + + if err := t.pushPacket(packetCopy); err != nil { + return err + } + + t.sentInitMsg = msg + t.sentInitPacket = packet + + return nil +} + +func (t *handshakeTransport) writePacket(p []byte) error { + switch p[0] { + case msgKexInit: + return errors.New("ssh: only handshakeTransport can send kexInit") + case msgNewKeys: + return errors.New("ssh: only handshakeTransport can send newKeys") + } + + t.mu.Lock() + defer t.mu.Unlock() + if t.writeError != nil { + return t.writeError + } + + if t.sentInitMsg != nil { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + + if t.writeBytesLeft > 0 { + t.writeBytesLeft -= int64(len(p)) + } else { + t.requestKeyExchange() + } + + if t.writePacketsLeft > 0 { + t.writePacketsLeft-- + } else { + t.requestKeyExchange() + } + + if err := t.pushPacket(p); err != nil { + t.writeError = err + } + + return nil +} + +func (t *handshakeTransport) Close() error { + return t.conn.Close() +} + +func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { + if debugHandshake { + log.Printf("%s entered key exchange", t.id()) + } + + otherInit := &kexInitMsg{} + if err := Unmarshal(otherInitPacket, otherInit); err != nil { + return err + } + + magics := handshakeMagics{ + clientVersion: t.clientVersion, + serverVersion: t.serverVersion, + clientKexInit: otherInitPacket, + serverKexInit: t.sentInitPacket, + } + + clientInit := otherInit + serverInit := t.sentInitMsg + if len(t.hostKeys) == 0 { + clientInit, serverInit = serverInit, clientInit + + magics.clientKexInit = t.sentInitPacket + magics.serverKexInit = otherInitPacket + } + + var err error + t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit) + if err != nil { + return err + } + + // We don't send FirstKexFollows, but we handle receiving it. + // + // RFC 4253 section 7 defines the kex and the agreement method for + // first_kex_packet_follows. It states that the guessed packet + // should be ignored if the "kex algorithm and/or the host + // key algorithm is guessed wrong (server and client have + // different preferred algorithm), or if any of the other + // algorithms cannot be agreed upon". The other algorithms have + // already been checked above so the kex algorithm and host key + // algorithm are checked here. + if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { + // other side sent a kex message for the wrong algorithm, + // which we have to ignore. 
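+		// Read the guessed packet and discard its contents; only a
+		// read error is propagated.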
+ if _, err := t.conn.readPacket(); err != nil { + return err + } + } + + kex, ok := kexAlgoMap[t.algorithms.kex] + if !ok { + return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) + } + + var result *kexResult + if len(t.hostKeys) > 0 { + result, err = t.server(kex, t.algorithms, &magics) + } else { + result, err = t.client(kex, t.algorithms, &magics) + } + + if err != nil { + return err + } + + if t.sessionID == nil { + t.sessionID = result.H + } + result.SessionID = t.sessionID + + t.conn.prepareKeyChange(t.algorithms, result) + if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { + return err + } + if packet, err := t.conn.readPacket(); err != nil { + return err + } else if packet[0] != msgNewKeys { + return unexpectedMessageError(msgNewKeys, packet[0]) + } + + return nil +} + +func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + var hostKey Signer + for _, k := range t.hostKeys { + if algs.hostKey == k.PublicKey().Type() { + hostKey = k + } + } + + r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) + return r, err +} + +func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { + result, err := kex.Client(t.conn, t.config.Rand, magics) + if err != nil { + return nil, err + } + + hostKey, err := ParsePublicKey(result.HostKey) + if err != nil { + return nil, err + } + + if err := verifyHostKeySignature(hostKey, result); err != nil { + return nil, err + } + + if t.hostKeyCallback != nil { + err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) + if err != nil { + return nil, err + } + } + + return result, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go new file mode 100644 index 0000000000000000000000000000000000000000..c87fbebfde88a2be08fdd9440241cb1a3c4c145a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/kex.go @@ -0,0 +1,540 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" + + "golang.org/x/crypto/curve25519" +) + +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" +) + +// kexResult captures the outcome of a key exchange. +type kexResult struct { + // Session hash. See also RFC 4253, section 8. + H []byte + + // Shared secret. See also RFC 4253, section 8. + K []byte + + // Host key as hashed into H. + HostKey []byte + + // Signature of H. + Signature []byte + + // A cryptographic hash function that matches the security + // level of the key exchange algorithm. It is used for + // calculating H, and for deriving keys from H and K. + Hash crypto.Hash + + // The session ID, which is the first H computed. This is used + // to derive key material inside the transport. + SessionID []byte +} + +// handshakeMagics contains data that is always included in the +// session hash. 
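+// The client and server version strings and the raw KEXINIT payloads are
+// written into the exchange hash H in exactly this order, as required by
+// RFC 4253, section 8.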
+type handshakeMagics struct { + clientVersion, serverVersion []byte + clientKexInit, serverKexInit []byte +} + +func (m *handshakeMagics) write(w io.Writer) { + writeString(w, m.clientVersion) + writeString(w, m.serverVersion) + writeString(w, m.clientKexInit) + writeString(w, m.serverKexInit) +} + +// kexAlgorithm abstracts different key exchange algorithms. +type kexAlgorithm interface { + // Server runs server-side key agreement, signing the result + // with a hostkey. + Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) + + // Client runs the client-side key agreement. Caller is + // responsible for verifying the host key signature. + Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) +} + +// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. +type dhGroup struct { + g, p, pMinus1 *big.Int +} + +func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { + if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { + return nil, errors.New("ssh: DH parameter out of bounds") + } + return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil +} + +func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { + hashFunc := crypto.SHA1 + + var x *big.Int + for { + var err error + if x, err = rand.Int(randSource, group.pMinus1); err != nil { + return nil, err + } + if x.Sign() > 0 { + break + } + } + + X := new(big.Int).Exp(group.g, x, group.p) + kexDHInit := kexDHInitMsg{ + X: X, + } + if err := c.writePacket(Marshal(&kexDHInit)); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexDHReply kexDHReplyMsg + if err = Unmarshal(packet, &kexDHReply); err != nil { + return nil, err + } + + kInt, err := group.diffieHellman(kexDHReply.Y, x) + if err != nil { + return nil, err + } + + h := hashFunc.New() + magics.write(h) + writeString(h, kexDHReply.HostKey) + writeInt(h, X) + writeInt(h, kexDHReply.Y) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: kexDHReply.HostKey, + Signature: kexDHReply.Signature, + Hash: crypto.SHA1, + }, nil +} + +func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + hashFunc := crypto.SHA1 + packet, err := c.readPacket() + if err != nil { + return + } + var kexDHInit kexDHInitMsg + if err = Unmarshal(packet, &kexDHInit); err != nil { + return + } + + var y *big.Int + for { + if y, err = rand.Int(randSource, group.pMinus1); err != nil { + return + } + if y.Sign() > 0 { + break + } + } + + Y := new(big.Int).Exp(group.g, y, group.p) + kInt, err := group.diffieHellman(kexDHInit.X, y) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := hashFunc.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeInt(h, kexDHInit.X) + writeInt(h, Y) + + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. 
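+	// For example, an ssh-rsa host key will hash H again with SHA-1
+	// before producing its PKCS#1 v1.5 signature.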
+ sig, err := signAndMarshal(priv, randSource, H) + if err != nil { + return nil, err + } + + kexDHReply := kexDHReplyMsg{ + HostKey: hostKeyBytes, + Y: Y, + Signature: sig, + } + packet = Marshal(&kexDHReply) + + err = c.writePacket(packet) + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA1, + }, nil +} + +// ecdh performs Elliptic Curve Diffie-Hellman key exchange as +// described in RFC 5656, section 4. +type ecdh struct { + curve elliptic.Curve +} + +func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + kexInit := kexECDHInitMsg{ + ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), + } + + serialized := Marshal(&kexInit) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + + x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) + if err != nil { + return nil, err + } + + // generate shared secret + secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kexInit.ClientPubKey) + writeString(h, reply.EphemeralPubKey) + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: ecHash(kex.curve), + }, nil +} + +// unmarshalECKey parses and checks an EC key. +func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { + x, y = elliptic.Unmarshal(curve, pubkey) + if x == nil { + return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") + } + if !validateECPublicKey(curve, x, y) { + return nil, nil, errors.New("ssh: public key not on curve") + } + return x, y, nil +} + +// validateECPublicKey checks that the point is a valid public key for +// the given curve. See [SEC1], 3.2.2 +func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { + if x.Sign() == 0 && y.Sign() == 0 { + return false + } + + if x.Cmp(curve.Params().P) >= 0 { + return false + } + + if y.Cmp(curve.Params().P) >= 0 { + return false + } + + if !curve.IsOnCurve(x, y) { + return false + } + + // We don't check if N * PubKey == 0, since + // + // - the NIST curves have cofactor = 1, so this is implicit. + // (We don't foresee an implementation that supports non NIST + // curves) + // + // - for ephemeral keys, we don't need to worry about small + // subgroup attacks. + return true +} + +func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var kexECDHInit kexECDHInitMsg + if err = Unmarshal(packet, &kexECDHInit); err != nil { + return nil, err + } + + clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) + if err != nil { + return nil, err + } + + // We could cache this key across multiple users/multiple + // connection attempts, but the benefit is small. OpenSSH + // generates a new key for each incoming connection. 
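+	// A fresh ephemeral key per connection keeps sessions independent:
+	// compromising one connection's ephemeral secret reveals nothing
+	// about other connections.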
+ ephKey, err := ecdsa.GenerateKey(kex.curve, rand) + if err != nil { + return nil, err + } + + hostKeyBytes := priv.PublicKey().Marshal() + + serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) + + // generate shared secret + secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) + + h := ecHash(kex.curve).New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexECDHInit.ClientPubKey) + writeString(h, serializedEphKey) + + K := make([]byte, intLength(secret)) + marshalInt(K, secret) + h.Write(K) + + H := h.Sum(nil) + + // H is already a hash, but the hostkey signing will apply its + // own key-specific hash algorithm. + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: serializedEphKey, + HostKey: hostKeyBytes, + Signature: sig, + } + + serialized := Marshal(&reply) + if err := c.writePacket(serialized); err != nil { + return nil, err + } + + return &kexResult{ + H: H, + K: K, + HostKey: reply.HostKey, + Signature: sig, + Hash: ecHash(kex.curve), + }, nil +} + +var kexAlgoMap = map[string]kexAlgorithm{} + +func init() { + // This is the group called diffie-hellman-group1-sha1 in RFC + // 4253 and Oakley Group 2 in RFC 2409. + p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) + kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + // This is the group called diffie-hellman-group14-sha1 in RFC + // 4253 and Oakley Group 14 in RFC 3526. + p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) + + kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ + g: new(big.Int).SetInt64(2), + p: p, + pMinus1: new(big.Int).Sub(p, bigOne), + } + + kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} + kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} + kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} + kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} +} + +// curve25519sha256 implements the curve25519-sha256@libssh.org key +// agreement protocol, as described in +// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt +type curve25519sha256 struct{} + +type curve25519KeyPair struct { + priv [32]byte + pub [32]byte +} + +func (kp *curve25519KeyPair) generate(rand io.Reader) error { + if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { + return err + } + curve25519.ScalarBaseMult(&kp.pub, &kp.priv) + return nil +} + +// curve25519Zeros is just an array of 32 zero bytes so that we have something +// convenient to compare against in order to reject curve25519 points with the +// wrong order. 
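+// A low-order peer public value makes the scalar multiplication below return
+// all zeros; the ConstantTimeCompare checks use this array to reject it.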
+var curve25519Zeros [32]byte + +func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { + return nil, err + } + + packet, err := c.readPacket() + if err != nil { + return nil, err + } + + var reply kexECDHReplyMsg + if err = Unmarshal(packet, &reply); err != nil { + return nil, err + } + if len(reply.EphemeralPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var servPub, secret [32]byte + copy(servPub[:], reply.EphemeralPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &servPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, reply.HostKey) + writeString(h, kp.pub[:]) + writeString(h, reply.EphemeralPubKey) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + return &kexResult{ + H: h.Sum(nil), + K: K, + HostKey: reply.HostKey, + Signature: reply.Signature, + Hash: crypto.SHA256, + }, nil +} + +func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { + packet, err := c.readPacket() + if err != nil { + return + } + var kexInit kexECDHInitMsg + if err = Unmarshal(packet, &kexInit); err != nil { + return + } + + if len(kexInit.ClientPubKey) != 32 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong length") + } + + var kp curve25519KeyPair + if err := kp.generate(rand); err != nil { + return nil, err + } + + var clientPub, secret [32]byte + copy(clientPub[:], kexInit.ClientPubKey) + curve25519.ScalarMult(&secret, &kp.priv, &clientPub) + if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { + return nil, errors.New("ssh: peer's curve25519 public value has wrong order") + } + + hostKeyBytes := priv.PublicKey().Marshal() + + h := crypto.SHA256.New() + magics.write(h) + writeString(h, hostKeyBytes) + writeString(h, kexInit.ClientPubKey) + writeString(h, kp.pub[:]) + + kInt := new(big.Int).SetBytes(secret[:]) + K := make([]byte, intLength(kInt)) + marshalInt(K, kInt) + h.Write(K) + + H := h.Sum(nil) + + sig, err := signAndMarshal(priv, rand, H) + if err != nil { + return nil, err + } + + reply := kexECDHReplyMsg{ + EphemeralPubKey: kp.pub[:], + HostKey: hostKeyBytes, + Signature: sig, + } + if err := c.writePacket(Marshal(&reply)); err != nil { + return nil, err + } + return &kexResult{ + H: H, + K: K, + HostKey: hostKeyBytes, + Signature: sig, + Hash: crypto.SHA256, + }, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go new file mode 100644 index 0000000000000000000000000000000000000000..f38de9898ca2c0258085c6fe301e2ee8aea6bf4e --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/keys.go @@ -0,0 +1,905 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/md5" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/asn1" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// These constants represent the algorithm names for key types supported by this +// package. +const ( + KeyAlgoRSA = "ssh-rsa" + KeyAlgoDSA = "ssh-dss" + KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" + KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" + KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" + KeyAlgoED25519 = "ssh-ed25519" +) + +// parsePubKey parses a public key of the given algorithm. +// Use ParsePublicKey for keys with prepended algorithm. +func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { + switch algo { + case KeyAlgoRSA: + return parseRSA(in) + case KeyAlgoDSA: + return parseDSA(in) + case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: + return parseECDSA(in) + case KeyAlgoED25519: + return parseED25519(in) + case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: + cert, err := parseCert(in, certToPrivAlgo(algo)) + if err != nil { + return nil, nil, err + } + return cert, nil, nil + } + return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) +} + +// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format +// (see sshd(8) manual page) once the options and key type fields have been +// removed. +func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { + in = bytes.TrimSpace(in) + + i := bytes.IndexAny(in, " \t") + if i == -1 { + i = len(in) + } + base64Key := in[:i] + + key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) + n, err := base64.StdEncoding.Decode(key, base64Key) + if err != nil { + return nil, "", err + } + key = key[:n] + out, err = ParsePublicKey(key) + if err != nil { + return nil, "", err + } + comment = string(bytes.TrimSpace(in[i:])) + return out, comment, nil +} + +// ParseKnownHosts parses an entry in the format of the known_hosts file. +// +// The known_hosts format is documented in the sshd(8) manual page. This +// function will parse a single entry from in. On successful return, marker +// will contain the optional marker value (i.e. "cert-authority" or "revoked") +// or else be empty, hosts will contain the hosts that this entry matches, +// pubKey will contain the public key and comment will contain any trailing +// comment at the end of the line. See the sshd(8) manual page for the various +// forms that a host string can take. +// +// The unparsed remainder of the input will be returned in rest. This function +// can be called repeatedly to parse multiple entries. +// +// If no entries were found in the input then err will be io.EOF. Otherwise a +// non-nil err value indicates a parse error. +func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + // Strip out the beginning of the known_host key. 
+ // This is either an optional marker or a (set of) hostname(s). + keyFields := bytes.Fields(in) + if len(keyFields) < 3 || len(keyFields) > 5 { + return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") + } + + // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated + // list of hosts + marker := "" + if keyFields[0][0] == '@' { + marker = string(keyFields[0][1:]) + keyFields = keyFields[1:] + } + + hosts := string(keyFields[0]) + // keyFields[1] contains the key type (e.g. “ssh-rsaâ€). + // However, that information is duplicated inside the + // base64-encoded key and so is ignored here. + + key := bytes.Join(keyFields[2:], []byte(" ")) + if pubKey, comment, err = parseAuthorizedKey(key); err != nil { + return "", nil, nil, "", nil, err + } + + return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil + } + + return "", nil, nil, "", nil, io.EOF +} + +// ParseAuthorizedKeys parses a public key from an authorized_keys +// file used in OpenSSH according to the sshd(8) manual page. +func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { + for len(in) > 0 { + end := bytes.IndexByte(in, '\n') + if end != -1 { + rest = in[end+1:] + in = in[:end] + } else { + rest = nil + } + + end = bytes.IndexByte(in, '\r') + if end != -1 { + in = in[:end] + } + + in = bytes.TrimSpace(in) + if len(in) == 0 || in[0] == '#' { + in = rest + continue + } + + i := bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + return out, comment, options, rest, nil + } + + // No key type recognised. Maybe there's an options field at + // the beginning. + var b byte + inQuote := false + var candidateOptions []string + optionStart := 0 + for i, b = range in { + isEnd := !inQuote && (b == ' ' || b == '\t') + if (b == ',' && !inQuote) || isEnd { + if i-optionStart > 0 { + candidateOptions = append(candidateOptions, string(in[optionStart:i])) + } + optionStart = i + 1 + } + if isEnd { + break + } + if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { + inQuote = !inQuote + } + } + for i < len(in) && (in[i] == ' ' || in[i] == '\t') { + i++ + } + if i == len(in) { + // Invalid line: unmatched quote + in = rest + continue + } + + in = in[i:] + i = bytes.IndexAny(in, " \t") + if i == -1 { + in = rest + continue + } + + if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { + options = candidateOptions + return out, comment, options, rest, nil + } + + in = rest + continue + } + + return nil, "", nil, nil, errors.New("ssh: no key found") +} + +// ParsePublicKey parses an SSH public key formatted for use in +// the SSH wire protocol according to RFC 4253, section 6.6. +func ParsePublicKey(in []byte) (out PublicKey, err error) { + algo, in, ok := parseString(in) + if !ok { + return nil, errShortRead + } + var rest []byte + out, rest, err = parsePubKey(in, string(algo)) + if len(rest) > 0 { + return nil, errors.New("ssh: trailing junk in public key") + } + + return out, err +} + +// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH +// authorized_keys file. The return value ends with newline. +func MarshalAuthorizedKey(key PublicKey) []byte { + b := &bytes.Buffer{} + b.WriteString(key.Type()) + b.WriteByte(' ') + e := base64.NewEncoder(base64.StdEncoding, b) + e.Write(key.Marshal()) + e.Close() + b.WriteByte('\n') + return b.Bytes() +} + +// PublicKey is an abstraction of different types of public keys. 
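+// Concrete values are produced by, for example, ParsePublicKey,
+// ParseAuthorizedKey, ParseKnownHosts and Signer.PublicKey.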
+type PublicKey interface { + // Type returns the key's type, e.g. "ssh-rsa". + Type() string + + // Marshal returns the serialized key data in SSH wire format, + // with the name prefix. + Marshal() []byte + + // Verify that sig is a signature on the given data using this + // key. This function will hash the data appropriately first. + Verify(data []byte, sig *Signature) error +} + +// CryptoPublicKey, if implemented by a PublicKey, +// returns the underlying crypto.PublicKey form of the key. +type CryptoPublicKey interface { + CryptoPublicKey() crypto.PublicKey +} + +// A Signer can create signatures that verify against a public key. +type Signer interface { + // PublicKey returns an associated PublicKey instance. + PublicKey() PublicKey + + // Sign returns raw signature for the given data. This method + // will apply the hash specified for the keytype to the data. + Sign(rand io.Reader, data []byte) (*Signature, error) +} + +type rsaPublicKey rsa.PublicKey + +func (r *rsaPublicKey) Type() string { + return "ssh-rsa" +} + +// parseRSA parses an RSA key according to RFC 4253, section 6.6. +func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + E *big.Int + N *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + if w.E.BitLen() > 24 { + return nil, nil, errors.New("ssh: exponent too large") + } + e := w.E.Int64() + if e < 3 || e&1 == 0 { + return nil, nil, errors.New("ssh: incorrect exponent") + } + + var key rsa.PublicKey + key.E = int(e) + key.N = w.N + return (*rsaPublicKey)(&key), w.Rest, nil +} + +func (r *rsaPublicKey) Marshal() []byte { + e := new(big.Int).SetInt64(int64(r.E)) + // RSA publickey struct layout should match the struct used by + // parseRSACert in the x/crypto/ssh/agent package. + wirekey := struct { + Name string + E *big.Int + N *big.Int + }{ + KeyAlgoRSA, + e, + r.N, + } + return Marshal(&wirekey) +} + +func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != r.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) +} + +func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*rsa.PublicKey)(r) +} + +type dsaPublicKey dsa.PublicKey + +func (r *dsaPublicKey) Type() string { + return "ssh-dss" +} + +// parseDSA parses an DSA key according to RFC 4253, section 6.6. +func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + P, Q, G, Y *big.Int + Rest []byte `ssh:"rest"` + } + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := &dsaPublicKey{ + Parameters: dsa.Parameters{ + P: w.P, + Q: w.Q, + G: w.G, + }, + Y: w.Y, + } + return key, w.Rest, nil +} + +func (k *dsaPublicKey) Marshal() []byte { + // DSA publickey struct layout should match the struct used by + // parseDSACert in the x/crypto/ssh/agent package. 
+ w := struct { + Name string + P, Q, G, Y *big.Int + }{ + k.Type(), + k.P, + k.Q, + k.G, + k.Y, + } + + return Marshal(&w) +} + +func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != k.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) + } + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 4253, section 6.6, + // The value for 'dss_signature_blob' is encoded as a string containing + // r, followed by s (which are 160-bit integers, without lengths or + // padding, unsigned, and in network byte order). + // For DSS purposes, sig.Blob should be exactly 40 bytes in length. + if len(sig.Blob) != 40 { + return errors.New("ssh: DSA signature parse error") + } + r := new(big.Int).SetBytes(sig.Blob[:20]) + s := new(big.Int).SetBytes(sig.Blob[20:]) + if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*dsa.PublicKey)(k) +} + +type dsaPrivateKey struct { + *dsa.PrivateKey +} + +func (k *dsaPrivateKey) PublicKey() PublicKey { + return (*dsaPublicKey)(&k.PrivateKey.PublicKey) +} + +func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { + h := crypto.SHA1.New() + h.Write(data) + digest := h.Sum(nil) + r, s, err := dsa.Sign(rand, k.PrivateKey, digest) + if err != nil { + return nil, err + } + + sig := make([]byte, 40) + rb := r.Bytes() + sb := s.Bytes() + + copy(sig[20-len(rb):20], rb) + copy(sig[40-len(sb):], sb) + + return &Signature{ + Format: k.PublicKey().Type(), + Blob: sig, + }, nil +} + +type ecdsaPublicKey ecdsa.PublicKey + +func (key *ecdsaPublicKey) Type() string { + return "ecdsa-sha2-" + key.nistID() +} + +func (key *ecdsaPublicKey) nistID() string { + switch key.Params().BitSize { + case 256: + return "nistp256" + case 384: + return "nistp384" + case 521: + return "nistp521" + } + panic("ssh: unsupported ecdsa key size") +} + +type ed25519PublicKey ed25519.PublicKey + +func (key ed25519PublicKey) Type() string { + return KeyAlgoED25519 +} + +func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := ed25519.PublicKey(w.KeyBytes) + + return (ed25519PublicKey)(key), w.Rest, nil +} + +func (key ed25519PublicKey) Marshal() []byte { + w := struct { + Name string + KeyBytes []byte + }{ + KeyAlgoED25519, + []byte(key), + } + return Marshal(&w) +} + +func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error { + if sig.Format != key.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) + } + + edKey := (ed25519.PublicKey)(key) + if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { + return errors.New("ssh: signature did not verify") + } + + return nil +} + +func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { + return ed25519.PublicKey(k) +} + +func supportedEllipticCurve(curve elliptic.Curve) bool { + return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() +} + +// ecHash returns the hash to match the given elliptic curve, see RFC +// 5656, section 6.2.1 +func ecHash(curve elliptic.Curve) crypto.Hash { + bitSize := curve.Params().BitSize + switch { + case bitSize <= 256: + return crypto.SHA256 + case bitSize <= 384: + return crypto.SHA384 + } + return crypto.SHA512 +} + +// 
parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. +func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { + var w struct { + Curve string + KeyBytes []byte + Rest []byte `ssh:"rest"` + } + + if err := Unmarshal(in, &w); err != nil { + return nil, nil, err + } + + key := new(ecdsa.PublicKey) + + switch w.Curve { + case "nistp256": + key.Curve = elliptic.P256() + case "nistp384": + key.Curve = elliptic.P384() + case "nistp521": + key.Curve = elliptic.P521() + default: + return nil, nil, errors.New("ssh: unsupported curve") + } + + key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) + if key.X == nil || key.Y == nil { + return nil, nil, errors.New("ssh: invalid curve point") + } + return (*ecdsaPublicKey)(key), w.Rest, nil +} + +func (key *ecdsaPublicKey) Marshal() []byte { + // See RFC 5656, section 3.1. + keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y) + // ECDSA publickey struct layout should match the struct used by + // parseECDSACert in the x/crypto/ssh/agent package. + w := struct { + Name string + ID string + Key []byte + }{ + key.Type(), + key.nistID(), + keyBytes, + } + + return Marshal(&w) +} + +func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { + if sig.Format != key.Type() { + return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) + } + + h := ecHash(key.Curve).New() + h.Write(data) + digest := h.Sum(nil) + + // Per RFC 5656, section 3.1.2, + // The ecdsa_signature_blob value has the following specific encoding: + // mpint r + // mpint s + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := Unmarshal(sig.Blob, &ecSig); err != nil { + return err + } + + if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) { + return nil + } + return errors.New("ssh: signature did not verify") +} + +func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { + return (*ecdsa.PublicKey)(k) +} + +// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, +// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding +// Signer instance. ECDSA keys must use P-256, P-384 or P-521. +func NewSignerFromKey(key interface{}) (Signer, error) { + switch key := key.(type) { + case crypto.Signer: + return NewSignerFromSigner(key) + case *dsa.PrivateKey: + return &dsaPrivateKey{key}, nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +type wrappedSigner struct { + signer crypto.Signer + pubKey PublicKey +} + +// NewSignerFromSigner takes any crypto.Signer implementation and +// returns a corresponding Signer interface. This can be used, for +// example, with keys kept in hardware modules. 
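+//
+// A minimal sketch (hwSigner is a hypothetical crypto.Signer backed by
+// such a module; error handling elided):
+//
+//	signer, err := NewSignerFromSigner(hwSigner)
+//	fmt.Println(signer.PublicKey().Type())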
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { + pubKey, err := NewPublicKey(signer.Public()) + if err != nil { + return nil, err + } + + return &wrappedSigner{signer, pubKey}, nil +} + +func (s *wrappedSigner) PublicKey() PublicKey { + return s.pubKey +} + +func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { + var hashFunc crypto.Hash + + switch key := s.pubKey.(type) { + case *rsaPublicKey, *dsaPublicKey: + hashFunc = crypto.SHA1 + case *ecdsaPublicKey: + hashFunc = ecHash(key.Curve) + case ed25519PublicKey: + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } + + var digest []byte + if hashFunc != 0 { + h := hashFunc.New() + h.Write(data) + digest = h.Sum(nil) + } else { + digest = data + } + + signature, err := s.signer.Sign(rand, digest, hashFunc) + if err != nil { + return nil, err + } + + // crypto.Signer.Sign is expected to return an ASN.1-encoded signature + // for ECDSA and DSA, but that's not the encoding expected by SSH, so + // re-encode. + switch s.pubKey.(type) { + case *ecdsaPublicKey, *dsaPublicKey: + type asn1Signature struct { + R, S *big.Int + } + asn1Sig := new(asn1Signature) + _, err := asn1.Unmarshal(signature, asn1Sig) + if err != nil { + return nil, err + } + + switch s.pubKey.(type) { + case *ecdsaPublicKey: + signature = Marshal(asn1Sig) + + case *dsaPublicKey: + signature = make([]byte, 40) + r := asn1Sig.R.Bytes() + s := asn1Sig.S.Bytes() + copy(signature[20-len(r):20], r) + copy(signature[40-len(s):40], s) + } + } + + return &Signature{ + Format: s.pubKey.Type(), + Blob: signature, + }, nil +} + +// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, +// or ed25519.PublicKey returns a corresponding PublicKey instance. +// ECDSA keys must use P-256, P-384 or P-521. +func NewPublicKey(key interface{}) (PublicKey, error) { + switch key := key.(type) { + case *rsa.PublicKey: + return (*rsaPublicKey)(key), nil + case *ecdsa.PublicKey: + if !supportedEllipticCurve(key.Curve) { + return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.") + } + return (*ecdsaPublicKey)(key), nil + case *dsa.PublicKey: + return (*dsaPublicKey)(key), nil + case ed25519.PublicKey: + return (ed25519PublicKey)(key), nil + default: + return nil, fmt.Errorf("ssh: unsupported key type %T", key) + } +} + +// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports +// the same keys as ParseRawPrivateKey. +func ParsePrivateKey(pemBytes []byte) (Signer, error) { + key, err := ParseRawPrivateKey(pemBytes) + if err != nil { + return nil, err + } + + return NewSignerFromKey(key) +} + +// encryptedBlock tells whether a private key is +// encrypted by examining its Proc-Type header +// for a mention of ENCRYPTED +// according to RFC 1421 Section 4.6.1.1. +func encryptedBlock(block *pem.Block) bool { + return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") +} + +// ParseRawPrivateKey returns a private key from a PEM encoded private key. It +// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. 
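+//
+// A typical use (illustrative sketch; error handling elided):
+//
+//	key, err := ParseRawPrivateKey(pemBytes)
+//	if rsaKey, ok := key.(*rsa.PrivateKey); ok {
+//		// use rsaKey directly, or wrap it with NewSignerFromKey
+//	}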
+func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { + block, _ := pem.Decode(pemBytes) + if block == nil { + return nil, errors.New("ssh: no key found") + } + + if encryptedBlock(block) { + return nil, errors.New("ssh: cannot decode encrypted private keys") + } + + switch block.Type { + case "RSA PRIVATE KEY": + return x509.ParsePKCS1PrivateKey(block.Bytes) + case "EC PRIVATE KEY": + return x509.ParseECPrivateKey(block.Bytes) + case "DSA PRIVATE KEY": + return ParseDSAPrivateKey(block.Bytes) + case "OPENSSH PRIVATE KEY": + return parseOpenSSHPrivateKey(block.Bytes) + default: + return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) + } +} + +// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as +// specified by the OpenSSL DSA man page. +func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { + var k struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + } + rest, err := asn1.Unmarshal(der, &k) + if err != nil { + return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) + } + if len(rest) > 0 { + return nil, errors.New("ssh: garbage after DSA key") + } + + return &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, + Q: k.Q, + G: k.G, + }, + Y: k.Pub, + }, + X: k.Priv, + }, nil +} + +// Implemented based on the documentation at +// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key +func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) { + magic := append([]byte("openssh-key-v1"), 0) + if !bytes.Equal(magic, key[0:len(magic)]) { + return nil, errors.New("ssh: invalid openssh private key format") + } + remaining := key[len(magic):] + + var w struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + } + + if err := Unmarshal(remaining, &w); err != nil { + return nil, err + } + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{} + + if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { + return nil, err + } + + if pk1.Check1 != pk1.Check2 { + return nil, errors.New("ssh: checkint mismatch") + } + + // we only handle ed25519 keys currently + if pk1.Keytype != KeyAlgoED25519 { + return nil, errors.New("ssh: unhandled key type") + } + + for i, b := range pk1.Pad { + if int(b) != i+1 { + return nil, errors.New("ssh: padding not as expected") + } + } + + if len(pk1.Priv) != ed25519.PrivateKeySize { + return nil, errors.New("ssh: private key unexpected length") + } + + pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) + copy(pk, pk1.Priv) + return &pk, nil +} + +// FingerprintLegacyMD5 returns the user presentation of the key's +// fingerprint as described by RFC 4716 section 4. +func FingerprintLegacyMD5(pubKey PublicKey) string { + md5sum := md5.Sum(pubKey.Marshal()) + hexarray := make([]string, len(md5sum)) + for i, c := range md5sum { + hexarray[i] = hex.EncodeToString([]byte{c}) + } + return strings.Join(hexarray, ":") +} + +// FingerprintSHA256 returns the user presentation of the key's +// fingerprint as unpadded base64 encoded sha256 hash. +// This format was introduced from OpenSSH 6.8. 
+// https://www.openssh.com/txt/release-6.8 +// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) +func FingerprintSHA256(pubKey PublicKey) string { + sha256sum := sha256.Sum256(pubKey.Marshal()) + hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) + return "SHA256:" + hash +} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go new file mode 100644 index 0000000000000000000000000000000000000000..c07a06285e66617febc7bac604e91ba10a967e34 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mac.go @@ -0,0 +1,61 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Message authentication support + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/sha256" + "hash" +) + +type macMode struct { + keySize int + etm bool + new func(key []byte) hash.Hash +} + +// truncatingMAC wraps around a hash.Hash and truncates the output digest to +// a given size. +type truncatingMAC struct { + length int + hmac hash.Hash +} + +func (t truncatingMAC) Write(data []byte) (int, error) { + return t.hmac.Write(data) +} + +func (t truncatingMAC) Sum(in []byte) []byte { + out := t.hmac.Sum(in) + return out[:len(in)+t.length] +} + +func (t truncatingMAC) Reset() { + t.hmac.Reset() +} + +func (t truncatingMAC) Size() int { + return t.length +} + +func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } + +var macModes = map[string]*macMode{ + "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { + return hmac.New(sha256.New, key) + }}, + "hmac-sha1": {20, false, func(key []byte) hash.Hash { + return hmac.New(sha1.New, key) + }}, + "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { + return truncatingMAC{12, hmac.New(sha1.New, key)} + }}, +} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go new file mode 100644 index 0000000000000000000000000000000000000000..e6ecd3afa5adc5843de6f7819f540d07e1ca8a67 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -0,0 +1,758 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math/big" + "reflect" + "strconv" + "strings" +) + +// These are SSH message type numbers. They are scattered around several +// documents but many were taken from [SSH-PARAMETERS]. +const ( + msgIgnore = 2 + msgUnimplemented = 3 + msgDebug = 4 + msgNewKeys = 21 + + // Standard authentication messages + msgUserAuthSuccess = 52 + msgUserAuthBanner = 53 +) + +// SSH messages: +// +// These structures mirror the wire format of the corresponding SSH messages. +// They are marshaled using reflection with the marshal and unmarshal functions +// in this file. The only wrinkle is that a final member of type []byte with a +// ssh tag of "rest" receives the remainder of a packet when unmarshaling. + +// See RFC 4253, section 11.1. +const msgDisconnect = 1 + +// disconnectMsg is the message that signals a disconnect. 
It is also +// the error type returned from mux.Wait() +type disconnectMsg struct { + Reason uint32 `sshtype:"1"` + Message string + Language string +} + +func (d *disconnectMsg) Error() string { + return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) +} + +// See RFC 4253, section 7.1. +const msgKexInit = 20 + +type kexInitMsg struct { + Cookie [16]byte `sshtype:"20"` + KexAlgos []string + ServerHostKeyAlgos []string + CiphersClientServer []string + CiphersServerClient []string + MACsClientServer []string + MACsServerClient []string + CompressionClientServer []string + CompressionServerClient []string + LanguagesClientServer []string + LanguagesServerClient []string + FirstKexFollows bool + Reserved uint32 +} + +// See RFC 4253, section 8. + +// Diffie-Helman +const msgKexDHInit = 30 + +type kexDHInitMsg struct { + X *big.Int `sshtype:"30"` +} + +const msgKexECDHInit = 30 + +type kexECDHInitMsg struct { + ClientPubKey []byte `sshtype:"30"` +} + +const msgKexECDHReply = 31 + +type kexECDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + EphemeralPubKey []byte + Signature []byte +} + +const msgKexDHReply = 31 + +type kexDHReplyMsg struct { + HostKey []byte `sshtype:"31"` + Y *big.Int + Signature []byte +} + +// See RFC 4253, section 10. +const msgServiceRequest = 5 + +type serviceRequestMsg struct { + Service string `sshtype:"5"` +} + +// See RFC 4253, section 10. +const msgServiceAccept = 6 + +type serviceAcceptMsg struct { + Service string `sshtype:"6"` +} + +// See RFC 4252, section 5. +const msgUserAuthRequest = 50 + +type userAuthRequestMsg struct { + User string `sshtype:"50"` + Service string + Method string + Payload []byte `ssh:"rest"` +} + +// Used for debug printouts of packets. +type userAuthSuccessMsg struct { +} + +// See RFC 4252, section 5.1 +const msgUserAuthFailure = 51 + +type userAuthFailureMsg struct { + Methods []string `sshtype:"51"` + PartialSuccess bool +} + +// See RFC 4256, section 3.2 +const msgUserAuthInfoRequest = 60 +const msgUserAuthInfoResponse = 61 + +type userAuthInfoRequestMsg struct { + User string `sshtype:"60"` + Instruction string + DeprecatedLanguage string + NumPrompts uint32 + Prompts []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpen = 90 + +type channelOpenMsg struct { + ChanType string `sshtype:"90"` + PeersId uint32 + PeersWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +const msgChannelExtendedData = 95 +const msgChannelData = 94 + +// Used for debug print outs of packets. +type channelDataMsg struct { + PeersId uint32 `sshtype:"94"` + Length uint32 + Rest []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenConfirm = 91 + +type channelOpenConfirmMsg struct { + PeersId uint32 `sshtype:"91"` + MyId uint32 + MyWindow uint32 + MaxPacketSize uint32 + TypeSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.1. +const msgChannelOpenFailure = 92 + +type channelOpenFailureMsg struct { + PeersId uint32 `sshtype:"92"` + Reason RejectionReason + Message string + Language string +} + +const msgChannelRequest = 98 + +type channelRequestMsg struct { + PeersId uint32 `sshtype:"98"` + Request string + WantReply bool + RequestSpecificData []byte `ssh:"rest"` +} + +// See RFC 4254, section 5.4. +const msgChannelSuccess = 99 + +type channelRequestSuccessMsg struct { + PeersId uint32 `sshtype:"99"` +} + +// See RFC 4254, section 5.4. 
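+// CHANNEL_FAILURE is the negative reply to a channel request that was sent
+// with want-reply set.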
+const msgChannelFailure = 100 + +type channelRequestFailureMsg struct { + PeersId uint32 `sshtype:"100"` +} + +// See RFC 4254, section 5.3 +const msgChannelClose = 97 + +type channelCloseMsg struct { + PeersId uint32 `sshtype:"97"` +} + +// See RFC 4254, section 5.3 +const msgChannelEOF = 96 + +type channelEOFMsg struct { + PeersId uint32 `sshtype:"96"` +} + +// See RFC 4254, section 4 +const msgGlobalRequest = 80 + +type globalRequestMsg struct { + Type string `sshtype:"80"` + WantReply bool + Data []byte `ssh:"rest"` +} + +// See RFC 4254, section 4 +const msgRequestSuccess = 81 + +type globalRequestSuccessMsg struct { + Data []byte `ssh:"rest" sshtype:"81"` +} + +// See RFC 4254, section 4 +const msgRequestFailure = 82 + +type globalRequestFailureMsg struct { + Data []byte `ssh:"rest" sshtype:"82"` +} + +// See RFC 4254, section 5.2 +const msgChannelWindowAdjust = 93 + +type windowAdjustMsg struct { + PeersId uint32 `sshtype:"93"` + AdditionalBytes uint32 +} + +// See RFC 4252, section 7 +const msgUserAuthPubKeyOk = 60 + +type userAuthPubKeyOkMsg struct { + Algo string `sshtype:"60"` + PubKey []byte +} + +// typeTags returns the possible type bytes for the given reflect.Type, which +// should be a struct. The possible values are separated by a '|' character. +func typeTags(structType reflect.Type) (tags []byte) { + tagStr := structType.Field(0).Tag.Get("sshtype") + + for _, tag := range strings.Split(tagStr, "|") { + i, err := strconv.Atoi(tag) + if err == nil { + tags = append(tags, byte(i)) + } + } + + return tags +} + +func fieldError(t reflect.Type, field int, problem string) error { + if problem != "" { + problem = ": " + problem + } + return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) +} + +var errShortRead = errors.New("ssh: short read") + +// Unmarshal parses data in SSH wire format into a structure. The out +// argument should be a pointer to struct. If the first member of the +// struct has the "sshtype" tag set to a '|'-separated set of numbers +// in decimal, the packet must start with one of those numbers. In +// case of error, Unmarshal returns a ParseError or +// UnexpectedMessageError. 
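+//
+// For example (illustrative sketch), a packet whose first byte is 99 can be
+// parsed into its uint32 channel ID with:
+//
+//	var msg struct {
+//		PeersId uint32 `sshtype:"99"`
+//	}
+//	err := Unmarshal(packet, &msg)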
+func Unmarshal(data []byte, out interface{}) error { + v := reflect.ValueOf(out).Elem() + structType := v.Type() + expectedTypes := typeTags(structType) + + var expectedType byte + if len(expectedTypes) > 0 { + expectedType = expectedTypes[0] + } + + if len(data) == 0 { + return parseError(expectedType) + } + + if len(expectedTypes) > 0 { + goodType := false + for _, e := range expectedTypes { + if e > 0 && data[0] == e { + goodType = true + break + } + } + if !goodType { + return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) + } + data = data[1:] + } + + var ok bool + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + t := field.Type() + switch t.Kind() { + case reflect.Bool: + if len(data) < 1 { + return errShortRead + } + field.SetBool(data[0] != 0) + data = data[1:] + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + return fieldError(structType, i, "array of unsupported type") + } + if len(data) < t.Len() { + return errShortRead + } + for j, n := 0, t.Len(); j < n; j++ { + field.Index(j).Set(reflect.ValueOf(data[j])) + } + data = data[t.Len():] + case reflect.Uint64: + var u64 uint64 + if u64, data, ok = parseUint64(data); !ok { + return errShortRead + } + field.SetUint(u64) + case reflect.Uint32: + var u32 uint32 + if u32, data, ok = parseUint32(data); !ok { + return errShortRead + } + field.SetUint(uint64(u32)) + case reflect.Uint8: + if len(data) < 1 { + return errShortRead + } + field.SetUint(uint64(data[0])) + data = data[1:] + case reflect.String: + var s []byte + if s, data, ok = parseString(data); !ok { + return fieldError(structType, i, "") + } + field.SetString(string(s)) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if structType.Field(i).Tag.Get("ssh") == "rest" { + field.Set(reflect.ValueOf(data)) + data = nil + } else { + var s []byte + if s, data, ok = parseString(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(s)) + } + case reflect.String: + var nl []string + if nl, data, ok = parseNameList(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(nl)) + default: + return fieldError(structType, i, "slice of unsupported type") + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + if n, data, ok = parseInt(data); !ok { + return errShortRead + } + field.Set(reflect.ValueOf(n)) + } else { + return fieldError(structType, i, "pointer to unsupported type") + } + default: + return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) + } + } + + if len(data) != 0 { + return parseError(expectedType) + } + + return nil +} + +// Marshal serializes the message in msg to SSH wire format. The msg +// argument should be a struct or pointer to struct. If the first +// member has the "sshtype" tag set to a number in decimal, that +// number is prepended to the result. If the last of member has the +// "ssh" tag set to "rest", its contents are appended to the output. 
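+//
+// For example (illustrative sketch using a message type from this file):
+//
+//	p := Marshal(&channelRequestSuccessMsg{PeersId: 1})
+//	// p == []byte{99, 0, 0, 0, 1}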
+func Marshal(msg interface{}) []byte { + out := make([]byte, 0, 64) + return marshalStruct(out, msg) +} + +func marshalStruct(out []byte, msg interface{}) []byte { + v := reflect.Indirect(reflect.ValueOf(msg)) + msgTypes := typeTags(v.Type()) + if len(msgTypes) > 0 { + out = append(out, msgTypes[0]) + } + + for i, n := 0, v.NumField(); i < n; i++ { + field := v.Field(i) + switch t := field.Type(); t.Kind() { + case reflect.Bool: + var v uint8 + if field.Bool() { + v = 1 + } + out = append(out, v) + case reflect.Array: + if t.Elem().Kind() != reflect.Uint8 { + panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) + } + for j, l := 0, t.Len(); j < l; j++ { + out = append(out, uint8(field.Index(j).Uint())) + } + case reflect.Uint32: + out = appendU32(out, uint32(field.Uint())) + case reflect.Uint64: + out = appendU64(out, uint64(field.Uint())) + case reflect.Uint8: + out = append(out, uint8(field.Uint())) + case reflect.String: + s := field.String() + out = appendInt(out, len(s)) + out = append(out, s...) + case reflect.Slice: + switch t.Elem().Kind() { + case reflect.Uint8: + if v.Type().Field(i).Tag.Get("ssh") != "rest" { + out = appendInt(out, field.Len()) + } + out = append(out, field.Bytes()...) + case reflect.String: + offset := len(out) + out = appendU32(out, 0) + if n := field.Len(); n > 0 { + for j := 0; j < n; j++ { + f := field.Index(j) + if j != 0 { + out = append(out, ',') + } + out = append(out, f.String()...) + } + // overwrite length value + binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) + } + default: + panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) + } + case reflect.Ptr: + if t == bigIntType { + var n *big.Int + nValue := reflect.ValueOf(&n) + nValue.Elem().Set(field) + needed := intLength(n) + oldLength := len(out) + + if cap(out)-len(out) < needed { + newOut := make([]byte, len(out), 2*(len(out)+needed)) + copy(newOut, out) + out = newOut + } + out = out[:oldLength+needed] + marshalInt(out[oldLength:], n) + } else { + panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) + } + } + } + + return out +} + +var bigOne = big.NewInt(1) + +func parseString(in []byte) (out, rest []byte, ok bool) { + if len(in) < 4 { + return + } + length := binary.BigEndian.Uint32(in) + in = in[4:] + if uint32(len(in)) < length { + return + } + out = in[:length] + rest = in[length:] + ok = true + return +} + +var ( + comma = []byte{','} + emptyNameList = []string{} +) + +func parseNameList(in []byte) (out []string, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + if len(contents) == 0 { + out = emptyNameList + return + } + parts := bytes.Split(contents, comma) + out = make([]string, len(parts)) + for i, part := range parts { + out[i] = string(part) + } + return +} + +func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { + contents, rest, ok := parseString(in) + if !ok { + return + } + out = new(big.Int) + + if len(contents) > 0 && contents[0]&0x80 == 0x80 { + // This is a negative number + notBytes := make([]byte, len(contents)) + for i := range notBytes { + notBytes[i] = ^contents[i] + } + out.SetBytes(notBytes) + out.Add(out, bigOne) + out.Neg(out) + } else { + // Positive number + out.SetBytes(contents) + } + ok = true + return +} + +func parseUint32(in []byte) (uint32, []byte, bool) { + if len(in) < 4 { + return 0, nil, false + } + return binary.BigEndian.Uint32(in), in[4:], true +} + +func parseUint64(in []byte) (uint64, []byte, 
bool) { + if len(in) < 8 { + return 0, nil, false + } + return binary.BigEndian.Uint64(in), in[8:], true +} + +func intLength(n *big.Int) int { + length := 4 /* length bytes */ + if n.Sign() < 0 { + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bitLen := nMinus1.BitLen() + if bitLen%8 == 0 { + // The number will need 0xff padding + length++ + } + length += (bitLen + 7) / 8 + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bitLen := n.BitLen() + if bitLen%8 == 0 { + // The number will need 0x00 padding + length++ + } + length += (bitLen + 7) / 8 + } + + return length +} + +func marshalUint32(to []byte, n uint32) []byte { + binary.BigEndian.PutUint32(to, n) + return to[4:] +} + +func marshalUint64(to []byte, n uint64) []byte { + binary.BigEndian.PutUint64(to, n) + return to[8:] +} + +func marshalInt(to []byte, n *big.Int) []byte { + lengthBytes := to + to = to[4:] + length := 0 + + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement + // form. So we'll subtract 1 and invert. If the + // most-significant-bit isn't set then we'll need to pad the + // beginning with 0xff in order to keep the number negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if len(bytes) == 0 || bytes[0]&0x80 == 0 { + to[0] = 0xff + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } else if n.Sign() == 0 { + // A zero is the zero length string + } else { + bytes := n.Bytes() + if len(bytes) > 0 && bytes[0]&0x80 != 0 { + // We'll have to pad this with a 0x00 in order to + // stop it looking like a negative number. + to[0] = 0 + to = to[1:] + length++ + } + nBytes := copy(to, bytes) + to = to[nBytes:] + length += nBytes + } + + lengthBytes[0] = byte(length >> 24) + lengthBytes[1] = byte(length >> 16) + lengthBytes[2] = byte(length >> 8) + lengthBytes[3] = byte(length) + return to +} + +func writeInt(w io.Writer, n *big.Int) { + length := intLength(n) + buf := make([]byte, length) + marshalInt(buf, n) + w.Write(buf) +} + +func writeString(w io.Writer, s []byte) { + var lengthBytes [4]byte + lengthBytes[0] = byte(len(s) >> 24) + lengthBytes[1] = byte(len(s) >> 16) + lengthBytes[2] = byte(len(s) >> 8) + lengthBytes[3] = byte(len(s)) + w.Write(lengthBytes[:]) + w.Write(s) +} + +func stringLength(n int) int { + return 4 + n +} + +func marshalString(to []byte, s []byte) []byte { + to[0] = byte(len(s) >> 24) + to[1] = byte(len(s) >> 16) + to[2] = byte(len(s) >> 8) + to[3] = byte(len(s)) + to = to[4:] + copy(to, s) + return to[len(s):] +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)) + +// Decode a packet into its corresponding message. 
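// Illustrative byte layouts: the length-prefixed two's-complement ("mpint")
// form produced by marshalInt and consumed by parseInt, per RFC 4251,
// section 5 (length prefix shown first):
//
//	big.NewInt(0)    -> 00 00 00 00          (zero-length string)
//	big.NewInt(0x80) -> 00 00 00 02 00 80    (0x00 pad keeps the value positive)
//	big.NewInt(-1)   -> 00 00 00 01 ff       (two's-complement, 0xff pad)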
+func decode(packet []byte) (interface{}, error) { + var msg interface{} + switch packet[0] { + case msgDisconnect: + msg = new(disconnectMsg) + case msgServiceRequest: + msg = new(serviceRequestMsg) + case msgServiceAccept: + msg = new(serviceAcceptMsg) + case msgKexInit: + msg = new(kexInitMsg) + case msgKexDHInit: + msg = new(kexDHInitMsg) + case msgKexDHReply: + msg = new(kexDHReplyMsg) + case msgUserAuthRequest: + msg = new(userAuthRequestMsg) + case msgUserAuthSuccess: + return new(userAuthSuccessMsg), nil + case msgUserAuthFailure: + msg = new(userAuthFailureMsg) + case msgUserAuthPubKeyOk: + msg = new(userAuthPubKeyOkMsg) + case msgGlobalRequest: + msg = new(globalRequestMsg) + case msgRequestSuccess: + msg = new(globalRequestSuccessMsg) + case msgRequestFailure: + msg = new(globalRequestFailureMsg) + case msgChannelOpen: + msg = new(channelOpenMsg) + case msgChannelData: + msg = new(channelDataMsg) + case msgChannelOpenConfirm: + msg = new(channelOpenConfirmMsg) + case msgChannelOpenFailure: + msg = new(channelOpenFailureMsg) + case msgChannelWindowAdjust: + msg = new(windowAdjustMsg) + case msgChannelEOF: + msg = new(channelEOFMsg) + case msgChannelClose: + msg = new(channelCloseMsg) + case msgChannelRequest: + msg = new(channelRequestMsg) + case msgChannelSuccess: + msg = new(channelRequestSuccessMsg) + case msgChannelFailure: + msg = new(channelRequestFailureMsg) + default: + return nil, unexpectedMessageError(0, packet[0]) + } + if err := Unmarshal(packet, msg); err != nil { + return nil, err + } + return msg, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go new file mode 100644 index 0000000000000000000000000000000000000000..27a527c106bfbd6fd3aad6f8a8d2ceb3300fa8e3 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/mux.go @@ -0,0 +1,330 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "encoding/binary" + "fmt" + "io" + "log" + "sync" + "sync/atomic" +) + +// debugMux, if set, causes messages in the connection protocol to be +// logged. +const debugMux = false + +// chanList is a thread safe channel list. +type chanList struct { + // protects concurrent access to chans + sync.Mutex + + // chans are indexed by the local id of the channel, which the + // other side should send in the PeersId field. + chans []*channel + + // This is a debugging aid: it offsets all IDs by this + // amount. This helps distinguish otherwise identical + // server/client muxes + offset uint32 +} + +// Assigns a channel ID to the given channel. +func (c *chanList) add(ch *channel) uint32 { + c.Lock() + defer c.Unlock() + for i := range c.chans { + if c.chans[i] == nil { + c.chans[i] = ch + return uint32(i) + c.offset + } + } + c.chans = append(c.chans, ch) + return uint32(len(c.chans)-1) + c.offset +} + +// getChan returns the channel for the given ID. +func (c *chanList) getChan(id uint32) *channel { + id -= c.offset + + c.Lock() + defer c.Unlock() + if id < uint32(len(c.chans)) { + return c.chans[id] + } + return nil +} + +func (c *chanList) remove(id uint32) { + id -= c.offset + c.Lock() + if id < uint32(len(c.chans)) { + c.chans[id] = nil + } + c.Unlock() +} + +// dropAll forgets all channels it knows, returning them in a slice. 
+func (c *chanList) dropAll() []*channel { + c.Lock() + defer c.Unlock() + var r []*channel + + for _, ch := range c.chans { + if ch == nil { + continue + } + r = append(r, ch) + } + c.chans = nil + return r +} + +// mux represents the state for the SSH connection protocol, which +// multiplexes many channels onto a single packet transport. +type mux struct { + conn packetConn + chanList chanList + + incomingChannels chan NewChannel + + globalSentMu sync.Mutex + globalResponses chan interface{} + incomingRequests chan *Request + + errCond *sync.Cond + err error +} + +// When debugging, each new chanList instantiation has a different +// offset. +var globalOff uint32 + +func (m *mux) Wait() error { + m.errCond.L.Lock() + defer m.errCond.L.Unlock() + for m.err == nil { + m.errCond.Wait() + } + return m.err +} + +// newMux returns a mux that runs over the given connection. +func newMux(p packetConn) *mux { + m := &mux{ + conn: p, + incomingChannels: make(chan NewChannel, chanSize), + globalResponses: make(chan interface{}, 1), + incomingRequests: make(chan *Request, chanSize), + errCond: newCond(), + } + if debugMux { + m.chanList.offset = atomic.AddUint32(&globalOff, 1) + } + + go m.loop() + return m +} + +func (m *mux) sendMessage(msg interface{}) error { + p := Marshal(msg) + if debugMux { + log.Printf("send global(%d): %#v", m.chanList.offset, msg) + } + return m.conn.writePacket(p) +} + +func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { + if wantReply { + m.globalSentMu.Lock() + defer m.globalSentMu.Unlock() + } + + if err := m.sendMessage(globalRequestMsg{ + Type: name, + WantReply: wantReply, + Data: payload, + }); err != nil { + return false, nil, err + } + + if !wantReply { + return false, nil, nil + } + + msg, ok := <-m.globalResponses + if !ok { + return false, nil, io.EOF + } + switch msg := msg.(type) { + case *globalRequestFailureMsg: + return false, msg.Data, nil + case *globalRequestSuccessMsg: + return true, msg.Data, nil + default: + return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) + } +} + +// ackRequest must be called after processing a global request that +// has WantReply set. +func (m *mux) ackRequest(ok bool, data []byte) error { + if ok { + return m.sendMessage(globalRequestSuccessMsg{Data: data}) + } + return m.sendMessage(globalRequestFailureMsg{Data: data}) +} + +func (m *mux) Close() error { + return m.conn.Close() +} + +// loop runs the connection machine. It will process packets until an +// error is encountered. To synchronize on loop exit, use mux.Wait. +func (m *mux) loop() { + var err error + for err == nil { + err = m.onePacket() + } + + for _, ch := range m.chanList.dropAll() { + ch.close() + } + + close(m.incomingChannels) + close(m.incomingRequests) + close(m.globalResponses) + + m.conn.Close() + + m.errCond.L.Lock() + m.err = err + m.errCond.Broadcast() + m.errCond.L.Unlock() + + if debugMux { + log.Println("loop exit", err) + } +} + +// onePacket reads and processes one packet. 
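// Illustrative flow: a global request sent with WantReply set blocks until
// the peer answers; the reply is routed onto globalResponses by onePacket.
// For example, a keepalive probe (request name borrowed from OpenSSH, m being
// the connection's *mux; the exported Conn.SendRequest is backed by this):
//
//	ok, _, err := m.SendRequest("keepalive@openssh.com", true, nil)
//	// ok reports whether the peer replied with SSH_MSG_REQUEST_SUCCESS
//
// The receiving side sees such requests on incomingRequests and must answer
// them (ultimately via ackRequest) for the sender to unblock.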
+func (m *mux) onePacket() error { + packet, err := m.conn.readPacket() + if err != nil { + return err + } + + if debugMux { + if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { + log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) + } else { + p, _ := decode(packet) + log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) + } + } + + switch packet[0] { + case msgChannelOpen: + return m.handleChannelOpen(packet) + case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: + return m.handleGlobalPacket(packet) + } + + // assume a channel packet. + if len(packet) < 5 { + return parseError(packet[0]) + } + id := binary.BigEndian.Uint32(packet[1:]) + ch := m.chanList.getChan(id) + if ch == nil { + return fmt.Errorf("ssh: invalid channel %d", id) + } + + return ch.handlePacket(packet) +} + +func (m *mux) handleGlobalPacket(packet []byte) error { + msg, err := decode(packet) + if err != nil { + return err + } + + switch msg := msg.(type) { + case *globalRequestMsg: + m.incomingRequests <- &Request{ + Type: msg.Type, + WantReply: msg.WantReply, + Payload: msg.Data, + mux: m, + } + case *globalRequestSuccessMsg, *globalRequestFailureMsg: + m.globalResponses <- msg + default: + panic(fmt.Sprintf("not a global message %#v", msg)) + } + + return nil +} + +// handleChannelOpen schedules a channel to be Accept()ed. +func (m *mux) handleChannelOpen(packet []byte) error { + var msg channelOpenMsg + if err := Unmarshal(packet, &msg); err != nil { + return err + } + + if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { + failMsg := channelOpenFailureMsg{ + PeersId: msg.PeersId, + Reason: ConnectionFailed, + Message: "invalid request", + Language: "en_US.UTF-8", + } + return m.sendMessage(failMsg) + } + + c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) + c.remoteId = msg.PeersId + c.maxRemotePayload = msg.MaxPacketSize + c.remoteWin.add(msg.PeersWindow) + m.incomingChannels <- c + return nil +} + +func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { + ch, err := m.openChannel(chanType, extra) + if err != nil { + return nil, nil, err + } + + return ch, ch.incomingRequests, nil +} + +func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { + ch := m.newChannel(chanType, channelOutbound, extra) + + ch.maxIncomingPayload = channelMaxPacket + + open := channelOpenMsg{ + ChanType: chanType, + PeersWindow: ch.myWindow, + MaxPacketSize: ch.maxIncomingPayload, + TypeSpecificData: extra, + PeersId: ch.localId, + } + if err := m.sendMessage(open); err != nil { + return nil, err + } + + switch msg := (<-ch.msg).(type) { + case *channelOpenConfirmMsg: + return ch, nil + case *channelOpenFailureMsg: + return nil, &OpenChannelError{msg.Reason, msg.Message} + default: + return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go new file mode 100644 index 0000000000000000000000000000000000000000..77c84d165caa5a7e202989121dab26264f8b5638 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -0,0 +1,491 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ssh + +import ( + "bytes" + "errors" + "fmt" + "io" + "net" + "strings" +) + +// The Permissions type holds fine-grained permissions that are +// specific to a user or a specific authentication method for a +// user. Permissions, except for "source-address", must be enforced in +// the server application layer, after successful authentication. The +// Permissions are passed on in ServerConn so a server implementation +// can honor them. +type Permissions struct { + // Critical options restrict default permissions. Common + // restrictions are "source-address" and "force-command". If + // the server cannot enforce the restriction, or does not + // recognize it, the user should not authenticate. + CriticalOptions map[string]string + + // Extensions are extra functionality that the server may + // offer on authenticated connections. Common extensions are + // "permit-agent-forwarding", "permit-X11-forwarding". Lack of + // support for an extension does not preclude authenticating a + // user. + Extensions map[string]string +} + +// ServerConfig holds server specific configuration data. +type ServerConfig struct { + // Config contains configuration shared between client and server. + Config + + hostKeys []Signer + + // NoClientAuth is true if clients are allowed to connect without + // authenticating. + NoClientAuth bool + + // PasswordCallback, if non-nil, is called when a user + // attempts to authenticate using a password. + PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) + + // PublicKeyCallback, if non-nil, is called when a client attempts public + // key authentication. It must return true if the given public key is + // valid for the given user. For example, see CertChecker.Authenticate. + PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) + + // KeyboardInteractiveCallback, if non-nil, is called when + // keyboard-interactive authentication is selected (RFC + // 4256). The client object's Challenge function should be + // used to query the user. The callback may offer multiple + // Challenge rounds. To avoid information leaks, the client + // should be presented a challenge even if the user is + // unknown. + KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) + + // AuthLogCallback, if non-nil, is called to log all authentication + // attempts. + AuthLogCallback func(conn ConnMetadata, method string, err error) + + // ServerVersion is the version identification string to announce in + // the public handshake. + // If empty, a reasonable default is used. + // Note that RFC 4253 section 4.2 requires that this string start with + // "SSH-2.0-". + ServerVersion string +} + +// AddHostKey adds a private key as a host key. If an existing host +// key exists with the same algorithm, it is overwritten. Each server +// config must have at least one host key. +func (s *ServerConfig) AddHostKey(key Signer) { + for i, k := range s.hostKeys { + if k.PublicKey().Type() == key.PublicKey().Type() { + s.hostKeys[i] = key + return + } + } + + s.hostKeys = append(s.hostKeys, key) +} + +// cachedPubKey contains the results of querying whether a public key is +// acceptable for a user. +type cachedPubKey struct { + user string + pubKeyData []byte + result error + perms *Permissions +} + +const maxCachedPubKeys = 16 + +// pubKeyCache caches tests for public keys. 
Since SSH clients +// will query whether a public key is acceptable before attempting to +// authenticate with it, we end up with duplicate queries for public +// key validity. The cache only applies to a single ServerConn. +type pubKeyCache struct { + keys []cachedPubKey +} + +// get returns the result for a given user/algo/key tuple. +func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { + for _, k := range c.keys { + if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { + return k, true + } + } + return cachedPubKey{}, false +} + +// add adds the given tuple to the cache. +func (c *pubKeyCache) add(candidate cachedPubKey) { + if len(c.keys) < maxCachedPubKeys { + c.keys = append(c.keys, candidate) + } +} + +// ServerConn is an authenticated SSH connection, as seen from the +// server +type ServerConn struct { + Conn + + // If the succeeding authentication callback returned a + // non-nil Permissions pointer, it is stored here. + Permissions *Permissions +} + +// NewServerConn starts a new SSH server with c as the underlying +// transport. It starts with a handshake and, if the handshake is +// unsuccessful, it closes the connection and returns an error. The +// Request and NewChannel channels must be serviced, or the connection +// will hang. +func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { + fullConf := *config + fullConf.SetDefaults() + s := &connection{ + sshConn: sshConn{conn: c}, + } + perms, err := s.serverHandshake(&fullConf) + if err != nil { + c.Close() + return nil, nil, nil, err + } + return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil +} + +// signAndMarshal signs the data with the appropriate algorithm, +// and serializes the result in SSH wire format. +func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { + sig, err := k.Sign(rand, data) + if err != nil { + return nil, err + } + + return Marshal(sig), nil +} + +// handshake performs key exchange and user authentication. +func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { + if len(config.hostKeys) == 0 { + return nil, errors.New("ssh: server has no host keys") + } + + if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if config.ServerVersion != "" { + s.serverVersion = []byte(config.ServerVersion) + } else { + s.serverVersion = []byte(packageVersion) + } + var err error + s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) + if err != nil { + return nil, err + } + + tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) + s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) + + if err := s.transport.waitSession(); err != nil { + return nil, err + } + + // We just did the key change, so the session ID is established. 
+ s.sessionID = s.transport.getSessionID() + + var packet []byte + if packet, err = s.transport.readPacket(); err != nil { + return nil, err + } + + var serviceRequest serviceRequestMsg + if err = Unmarshal(packet, &serviceRequest); err != nil { + return nil, err + } + if serviceRequest.Service != serviceUserAuth { + return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") + } + serviceAccept := serviceAcceptMsg{ + Service: serviceUserAuth, + } + if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { + return nil, err + } + + perms, err := s.serverAuthenticate(config) + if err != nil { + return nil, err + } + s.mux = newMux(s.transport) + return perms, err +} + +func isAcceptableAlgo(algo string) bool { + switch algo { + case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01: + return true + } + return false +} + +func checkSourceAddress(addr net.Addr, sourceAddrs string) error { + if addr == nil { + return errors.New("ssh: no address known for client, but source-address match required") + } + + tcpAddr, ok := addr.(*net.TCPAddr) + if !ok { + return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) + } + + for _, sourceAddr := range strings.Split(sourceAddrs, ",") { + if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { + if allowedIP.Equal(tcpAddr.IP) { + return nil + } + } else { + _, ipNet, err := net.ParseCIDR(sourceAddr) + if err != nil { + return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) + } + + if ipNet.Contains(tcpAddr.IP) { + return nil + } + } + } + + return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) +} + +func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + sessionID := s.transport.getSessionID() + var cache pubKeyCache + var perms *Permissions + +userAuthLoop: + for { + var userAuthReq userAuthRequestMsg + if packet, err := s.transport.readPacket(); err != nil { + return nil, err + } else if err = Unmarshal(packet, &userAuthReq); err != nil { + return nil, err + } + + if userAuthReq.Service != serviceSSH { + return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) + } + + s.user = userAuthReq.User + perms = nil + authErr := errors.New("no auth passed yet") + + switch userAuthReq.Method { + case "none": + if config.NoClientAuth { + authErr = nil + } + case "password": + if config.PasswordCallback == nil { + authErr = errors.New("ssh: password auth not configured") + break + } + payload := userAuthReq.Payload + if len(payload) < 1 || payload[0] != 0 { + return nil, parseError(msgUserAuthRequest) + } + payload = payload[1:] + password, payload, ok := parseString(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + perms, authErr = config.PasswordCallback(s, password) + case "keyboard-interactive": + if config.KeyboardInteractiveCallback == nil { + authErr = errors.New("ssh: keyboard-interactive auth not configubred") + break + } + + prompter := &sshClientKeyboardInteractive{s} + perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) + case "publickey": + if config.PublicKeyCallback == nil { + authErr = errors.New("ssh: publickey auth not configured") + break + } + payload := 
userAuthReq.Payload + if len(payload) < 1 { + return nil, parseError(msgUserAuthRequest) + } + isQuery := payload[0] == 0 + payload = payload[1:] + algoBytes, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + algo := string(algoBytes) + if !isAcceptableAlgo(algo) { + authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) + break + } + + pubKeyData, payload, ok := parseString(payload) + if !ok { + return nil, parseError(msgUserAuthRequest) + } + + pubKey, err := ParsePublicKey(pubKeyData) + if err != nil { + return nil, err + } + + candidate, ok := cache.get(s.user, pubKeyData) + if !ok { + candidate.user = s.user + candidate.pubKeyData = pubKeyData + candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) + if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { + candidate.result = checkSourceAddress( + s.RemoteAddr(), + candidate.perms.CriticalOptions[sourceAddressCriticalOption]) + } + cache.add(candidate) + } + + if isQuery { + // The client can query if the given public key + // would be okay. + if len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + + if candidate.result == nil { + okMsg := userAuthPubKeyOkMsg{ + Algo: algo, + PubKey: pubKeyData, + } + if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { + return nil, err + } + continue userAuthLoop + } + authErr = candidate.result + } else { + sig, payload, ok := parseSignature(payload) + if !ok || len(payload) > 0 { + return nil, parseError(msgUserAuthRequest) + } + // Ensure the public key algo and signature algo + // are supported. Compare the private key + // algorithm name that corresponds to algo with + // sig.Format. This is usually the same, but + // for certs, the names differ. + if !isAcceptableAlgo(sig.Format) { + break + } + signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) + + if err := pubKey.Verify(signedData, sig); err != nil { + return nil, err + } + + authErr = candidate.result + perms = candidate.perms + } + default: + authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) + } + + if config.AuthLogCallback != nil { + config.AuthLogCallback(s, userAuthReq.Method, authErr) + } + + if authErr == nil { + break userAuthLoop + } + + var failureMsg userAuthFailureMsg + if config.PasswordCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "password") + } + if config.PublicKeyCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "publickey") + } + if config.KeyboardInteractiveCallback != nil { + failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") + } + + if len(failureMsg.Methods) == 0 { + return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") + } + + if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { + return nil, err + } + } + + if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { + return nil, err + } + return perms, nil +} + +// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by +// asking the client on the other side of a ServerConn. 
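// Illustrative sketch of the caller's side of this loop: a minimal
// ServerConfig whose callbacks serverAuthenticate consults. The credentials,
// host key variable, and token policy are hypothetical; error handling is
// elided. The keyboard-interactive callback drives the Challenge helper
// defined just below.
//
//	config := &ServerConfig{
//		PasswordCallback: func(c ConnMetadata, pass []byte) (*Permissions, error) {
//			if c.User() == "demo" && string(pass) == "secret" {
//				return nil, nil
//			}
//			return nil, fmt.Errorf("password rejected for %q", c.User())
//		},
//		KeyboardInteractiveCallback: func(c ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) {
//			answers, err := challenge(c.User(), "", []string{"One-time token: "}, []bool{true})
//			if err != nil || len(answers) != 1 || answers[0] != "42" {
//				return nil, errors.New("keyboard-interactive rejected")
//			}
//			return nil, nil
//		},
//	}
//	config.AddHostKey(hostSigner)                       // hostSigner is a Signer loaded elsewhere
//	_, chans, reqs, err := NewServerConn(nConn, config) // nConn is an accepted net.Conn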
+type sshClientKeyboardInteractive struct { + *connection +} + +func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { + if len(questions) != len(echos) { + return nil, errors.New("ssh: echos and questions must have equal length") + } + + var prompts []byte + for i := range questions { + prompts = appendString(prompts, questions[i]) + prompts = appendBool(prompts, echos[i]) + } + + if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ + Instruction: instruction, + NumPrompts: uint32(len(questions)), + Prompts: prompts, + })); err != nil { + return nil, err + } + + packet, err := c.transport.readPacket() + if err != nil { + return nil, err + } + if packet[0] != msgUserAuthInfoResponse { + return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) + } + packet = packet[1:] + + n, packet, ok := parseUint32(packet) + if !ok || int(n) != len(questions) { + return nil, parseError(msgUserAuthInfoResponse) + } + + for i := uint32(0); i < n; i++ { + ans, rest, ok := parseString(packet) + if !ok { + return nil, parseError(msgUserAuthInfoResponse) + } + + answers = append(answers, string(ans)) + packet = rest + } + if len(packet) != 0 { + return nil, errors.New("ssh: junk at end of message") + } + + return answers, nil +} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go new file mode 100644 index 0000000000000000000000000000000000000000..17e2aa85c1f9a450a8596cf09de3fb9945bd2861 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/session.go @@ -0,0 +1,627 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +// Session implements an interactive session described in +// "RFC 4254, section 6". + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "sync" +) + +type Signal string + +// POSIX signals as listed in RFC 4254 Section 6.10. +const ( + SIGABRT Signal = "ABRT" + SIGALRM Signal = "ALRM" + SIGFPE Signal = "FPE" + SIGHUP Signal = "HUP" + SIGILL Signal = "ILL" + SIGINT Signal = "INT" + SIGKILL Signal = "KILL" + SIGPIPE Signal = "PIPE" + SIGQUIT Signal = "QUIT" + SIGSEGV Signal = "SEGV" + SIGTERM Signal = "TERM" + SIGUSR1 Signal = "USR1" + SIGUSR2 Signal = "USR2" +) + +var signals = map[Signal]int{ + SIGABRT: 6, + SIGALRM: 14, + SIGFPE: 8, + SIGHUP: 1, + SIGILL: 4, + SIGINT: 2, + SIGKILL: 9, + SIGPIPE: 13, + SIGQUIT: 3, + SIGSEGV: 11, + SIGTERM: 15, +} + +type TerminalModes map[uint8]uint32 + +// POSIX terminal mode flags as listed in RFC 4254 Section 8. +const ( + tty_OP_END = 0 + VINTR = 1 + VQUIT = 2 + VERASE = 3 + VKILL = 4 + VEOF = 5 + VEOL = 6 + VEOL2 = 7 + VSTART = 8 + VSTOP = 9 + VSUSP = 10 + VDSUSP = 11 + VREPRINT = 12 + VWERASE = 13 + VLNEXT = 14 + VFLUSH = 15 + VSWTCH = 16 + VSTATUS = 17 + VDISCARD = 18 + IGNPAR = 30 + PARMRK = 31 + INPCK = 32 + ISTRIP = 33 + INLCR = 34 + IGNCR = 35 + ICRNL = 36 + IUCLC = 37 + IXON = 38 + IXANY = 39 + IXOFF = 40 + IMAXBEL = 41 + ISIG = 50 + ICANON = 51 + XCASE = 52 + ECHO = 53 + ECHOE = 54 + ECHOK = 55 + ECHONL = 56 + NOFLSH = 57 + TOSTOP = 58 + IEXTEN = 59 + ECHOCTL = 60 + ECHOKE = 61 + PENDIN = 62 + OPOST = 70 + OLCUC = 71 + ONLCR = 72 + OCRNL = 73 + ONOCR = 74 + ONLRET = 75 + CS7 = 90 + CS8 = 91 + PARENB = 92 + PARODD = 93 + TTY_OP_ISPEED = 128 + TTY_OP_OSPEED = 129 +) + +// A Session represents a connection to a remote command or shell. 
+type Session struct { + // Stdin specifies the remote process's standard input. + // If Stdin is nil, the remote process reads from an empty + // bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr specify the remote process's standard + // output and error. + // + // If either is nil, Run connects the corresponding file + // descriptor to an instance of ioutil.Discard. There is a + // fixed amount of buffering that is shared for the two streams. + // If either blocks it may eventually cause the remote + // command to block. + Stdout io.Writer + Stderr io.Writer + + ch Channel // the channel backing this session + started bool // true once Start, Run or Shell is invoked. + copyFuncs []func() error + errors chan error // one send per copyFunc + + // true if pipe method is active + stdinpipe, stdoutpipe, stderrpipe bool + + // stdinPipeWriter is non-nil if StdinPipe has not been called + // and Stdin was specified by the user; it is the write end of + // a pipe connecting Session.Stdin to the stdin channel. + stdinPipeWriter io.WriteCloser + + exitStatus chan error +} + +// SendRequest sends an out-of-band channel request on the SSH channel +// underlying the session. +func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { + return s.ch.SendRequest(name, wantReply, payload) +} + +func (s *Session) Close() error { + return s.ch.Close() +} + +// RFC 4254 Section 6.4. +type setenvRequest struct { + Name string + Value string +} + +// Setenv sets an environment variable that will be applied to any +// command executed by Shell or Run. +func (s *Session) Setenv(name, value string) error { + msg := setenvRequest{ + Name: name, + Value: value, + } + ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: setenv failed") + } + return err +} + +// RFC 4254 Section 6.2. +type ptyRequestMsg struct { + Term string + Columns uint32 + Rows uint32 + Width uint32 + Height uint32 + Modelist string +} + +// RequestPty requests the association of a pty with the session on the remote host. +func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { + var tm []byte + for k, v := range termmodes { + kv := struct { + Key byte + Val uint32 + }{k, v} + + tm = append(tm, Marshal(&kv)...) + } + tm = append(tm, tty_OP_END) + req := ptyRequestMsg{ + Term: term, + Columns: uint32(w), + Rows: uint32(h), + Width: uint32(w * 8), + Height: uint32(h * 8), + Modelist: string(tm), + } + ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) + if err == nil && !ok { + err = errors.New("ssh: pty-req failed") + } + return err +} + +// RFC 4254 Section 6.5. +type subsystemRequestMsg struct { + Subsystem string +} + +// RequestSubsystem requests the association of a subsystem with the session on the remote host. +// A subsystem is a predefined command that runs in the background when the ssh session is initiated +func (s *Session) RequestSubsystem(subsystem string) error { + msg := subsystemRequestMsg{ + Subsystem: subsystem, + } + ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) + if err == nil && !ok { + err = errors.New("ssh: subsystem request failed") + } + return err +} + +// RFC 4254 Section 6.9. +type signalMsg struct { + Signal string +} + +// Signal sends the given signal to the remote process. +// sig is one of the SIG* constants. 
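// Illustrative sketch: the usual way these requests are combined when
// starting an interactive shell (sess is an established *Session; error
// handling elided).
//
//	modes := TerminalModes{
//		ECHO:          0,     // disable echoing
//		TTY_OP_ISPEED: 14400, // input speed = 14.4 kbaud
//		TTY_OP_OSPEED: 14400, // output speed = 14.4 kbaud
//	}
//	if err := sess.RequestPty("xterm", 40, 80, modes); err != nil {
//		return err
//	}
//	if err := sess.Shell(); err != nil {
//		return err
//	}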
+func (s *Session) Signal(sig Signal) error { + msg := signalMsg{ + Signal: string(sig), + } + + _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) + return err +} + +// RFC 4254 Section 6.5. +type execMsg struct { + Command string +} + +// Start runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start or Shell. +func (s *Session) Start(cmd string) error { + if s.started { + return errors.New("ssh: session already started") + } + req := execMsg{ + Command: cmd, + } + + ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) + if err == nil && !ok { + err = fmt.Errorf("ssh: command %v failed", cmd) + } + if err != nil { + return err + } + return s.start() +} + +// Run runs cmd on the remote host. Typically, the remote +// server passes cmd to the shell for interpretation. +// A Session only accepts one call to Run, Start, Shell, Output, +// or CombinedOutput. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Run(cmd string) error { + err := s.Start(cmd) + if err != nil { + return err + } + return s.Wait() +} + +// Output runs cmd on the remote host and returns its standard output. +func (s *Session) Output(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + var b bytes.Buffer + s.Stdout = &b + err := s.Run(cmd) + return b.Bytes(), err +} + +type singleWriter struct { + b bytes.Buffer + mu sync.Mutex +} + +func (w *singleWriter) Write(p []byte) (int, error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.b.Write(p) +} + +// CombinedOutput runs cmd on the remote host and returns its combined +// standard output and standard error. +func (s *Session) CombinedOutput(cmd string) ([]byte, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + var b singleWriter + s.Stdout = &b + s.Stderr = &b + err := s.Run(cmd) + return b.b.Bytes(), err +} + +// Shell starts a login shell on the remote host. A Session only +// accepts one call to Run, Start, Shell, Output, or CombinedOutput. +func (s *Session) Shell() error { + if s.started { + return errors.New("ssh: session already started") + } + + ok, err := s.ch.SendRequest("shell", true, nil) + if err == nil && !ok { + return errors.New("ssh: could not start shell") + } + if err != nil { + return err + } + return s.start() +} + +func (s *Session) start() error { + s.started = true + + type F func(*Session) + for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { + setupFd(s) + } + + s.errors = make(chan error, len(s.copyFuncs)) + for _, fn := range s.copyFuncs { + go func(fn func() error) { + s.errors <- fn() + }(fn) + } + return nil +} + +// Wait waits for the remote command to exit. +// +// The returned error is nil if the command runs, has no problems +// copying stdin, stdout, and stderr, and exits with a zero exit +// status. +// +// If the remote server does not send an exit status, an error of type +// *ExitMissingError is returned. 
If the command completes +// unsuccessfully or is interrupted by a signal, the error is of type +// *ExitError. Other error types may be returned for I/O problems. +func (s *Session) Wait() error { + if !s.started { + return errors.New("ssh: session not started") + } + waitErr := <-s.exitStatus + + if s.stdinPipeWriter != nil { + s.stdinPipeWriter.Close() + } + var copyError error + for _ = range s.copyFuncs { + if err := <-s.errors; err != nil && copyError == nil { + copyError = err + } + } + if waitErr != nil { + return waitErr + } + return copyError +} + +func (s *Session) wait(reqs <-chan *Request) error { + wm := Waitmsg{status: -1} + // Wait for msg channel to be closed before returning. + for msg := range reqs { + switch msg.Type { + case "exit-status": + wm.status = int(binary.BigEndian.Uint32(msg.Payload)) + case "exit-signal": + var sigval struct { + Signal string + CoreDumped bool + Error string + Lang string + } + if err := Unmarshal(msg.Payload, &sigval); err != nil { + return err + } + + // Must sanitize strings? + wm.signal = sigval.Signal + wm.msg = sigval.Error + wm.lang = sigval.Lang + default: + // This handles keepalives and matches + // OpenSSH's behaviour. + if msg.WantReply { + msg.Reply(false, nil) + } + } + } + if wm.status == 0 { + return nil + } + if wm.status == -1 { + // exit-status was never sent from server + if wm.signal == "" { + // signal was not sent either. RFC 4254 + // section 6.10 recommends against this + // behavior, but it is allowed, so we let + // clients handle it. + return &ExitMissingError{} + } + wm.status = 128 + if _, ok := signals[Signal(wm.signal)]; ok { + wm.status += signals[Signal(wm.signal)] + } + } + + return &ExitError{wm} +} + +// ExitMissingError is returned if a session is torn down cleanly, but +// the server sends no confirmation of the exit status. +type ExitMissingError struct{} + +func (e *ExitMissingError) Error() string { + return "wait: remote command exited without exit status or exit signal" +} + +func (s *Session) stdin() { + if s.stdinpipe { + return + } + var stdin io.Reader + if s.Stdin == nil { + stdin = new(bytes.Buffer) + } else { + r, w := io.Pipe() + go func() { + _, err := io.Copy(w, s.Stdin) + w.CloseWithError(err) + }() + stdin, s.stdinPipeWriter = r, w + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.ch, stdin) + if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { + err = err1 + } + return err + }) +} + +func (s *Session) stdout() { + if s.stdoutpipe { + return + } + if s.Stdout == nil { + s.Stdout = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stdout, s.ch) + return err + }) +} + +func (s *Session) stderr() { + if s.stderrpipe { + return + } + if s.Stderr == nil { + s.Stderr = ioutil.Discard + } + s.copyFuncs = append(s.copyFuncs, func() error { + _, err := io.Copy(s.Stderr, s.ch.Stderr()) + return err + }) +} + +// sessionStdin reroutes Close to CloseWrite. +type sessionStdin struct { + io.Writer + ch Channel +} + +func (s *sessionStdin) Close() error { + return s.ch.CloseWrite() +} + +// StdinPipe returns a pipe that will be connected to the +// remote command's standard input when the command starts. 
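// Illustrative sketch: distinguishing the error shapes Wait (and hence Run or
// Output) can produce (sess is an established *Session).
//
//	out, err := sess.Output("uname -a")
//	switch err.(type) {
//	case nil:
//		// out holds the command's stdout
//	case *ExitError:
//		// the remote command exited with a non-zero status or a signal
//	case *ExitMissingError:
//		// the session closed cleanly but no exit status was ever sent
//	default:
//		// I/O or protocol failure
//	}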
+func (s *Session) StdinPipe() (io.WriteCloser, error) { + if s.Stdin != nil { + return nil, errors.New("ssh: Stdin already set") + } + if s.started { + return nil, errors.New("ssh: StdinPipe after process started") + } + s.stdinpipe = true + return &sessionStdin{s.ch, s.ch}, nil +} + +// StdoutPipe returns a pipe that will be connected to the +// remote command's standard output when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StdoutPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StdoutPipe() (io.Reader, error) { + if s.Stdout != nil { + return nil, errors.New("ssh: Stdout already set") + } + if s.started { + return nil, errors.New("ssh: StdoutPipe after process started") + } + s.stdoutpipe = true + return s.ch, nil +} + +// StderrPipe returns a pipe that will be connected to the +// remote command's standard error when the command starts. +// There is a fixed amount of buffering that is shared between +// stdout and stderr streams. If the StderrPipe reader is +// not serviced fast enough it may eventually cause the +// remote command to block. +func (s *Session) StderrPipe() (io.Reader, error) { + if s.Stderr != nil { + return nil, errors.New("ssh: Stderr already set") + } + if s.started { + return nil, errors.New("ssh: StderrPipe after process started") + } + s.stderrpipe = true + return s.ch.Stderr(), nil +} + +// newSession returns a new interactive session on the remote host. +func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { + s := &Session{ + ch: ch, + } + s.exitStatus = make(chan error, 1) + go func() { + s.exitStatus <- s.wait(reqs) + }() + + return s, nil +} + +// An ExitError reports unsuccessful completion of a remote command. +type ExitError struct { + Waitmsg +} + +func (e *ExitError) Error() string { + return e.Waitmsg.String() +} + +// Waitmsg stores the information about an exited remote command +// as reported by Wait. +type Waitmsg struct { + status int + signal string + msg string + lang string +} + +// ExitStatus returns the exit status of the remote command. +func (w Waitmsg) ExitStatus() int { + return w.status +} + +// Signal returns the exit signal of the remote command if +// it was terminated violently. +func (w Waitmsg) Signal() string { + return w.signal +} + +// Msg returns the exit message given by the remote command +func (w Waitmsg) Msg() string { + return w.msg +} + +// Lang returns the language tag. See RFC 3066 +func (w Waitmsg) Lang() string { + return w.lang +} + +func (w Waitmsg) String() string { + str := fmt.Sprintf("Process exited with status %v", w.status) + if w.signal != "" { + str += fmt.Sprintf(" from signal %v", w.signal) + } + if w.msg != "" { + str += fmt.Sprintf(". Reason was: %v", w.msg) + } + return str +} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go new file mode 100644 index 0000000000000000000000000000000000000000..6151241ff08e3fd4d5b9ad6c66159ba044ced239 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -0,0 +1,407 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +// Listen requests the remote peer open a listening socket on +// addr. 
Incoming connections will be available by calling Accept on +// the returned net.Listener. The listener must be serviced, or the +// SSH connection may hang. +func (c *Client) Listen(n, addr string) (net.Listener, error) { + laddr, err := net.ResolveTCPAddr(n, addr) + if err != nil { + return nil, err + } + return c.ListenTCP(laddr) +} + +// Automatic port allocation is broken with OpenSSH before 6.0. See +// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In +// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, +// rather than the actual port number. This means you can never open +// two different listeners with auto allocated ports. We work around +// this by trying explicit ports until we succeed. + +const openSSHPrefix = "OpenSSH_" + +var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) + +// isBrokenOpenSSHVersion returns true if the given version string +// specifies a version of OpenSSH that is known to have a bug in port +// forwarding. +func isBrokenOpenSSHVersion(versionStr string) bool { + i := strings.Index(versionStr, openSSHPrefix) + if i < 0 { + return false + } + i += len(openSSHPrefix) + j := i + for ; j < len(versionStr); j++ { + if versionStr[j] < '0' || versionStr[j] > '9' { + break + } + } + version, _ := strconv.Atoi(versionStr[i:j]) + return version < 6 +} + +// autoPortListenWorkaround simulates automatic port allocation by +// trying random ports repeatedly. +func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { + var sshListener net.Listener + var err error + const tries = 10 + for i := 0; i < tries; i++ { + addr := *laddr + addr.Port = 1024 + portRandomizer.Intn(60000) + sshListener, err = c.ListenTCP(&addr) + if err == nil { + laddr.Port = addr.Port + return sshListener, err + } + } + return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) +} + +// RFC 4254 7.1 +type channelForwardMsg struct { + addr string + rport uint32 +} + +// ListenTCP requests the remote peer open a listening socket +// on laddr. Incoming connections will be available by calling +// Accept on the returned net.Listener. +func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { + if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { + return c.autoPortListenWorkaround(laddr) + } + + m := channelForwardMsg{ + laddr.IP.String(), + uint32(laddr.Port), + } + // send message + ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) + if err != nil { + return nil, err + } + if !ok { + return nil, errors.New("ssh: tcpip-forward request denied by peer") + } + + // If the original port was 0, then the remote side will + // supply a real port number in the response. + if laddr.Port == 0 { + var p struct { + Port uint32 + } + if err := Unmarshal(resp, &p); err != nil { + return nil, err + } + laddr.Port = int(p.Port) + } + + // Register this forward, using the port number we obtained. + ch := c.forwards.add(*laddr) + + return &tcpListener{laddr, c, ch}, nil +} + +// forwardList stores a mapping between remote +// forward requests and the tcpListeners. +type forwardList struct { + sync.Mutex + entries []forwardEntry +} + +// forwardEntry represents an established mapping of a laddr on a +// remote ssh server to a channel connected to a tcpListener. +type forwardEntry struct { + laddr net.TCPAddr + c chan forward +} + +// forward represents an incoming forwarded tcpip connection. 
The +// arguments to add/remove/lookup should be address as specified in +// the original forward-request. +type forward struct { + newCh NewChannel // the ssh client channel underlying this forward + raddr *net.TCPAddr // the raddr of the incoming connection +} + +func (l *forwardList) add(addr net.TCPAddr) chan forward { + l.Lock() + defer l.Unlock() + f := forwardEntry{ + addr, + make(chan forward, 1), + } + l.entries = append(l.entries, f) + return f.c +} + +// See RFC 4254, section 7.2 +type forwardedTCPPayload struct { + Addr string + Port uint32 + OriginAddr string + OriginPort uint32 +} + +// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. +func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { + if port == 0 || port > 65535 { + return nil, fmt.Errorf("ssh: port number out of range: %d", port) + } + ip := net.ParseIP(string(addr)) + if ip == nil { + return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) + } + return &net.TCPAddr{IP: ip, Port: int(port)}, nil +} + +func (l *forwardList) handleChannels(in <-chan NewChannel) { + for ch := range in { + var payload forwardedTCPPayload + if err := Unmarshal(ch.ExtraData(), &payload); err != nil { + ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) + continue + } + + // RFC 4254 section 7.2 specifies that incoming + // addresses should list the address, in string + // format. It is implied that this should be an IP + // address, as it would be impossible to connect to it + // otherwise. + laddr, err := parseTCPAddr(payload.Addr, payload.Port) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort) + if err != nil { + ch.Reject(ConnectionFailed, err.Error()) + continue + } + + if ok := l.forward(*laddr, *raddr, ch); !ok { + // Section 7.2, implementations MUST reject spurious incoming + // connections. + ch.Reject(Prohibited, "no forward for address") + continue + } + } +} + +// remove removes the forward entry, and the channel feeding its +// listener. +func (l *forwardList) remove(addr net.TCPAddr) { + l.Lock() + defer l.Unlock() + for i, f := range l.entries { + if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port { + l.entries = append(l.entries[:i], l.entries[i+1:]...) + close(f.c) + return + } + } +} + +// closeAll closes and clears all forwards. +func (l *forwardList) closeAll() { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + close(f.c) + } + l.entries = nil +} + +func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool { + l.Lock() + defer l.Unlock() + for _, f := range l.entries { + if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port { + f.c <- forward{ch, &raddr} + return true + } + } + return false +} + +type tcpListener struct { + laddr *net.TCPAddr + + conn *Client + in <-chan forward +} + +// Accept waits for and returns the next connection to the listener. +func (l *tcpListener) Accept() (net.Conn, error) { + s, ok := <-l.in + if !ok { + return nil, io.EOF + } + ch, incoming, err := s.newCh.Accept() + if err != nil { + return nil, err + } + go DiscardRequests(incoming) + + return &tcpChanConn{ + Channel: ch, + laddr: l.laddr, + raddr: s.raddr, + }, nil +} + +// Close closes the listener. +func (l *tcpListener) Close() error { + m := channelForwardMsg{ + l.laddr.IP.String(), + uint32(l.laddr.Port), + } + + // this also closes the listener. 
+ l.conn.forwards.remove(*l.laddr) + ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) + if err == nil && !ok { + err = errors.New("ssh: cancel-tcpip-forward failed") + } + return err +} + +// Addr returns the listener's network address. +func (l *tcpListener) Addr() net.Addr { + return l.laddr +} + +// Dial initiates a connection to the addr from the remote host. +// The resulting connection has a zero LocalAddr() and RemoteAddr(). +func (c *Client) Dial(n, addr string) (net.Conn, error) { + // Parse the address into host and numeric port. + host, portString, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.ParseUint(portString, 10, 16) + if err != nil { + return nil, err + } + // Use a zero address for local and remote address. + zeroAddr := &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port)) + if err != nil { + return nil, err + } + return &tcpChanConn{ + Channel: ch, + laddr: zeroAddr, + raddr: zeroAddr, + }, nil +} + +// DialTCP connects to the remote address raddr on the network net, +// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used +// as the local address for the connection. +func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { + if laddr == nil { + laddr = &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } + } + ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) + if err != nil { + return nil, err + } + return &tcpChanConn{ + Channel: ch, + laddr: laddr, + raddr: raddr, + }, nil +} + +// RFC 4254 7.2 +type channelOpenDirectMsg struct { + raddr string + rport uint32 + laddr string + lport uint32 +} + +func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { + msg := channelOpenDirectMsg{ + raddr: raddr, + rport: uint32(rport), + laddr: laddr, + lport: uint32(lport), + } + ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) + if err != nil { + return nil, err + } + go DiscardRequests(in) + return ch, err +} + +type tcpChan struct { + Channel // the backing channel +} + +// tcpChanConn fulfills the net.Conn interface without +// the tcpChan having to hold laddr or raddr directly. +type tcpChanConn struct { + Channel + laddr, raddr net.Addr +} + +// LocalAddr returns the local network address. +func (t *tcpChanConn) LocalAddr() net.Addr { + return t.laddr +} + +// RemoteAddr returns the remote network address. +func (t *tcpChanConn) RemoteAddr() net.Addr { + return t.raddr +} + +// SetDeadline sets the read and write deadlines associated +// with the connection. +func (t *tcpChanConn) SetDeadline(deadline time.Time) error { + if err := t.SetReadDeadline(deadline); err != nil { + return err + } + return t.SetWriteDeadline(deadline) +} + +// SetReadDeadline sets the read deadline. +// A zero value for t means Read will not time out. +// After the deadline, the error from Read will implement net.Error +// with Timeout() == true. +func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} + +// SetWriteDeadline exists to satisfy the net.Conn interface +// but is not implemented by this type. It always returns an error. 
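// Illustrative sketch: both directions of TCP forwarding over one client
// connection (client is an established *Client; error handling elided).
//
//	// Outbound ("direct-tcpip"): dial an address as seen from the remote side.
//	conn, _ := client.Dial("tcp", "10.0.0.5:80") // a net.Conn; deadlines unsupported
//
//	// Inbound ("tcpip-forward"): ask the peer to listen and accept here.
//	ln, _ := client.Listen("tcp", "0.0.0.0:8022")
//	for {
//		c, err := ln.Accept()
//		if err != nil {
//			break
//		}
//		go handle(c) // handle is a hypothetical connection handler
//	}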
+func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error { + return errors.New("ssh: tcpChan: deadline not supported") +} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go new file mode 100644 index 0000000000000000000000000000000000000000..f9780e0ae702d155b18a5e305a4dd149c1aa2c72 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/transport.go @@ -0,0 +1,375 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ssh + +import ( + "bufio" + "errors" + "io" + "log" +) + +// debugTransport if set, will print packet types as they go over the +// wire. No message decoding is done, to minimize the impact on timing. +const debugTransport = false + +const ( + gcmCipherID = "aes128-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +// packetConn represents a transport that implements packet based +// operations. +type packetConn interface { + // Encrypt and send a packet of data to the remote peer. + writePacket(packet []byte) error + + // Read a packet from the connection. The read is blocking, + // i.e. if error is nil, then the returned byte slice is + // always non-empty. + readPacket() ([]byte, error) + + // Close closes the write-side of the connection. + Close() error +} + +// transport is the keyingTransport that implements the SSH packet +// protocol. +type transport struct { + reader connectionState + writer connectionState + + bufReader *bufio.Reader + bufWriter *bufio.Writer + rand io.Reader + isClient bool + io.Closer +} + +// packetCipher represents a combination of SSH encryption/MAC +// protocol. A single instance should be used for one direction only. +type packetCipher interface { + // writePacket encrypts the packet and writes it to w. The + // contents of the packet are generally scrambled. + writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error + + // readPacket reads and decrypts a packet of data. The + // returned packet may be overwritten by future calls of + // readPacket. + readPacket(seqnum uint32, r io.Reader) ([]byte, error) +} + +// connectionState represents one side (read or write) of the +// connection. This is necessary because each direction has its own +// keys, and can even have its own algorithms +type connectionState struct { + packetCipher + seqNum uint32 + dir direction + pendingKeyChange chan packetCipher +} + +// prepareKeyChange sets up key material for a keychange. The key changes in +// both directions are triggered by reading and writing a msgNewKey packet +// respectively. +func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { + if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil { + return err + } else { + t.reader.pendingKeyChange <- ciph + } + + if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil { + return err + } else { + t.writer.pendingKeyChange <- ciph + } + + return nil +} + +func (t *transport) printPacket(p []byte, write bool) { + if len(p) == 0 { + return + } + who := "server" + if t.isClient { + who = "client" + } + what := "read" + if write { + what = "write" + } + + log.Println(what, who, p[0]) +} + +// Read and decrypt next packet. 
+func (t *transport) readPacket() (p []byte, err error) { + for { + p, err = t.reader.readPacket(t.bufReader) + if err != nil { + break + } + if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { + break + } + } + if debugTransport { + t.printPacket(p, false) + } + + return p, err +} + +func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { + packet, err := s.packetCipher.readPacket(s.seqNum, r) + s.seqNum++ + if err == nil && len(packet) == 0 { + err = errors.New("ssh: zero length packet") + } + + if len(packet) > 0 { + switch packet[0] { + case msgNewKeys: + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + return nil, errors.New("ssh: got bogus newkeys message.") + } + + case msgDisconnect: + // Transform a disconnect message into an + // error. Since this is lowest level at which + // we interpret message types, doing it here + // ensures that we don't have to handle it + // elsewhere. + var msg disconnectMsg + if err := Unmarshal(packet, &msg); err != nil { + return nil, err + } + return nil, &msg + } + } + + // The packet may point to an internal buffer, so copy the + // packet out here. + fresh := make([]byte, len(packet)) + copy(fresh, packet) + + return fresh, err +} + +func (t *transport) writePacket(packet []byte) error { + if debugTransport { + t.printPacket(packet, true) + } + return t.writer.writePacket(t.bufWriter, t.rand, packet) +} + +func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { + changeKeys := len(packet) > 0 && packet[0] == msgNewKeys + + err := s.packetCipher.writePacket(s.seqNum, w, rand, packet) + if err != nil { + return err + } + if err = w.Flush(); err != nil { + return err + } + s.seqNum++ + if changeKeys { + select { + case cipher := <-s.pendingKeyChange: + s.packetCipher = cipher + default: + panic("ssh: no key material for msgNewKeys") + } + } + return err +} + +func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { + t := &transport{ + bufReader: bufio.NewReader(rwc), + bufWriter: bufio.NewWriter(rwc), + rand: rand, + reader: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + writer: connectionState{ + packetCipher: &streamPacketCipher{cipher: noneCipher{}}, + pendingKeyChange: make(chan packetCipher, 1), + }, + Closer: rwc, + } + t.isClient = isClient + + if isClient { + t.reader.dir = serverKeys + t.writer.dir = clientKeys + } else { + t.reader.dir = clientKeys + t.writer.dir = serverKeys + } + + return t +} + +type direction struct { + ivTag []byte + keyTag []byte + macKeyTag []byte +} + +var ( + serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} + clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} +) + +// generateKeys generates key material for IV, MAC and encryption. +func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) { + cipherMode := cipherModes[algs.Cipher] + macMode := macModes[algs.MAC] + + iv = make([]byte, cipherMode.ivSize) + key = make([]byte, cipherMode.keySize) + macKey = make([]byte, macMode.keySize) + + generateKeyMaterial(iv, d.ivTag, kex) + generateKeyMaterial(key, d.keyTag, kex) + generateKeyMaterial(macKey, d.macKeyTag, kex) + return +} + +// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as +// described in RFC 4253, section 6.4. 
direction should either be serverKeys +// (to setup server->client keys) or clientKeys (for client->server keys). +func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { + iv, key, macKey := generateKeys(d, algs, kex) + + if algs.Cipher == gcmCipherID { + return newGCMCipher(iv, key, macKey) + } + + if algs.Cipher == aes128cbcID { + return newAESCBCCipher(iv, key, macKey, algs) + } + + if algs.Cipher == tripledescbcID { + return newTripleDESCBCCipher(iv, key, macKey, algs) + } + + c := &streamPacketCipher{ + mac: macModes[algs.MAC].new(macKey), + etm: macModes[algs.MAC].etm, + } + c.macResult = make([]byte, c.mac.Size()) + + var err error + c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv) + if err != nil { + return nil, err + } + + return c, nil +} + +// generateKeyMaterial fills out with key material generated from tag, K, H +// and sessionId, as specified in RFC 4253, section 7.2. +func generateKeyMaterial(out, tag []byte, r *kexResult) { + var digestsSoFar []byte + + h := r.Hash.New() + for len(out) > 0 { + h.Reset() + h.Write(r.K) + h.Write(r.H) + + if len(digestsSoFar) == 0 { + h.Write(tag) + h.Write(r.SessionID) + } else { + h.Write(digestsSoFar) + } + + digest := h.Sum(nil) + n := copy(out, digest) + out = out[n:] + if len(out) > 0 { + digestsSoFar = append(digestsSoFar, digest...) + } + } +} + +const packageVersion = "SSH-2.0-Go" + +// Sends and receives a version line. The versionLine string should +// be US ASCII, start with "SSH-2.0-", and should not include a +// newline. exchangeVersions returns the other side's version line. +func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { + // Contrary to the RFC, we do not ignore lines that don't + // start with "SSH-2.0-" to make the library usable with + // nonconforming servers. + for _, c := range versionLine { + // The spec disallows non US-ASCII chars, and + // specifically forbids null chars. + if c < 32 { + return nil, errors.New("ssh: junk character in version line") + } + } + if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { + return + } + + them, err = readVersion(rw) + return them, err +} + +// maxVersionStringBytes is the maximum number of bytes that we'll +// accept as a version string. RFC 4253 section 4.2 limits this at 255 +// chars +const maxVersionStringBytes = 255 + +// Read version string as specified by RFC 4253, section 4.2. +func readVersion(r io.Reader) ([]byte, error) { + versionString := make([]byte, 0, 64) + var ok bool + var buf [1]byte + + for len(versionString) < maxVersionStringBytes { + _, err := io.ReadFull(r, buf[:]) + if err != nil { + return nil, err + } + // The RFC says that the version should be terminated with \r\n + // but several SSH servers actually only send a \n. + if buf[0] == '\n' { + ok = true + break + } + + // non ASCII chars are disallowed, but we are lenient, + // since Go doesn't use null-terminated strings. + + // The RFC allows a comment after a space, however, + // all of it (version and comments) goes into the + // session hash. + versionString = append(versionString, buf[0]) + } + + if !ok { + return nil, errors.New("ssh: overflow reading version string") + } + + // There might be a '\r' on the end which we should remove. 
+ if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { + versionString = versionString[:len(versionString)-1] + } + return versionString, nil +} diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6a66aea5eafe0ca6a688840c47219556c552488e --- /dev/null +++ b/vendor/golang.org/x/net/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS new file mode 100644 index 0000000000000000000000000000000000000000..733099041f84fa1e58611ab2e11af51c1f26d1d2 --- /dev/null +++ b/vendor/golang.org/x/net/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. 
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000000000000000000000000000000000000..f143ed6a1eb064088a860b89b1939acef1dc3b12 --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,156 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. 
+ Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 0000000000000000000000000000000000000000..606cf1f9726213a2c2aa1bec11b1c59e6921d50d --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go new file mode 100644 index 0000000000000000000000000000000000000000..926870cc23fd642e319cabce8438750e0c6df677 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go @@ -0,0 +1,147 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. 
+ cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = ¬ifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000000000000000000000000000000000000..d20f52b7de93f81675cb405bd4309af9ee61d2d1 --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. 
+var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000000000000000000000000000000000000..0f35592df51885abc1eb01b2e58318590df65874 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md new file mode 100644 index 0000000000000000000000000000000000000000..07f3e66bf67ab95d9703d9e2c6cf005510d6f3ee --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-3.0.md @@ -0,0 +1,363 @@ +# Elastic 3.0 + +Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes. + +We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft. + +So, to summarize: + +1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained. +2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch. + +The rest of the document is a list of all changes in Elastic 3.0. + +## Pointer types + +All types have changed to be pointer types, not value types. This not only is cleaner but also simplifies the API as illustrated by the following example: + +Example for Elastic 2.0 (old): + +```go +q := elastic.NewMatchAllQuery() +res, err := elastic.Search("one").Query(&q).Do() // notice the & here +``` + +Example for Elastic 3.0 (new): + +```go +q := elastic.NewMatchAllQuery() +res, err := elastic.Search("one").Query(q).Do() // no more & +// ... which can be simplified as: +res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do() +``` + +It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046). + +## Query/filter merge + +One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`). 
+ +The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay! + +Notice that some methods still come by "filter", e.g. `PostFilter`. However, they accept a `Query` now when they used to accept a `Filter` before. + +Example for Elastic 2.0 (old): + +```go +q := elastic.NewMatchAllQuery() +f := elastic.NewTermFilter("tag", "important") +res, err := elastic.Search().Index("one").Query(&q).PostFilter(f) +``` + +Example for Elastic 3.0 (new): + +```go +q := elastic.NewMatchAllQuery() +f := elastic.NewTermQuery("tag", "important") // it's a query now! +res, err := elastic.Search().Index("one").Query(q).PostFilter(f) +``` + +## Facets are removed + +[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now. + +## Errors + +Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer. + +Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59). + +### HTTP Status 404 (Not Found) + +When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0. + +Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error. + +To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below). + +The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0. + +Example for Elastic 2.0 (old): + +```go +res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() +if err != nil { + // Something else went wrong (but 404 is NOT an error in Elastic 2.0) +} +if !res.Found { + // Document has not been found +} +``` + +Example for Elastic 3.0 (new): + +```go +res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() +if err != nil { + if elastic.IsNotFound(err) { + // Document has not been found + } else { + // Something else went wrong + } +} +``` + +### HTTP Status 408 (Timeouts) + +Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API. + +To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper. + +Example for Elastic 2.0 (old): + +```go +health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do() +if err != nil { + // ... 
+}
+if health.TimedOut {
+	// We have a timeout
+}
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do()
+if elastic.IsTimeout(err) {
+	// We have a timeout
+}
+```
+
+### Bulk Errors
+
+The error response of a bulk operation used to be a simple string in Elasticsearch 1.x.
+In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error.
+These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206).
+
+### Removed specific Elastic errors
+
+The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message.
+
+## Numeric types
+
+Elastic 3.0 has settled on using `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`.
+
+## Pluralization
+
+Some services accept zero, one or more indices or types to operate on.
+E.g. the `SearchService` accepts a list of zero, one, or more indices to
+search and therefore had a func called `Index(index string)` and a func
+called `Indices(indices ...string)`.
+
+Elastic 3.0 now only uses the singular form which, when applicable, accepts a
+variadic argument. E.g. in the case of the `SearchService`, you now only have
+one func with the following signature: `Index(indices ...string)`.
+
+Notice that this is limited to `Index(...)` and `Type(...)`. There are other
+services with variadic functions. These have not been changed.
+
+## Multiple calls to variadic functions
+
+Some services with variadic functions cleared the underlying slice when
+called multiple times, while other services just added to the existing slice.
+This has now been normalized to always add to the underlying slice.
+
+Example for Elastic 2.0 (old):
+
+```go
+// Would only clear scroll id "two"
+// because ScrollId cleared the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+// Now (correctly) clears both scroll id "one" and "two"
+// because ScrollId no longer clears the values when called multiple times
+client.ClearScroll().ScrollId("one").ScrollId("two").Do()
+```
+
+## Ping service requires URL
+
+The `Ping` service raised some issues because it is different from all
+other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`.
+
+Users expected to ping the cluster, but that is not possible as the cluster
+can be a set of many nodes, so which node do we ping then?
+
+To make this clearer, the `Ping` function on the client now requires users
+to explicitly set the URL of the node to ping.
+
+## Meta fields
+
+Many of the meta fields, e.g. `_parent` or `_routing`, are now
+[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields)
+and are no longer returned as part of the `fields` object. We had to change
+larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0.
+
+Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default).
+
+## HasParentQuery / HasChildQuery
+
+`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. This is now in line with the Java API.
+
+Example for Elastic 2.0 (old):
+
+```go
+allQ := elastic.NewMatchAllQuery()
+q := elastic.NewHasChildFilter("tweet").Query(&allQ)
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery())
+```
+
+## SetBasicAuth client option
+
+You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html).
+
+Example:
+
+```go
+client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret"))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+## Delete-by-Query API
+
+The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer a core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404.
+
+An older version of this document stated the following:
+
+> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
+>
+> Example for Elastic 3.0 (new):
+>
+> ```go
+> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do()
+> if err == elastic.ErrPluginNotFound {
+>   // Delete By Query API is not available
+> }
+> ```
+
+I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check whether the plugin is missing whenever an error occurs. This is not only slow, but it also puts logic into a service that should really just be opaque and return the response from Elasticsearch.
+
+If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play.
+
+## HasPlugin and SetRequiredPlugins
+
+Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html).
+
+You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client.
+
+Example for Elastic 3.0 (new):
+
+```go
+found, err := client.HasPlugin("delete-by-query")
+if err == nil && found {
+	// ... Delete By Query API is available
+}
+```
+
+To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client won't be created in the first place.
+
+```go
+// Will raise an error if the "delete-by-query" plugin is NOT installed
+client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query"))
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+Notice that there is also a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file.
+
+## Common Query has been renamed to Common Terms Query
+
+The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring).
+
+## Remove `MoreLikeThis` and `MoreLikeThisField`
+
+The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`.
+
+## Remove Filtered Query
+
+With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite much of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply replace your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated).
+
+## Remove FuzzyLikeThis and FuzzyLikeThisField
+
+Both have been removed from Elasticsearch 2.0 as well.
+
+## Remove LimitFilter
+
+The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects.
+
+## Remove `_cache` and `_cache_key` from filters
+
+Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching).
+
+## Partial fields are gone
+
+Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html).
+
+## Scripting
+
+A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type.
+
+Example for Elastic 2.0 (old):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+	Script("ctx._source.retweets += num").
+	ScriptParams(map[string]interface{}{"num": 1}).
+	Upsert(map[string]interface{}{"retweets": 0}).
+	Do()
+```
+
+Example for Elastic 3.0 (new):
+
+```go
+update, err := client.Update().Index("twitter").Type("tweet").Id("1").
+	Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)).
+	Upsert(map[string]interface{}{"retweets": 0}).
+	Do()
+```
+
+## Cluster State
+
+The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`.
+
+## Unexported structs in response
+
+Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0, however, we changed this: (most) sub-structs are now unexported, meaning you can only pass around the whole response, not its sub-structures. This makes it easier to restructure responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example.
+
+## Add offset to Histogram aggregation
+
+Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option.
+
+## Services
+
+### REST API specification
+
+As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure.
+
+Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process.
+
+This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes.
+
+At the same time, the file names of the services are renamed to match the REST API specification naming.
+
+### REST API Test Suite
+
+The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well.
+
+This process is not complete yet, though.
+
+
diff --git a/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md
new file mode 100644
index 0000000000000000000000000000000000000000..161c6a1cedf38aded6b7029bda30bdb8d80f590b
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/CHANGELOG-5.0.md
@@ -0,0 +1,195 @@
+# Changes in Elastic 5.0
+
+## Enforce context.Context in PerformRequest and Do
+
+We enforce the usage of `context.Context` everywhere you execute a request.
+You need to change all your `Do()` calls to pass a context: `Do(ctx)`.
+This enables automatic request cancelation and many other patterns.
+
+If you don't need this, simply pass `context.TODO()` or `context.Background()`.
+
+## Warmers removed
+
+Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers).
+
+## Optimize removed
+
+Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed).
+Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead.
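+
+For illustration, here is a minimal sketch of the replacement, assuming the Go client exposes the Force Merge API via its `Forcemerge` service and using the context-aware `Do(ctx)` described above (the index name is only an example):
+
+```go
+ctx := context.Background()
+// Force Merge replaces the removed Optimize endpoint.
+_, err := client.Forcemerge("twitter").Do(ctx)
+if err != nil {
+	log.Fatal(err)
+}
+```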
+
+## Missing Query removed
+
+The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query).
+Use an `exists` query inside a `must_not` clause of a `bool` query instead.
+
+## And Query removed
+
+The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `must` clauses in a `bool` query instead.
+
+## Not Query removed
+
+TODO Is it removed?
+
+## Or Query removed
+
+The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use `should` clauses in a `bool` query instead.
+
+## Filtered Query removed
+
+The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use a `bool` query instead, which supports `filter` clauses too.
+
+## Limit Query removed
+
+The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed).
+Use the `terminate_after` parameter instead.
+
+## Template Query removed
+
+The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use
+Search Templates instead.
+
+We removed it from Elastic 5.0 as the 5.0 update is already a good opportunity
+to get rid of old stuff.
+
+## `_timestamp` and `_ttl` removed
+
+Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal).
+
+## Search template Put/Delete API returns `acknowledged` only
+
+The response type for Put/Delete search templates has changed.
+It only returns a single `acknowledged` flag now.
+
+## Fields has been renamed to Stored Fields
+
+The `fields` parameter has been renamed to `stored_fields`.
+See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter).
+
+## Fielddatafields has been renamed to Docvaluefields
+
+The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter)
+to `docvalue_fields`.
+
+## Type exists endpoint changed
+
+The endpoint for checking whether a type exists has been changed from
+`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`.
+See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal).
+
+## Refresh parameter changed
+
+The `?refresh` parameter could previously be a boolean value. It indicated
+whether changes made by a request (e.g. by the Bulk API) should be immediately
+visible in search, or not. Using `refresh=true` had the positive effect of
+immediately seeing the changes when searching; the negative effect is that
+it is a rather big performance hit.
+
+With 5.0, you can now choose between these three values:
+
+* `"true"` - Refresh immediately
+* `"false"` - Do not refresh (the default value)
+* `"wait_for"` - Wait until ES has made the document visible in search
+
+See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation.
+
+Notice that the boolean values `true` and `false` are no longer available
+in Elastic. You must use a string instead, with one of the above values.
+
+## ReindexerService removed
+
+The `ReindexerService` was a custom solution that was started in the ES 1.x era
+to automate reindexing data from one index to another, or even between clusters.
+
+ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html),
+so we're going to remove our custom solution and ask you to use the native reindexer.
+
+The `ReindexService` is available via `client.Reindex()` (which used to point
+to the custom reindexer).
+
+## Delete By Query back in core
+
+The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html)
+was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API.
+
+It has its own endpoint at `/_delete_by_query`.
+
+Delete By Query, Reindex, and Update By Query are very similar under the hood.
+
+## Reindex, Delete By Query, and Update By Query response changed
+
+The response from the above APIs changed a bit. E.g. the `retries` value
+used to be an `int64`; it now returns separate values for `bulk` and `search`:
+
+```
+// Old
+{
+  ...
+  "retries": 123,
+  ...
+}
+```
+
+```
+// New
+{
+  ...
+  "retries": {
+    "bulk": 123,
+    "search": 0
+  },
+  ...
+}
+```
+
+## ScanService removed
+
+The `ScanService` is removed. Use the (new) `ScrollService` instead.
+
+## New ScrollService
+
+There was confusion around `ScanService` and `ScrollService` doing basically
+the same thing. One returned slices and didn't support all query details; the
+other returned one document after another and wasn't safe for concurrent use.
+So we merged the two into a new `ScrollService` that
+removes the problems of the older services.
+
+In other words:
+If you used `ScanService`, switch to `ScrollService`.
+If you used the old `ScrollService`, you might need to fix some things but
+overall it should just work.
+
+Changes:
+- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll".
+
+TODO Not implemented yet
+
+## Suggesters
+
+They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html).
+
+Some changes:
+- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing).
+
+TODO Fix all structural changes in suggesters
+
+## Percolator
+
+Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html).
+
+Elastic 5.0 adds the new
+[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html)
+which can be used in combination with the new
+[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html).
+
+The Percolate service is removed from Elastic 5.0.
+
+## Remove Consistency, add WaitForActiveShards
+
+The `consistency` parameter has been removed in a lot of places, e.g. the Bulk,
diff --git a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..4fbc79dd0d7e2cedfa1f0544bbb51a93b4a4fb73
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTING.md
@@ -0,0 +1,40 @@
+# How to contribute
+
+Elastic is an open-source project and we are looking forward to each
+contribution.
+
+Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level
+overview of the features of Elasticsearch. However, Elastic tries to resemble
+the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch).
+
+This explains why you might think that some options are strange or missing
+in Elastic, while often they're just different. Please check the Java API first.
+
+Having said that: Elasticsearch is moving fast and it is quite likely
+that we have missed some features or changes. Feel free to change that.
+
+## Your Pull Request
+
+To make it easy to review and understand your changes, please keep the
+following things in mind before submitting your pull request:
+
+* Have you compared the existing implementation with the Java API?
+* Please work on the latest possible state of `olivere/elastic`.
+  Use `release-branch.v2` for targeting Elasticsearch 1.x and
+  `release-branch.v3` for targeting 2.x.
+* Create a branch dedicated to your change.
+* If possible, write a test case which confirms your change.
+* Make sure your changes and your tests work with all recent versions of
+  Elasticsearch. We currently support Elasticsearch 1.7.x in the
+  release-branch.v2 and Elasticsearch 2.x in the release-branch.v3.
+* Test your changes before creating a pull request (`go test ./...`).
+* Don't mix several features or bug fixes in one pull request.
+* Create a meaningful commit message.
+* Explain your change, e.g. provide a link to the issue you are fixing and
+  probably a link to the Elasticsearch documentation and/or source code.
+* Format your source with `go fmt`.
+
+## Additional Resources
+
+* [GitHub documentation](http://help.github.com/)
+* [GitHub pull request documentation](http://help.github.com/send-pull-requests/)
diff --git a/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS
new file mode 100644
index 0000000000000000000000000000000000000000..2155610dd6acdc95a77b886d2719e0320943af7e
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/CONTRIBUTORS
@@ -0,0 +1,84 @@
+# This is a list of people who have contributed code
+# to the Elastic repository.
+#
+# It is just my small "thank you" to all those that helped
+# make Elastic what it is.
+#
+# Please keep this list sorted.
+ +0x6875790d0a [@huydx](https://github.com/huydx) +Adam Alix [@adamalix](https://github.com/adamalix) +Adam Weiner [@adamweiner](https://github.com/adamweiner) +Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu) +Alex [@akotlar](https://github.com/akotlar) +Alexandre Olivier [@aliphen](https://github.com/aliphen) +Alexey Sharov [@nizsheanez](https://github.com/nizsheanez) +Andrew Gaul [@andrewgaul](https://github.com/andrewgaul) +Benjamin Fernandes [@LotharSee](https://github.com/LotharSee) +Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux) +Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va) +Brady Love [@bradylove](https://github.com/bradylove) +Bryan Conklin [@bmconklin](https://github.com/bmconklin) +Bruce Zhou [@brucez-isell](https://github.com/brucez-isell) +Chris M [@tebriel](https://github.com/tebriel) +Christophe Courtaut [@kri5](https://github.com/kri5) +Conrad Pankoff [@deoxxa](https://github.com/deoxxa) +Corey Scott [@corsc](https://github.com/corsc) +Daniel Barrett [@shendaras](https://github.com/shendaras) +Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath) +Daniel Imfeld [@dimfeld](https://github.com/dimfeld) +Dwayne Schultz [@myshkin5](https://github.com/myshkin5) +Ellison Leão [@ellisonleao](https://github.com/ellisonleao) +Eugene Egorov [@EugeneEgorov](https://github.com/EugeneEgorov) +Faolan C-P [@fcheslack](https://github.com/fcheslack) +Gerhard Häring [@ghaering](https://github.com/ghaering) +Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos) +Guillaume J. Charmes [@creack](https://github.com/creack) +Guiseppe [@gm42](https://github.com/gm42) +Han Yu [@MoonighT](https://github.com/MoonighT) +Harrison Wright [@wright8191](https://github.com/wright8191) +Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy) +initialcontext [@initialcontext](https://github.com/initialcontext) +Isaac Saldana [@isaldana](https://github.com/isaldana) +Jack Lindamood [@cep21](https://github.com/cep21) +Jacob [@jdelgad](https://github.com/jdelgad) +Jayme Rotsaert [@jrots](https://github.com/jrots) +Jeremy Canady [@jrmycanady](https://github.com/jrmycanady) +Joe Buck [@four2five](https://github.com/four2five) +John Barker [@j16r](https://github.com/j16r) +John Goodall [@jgoodall](https://github.com/jgoodall) +John Stanford [@jxstanford](https://github.com/jxstanford) +jun [@coseyo](https://github.com/coseyo) +Junpei Tsuji [@jun06t](https://github.com/jun06t) +Kenta SUZUKI [@suzuken](https://github.com/suzuken) +Kyle Brandt [@kylebrandt](https://github.com/kylebrandt) +Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) +Mara Kim [@autochthe](https://github.com/autochthe) +Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato) +Mark Costello [@mcos](https://github.com/mcos) +Medhi Bechina [@mdzor](https://github.com/mdzor) +mosa [@mosasiru](https://github.com/mosasiru) +naimulhaider [@naimulhaider](https://github.com/naimulhaider) +navins [@ishare](https://github.com/ishare) +Naoya Tsutsumi [@tutuming](https://github.com/tutuming) +Nicholas Wolff [@nwolff](https://github.com/nwolff) +Nick Whyte [@nickw444](https://github.com/nickw444) +Orne Brocaar [@brocaar](https://github.com/brocaar) +Radoslaw Wesolowski [r--w](https://github.com/r--w) +Ryan Schmukler [@rschmukler](https://github.com/rschmukler) +Sacheendra talluri [@sacheendra](https://github.com/sacheendra) +Sean DuBois [@Sean-Der](https://github.com/Sean-Der) +Shalin LK [@shalinlk](https://github.com/shalinlk) +Stephen Kubovic 
[@stephenkubovic](https://github.com/stephenkubovic)
+Stuart Warren [@Woz](https://github.com/stuart-warren)
+Sulaiman [@salajlan](https://github.com/salajlan)
+Sundar [@sundarv85](https://github.com/sundarv85)
+Take [ww24](https://github.com/ww24)
+Tetsuya Morimoto [@t2y](https://github.com/t2y)
+TimeEmit [@TimeEmit](https://github.com/timeemit)
+TusharM [@tusharm](https://github.com/tusharm)
+wolfkdy [@wolfkdy](https://github.com/wolfkdy)
+Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb)
+Yarden Bar [@ayashjorden](https://github.com/ayashjorden)
+zakthomas [@zakthomas](https://github.com/zakthomas)
+singham [@zhaochenxiao90](https://github.com/zhaochenxiao90)
diff --git a/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md b/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000000000000000000000000000000000000..c5eb690a752374b9ac22fa01b1d0e842290b48be
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/ISSUE_TEMPLATE.md
@@ -0,0 +1,17 @@
+Please use the following questions as a guideline to help me answer
+your issue/question without further inquiry. Thank you.
+
+### Which version of Elastic are you using?
+
+[ ] elastic.v2 (for Elasticsearch 1.x)
+[ ] elastic.v3 (for Elasticsearch 2.x)
+[ ] elastic.v5 (for Elasticsearch 5.x)
+
+### Please describe the expected behavior
+
+
+### Please describe the actual behavior
+
+
+### Any steps to reproduce the behavior?
+
diff --git a/vendor/gopkg.in/olivere/elastic.v5/LICENSE b/vendor/gopkg.in/olivere/elastic.v5/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8b22cdb6014c39ef4cd055215e9df74e86192f67
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+Copyright © 2012-2015 Oliver Eilhard
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/vendor/gopkg.in/olivere/elastic.v5/README.md b/vendor/gopkg.in/olivere/elastic.v5/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..09d7bbd3de1bcd2cca6d3981d2cfe1ab84d793cf
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/README.md
@@ -0,0 +1,461 @@
+# Elastic
+
+Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the
+[Go](http://www.golang.org/) programming language.
+
+[](https://travis-ci.org/olivere/elastic)
+[](http://godoc.org/gopkg.in/olivere/elastic.v5)
+[](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE)
+
+See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic.
+
+## Releases
+
+**The release branches (e.g. [`release-branch.v5`](https://github.com/olivere/elastic/tree/release-branch.v5))
+are actively being worked on and can break at any time.
+If you want to use stable versions of Elastic, please use the packages released via [gopkg.in](https://gopkg.in).**
+
+Here's the version matrix:
+
+Elasticsearch version | Elastic version | Package URL
+----------------------|------------------|------------
+5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5))
+2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3))
+1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2))
+0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1))
+
+**Example:**
+
+You have installed Elasticsearch 5.0.0 and want to use Elastic.
+As listed above, you should use Elastic 5.0.
+So you first install the stable release of Elastic 5.0 from gopkg.in.
+
+```sh
+$ go get gopkg.in/olivere/elastic.v5
+```
+
+You then import it with this import path:
+
+```go
+import elastic "gopkg.in/olivere/elastic.v5"
+```
+
+### Elastic 5.0
+
+Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
+[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released).
+
+Notice that there are a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html)
+and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md),
+as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x).
+
+Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack.
+
+### Elastic 3.0
+
+Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3).
+
+Elastic 3.0 will only get critical bug fixes. You should update to a recent version.
+
+### Elastic 2.0
+
+Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2).
+
+Elastic 2.0 will only get critical bug fixes. You should update to a recent version.
+
+### Elastic 1.0
+
+Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic
+to a recent version.
+
+However, if you cannot update for some reason, don't worry. Version 1.0 is
+still available. All you need to do is go-get it and change your import path
+as described above.
+
+
+## Status
+
+We have been using Elastic in production since 2012. Elastic is stable, but the API changes
+now and then. We strive for API compatibility.
+However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html)
+and we sometimes have to adapt.
+ +Having said that, there have been no big API changes that required you +to rewrite your application big time. More often than not it's renaming APIs +and adding/removing features so that Elastic is in sync with Elasticsearch. + +Elastic has been used in production with the following Elasticsearch versions: +0.90, 1.0-1.7, and 2.0-2.4.1. Furthermore, we use [Travis CI](https://travis-ci.org/) +to test Elastic with the most recent versions of Elasticsearch and Go. +See the [.travis.yml](https://github.com/olivere/elastic/blob/master/.travis.yml) +file for the exact matrix and [Travis](https://travis-ci.org/olivere/elastic) +for the results. + +Elasticsearch has quite a few features. Most of them are implemented +by Elastic. I add features and APIs as required. It's straightforward +to implement missing pieces. I'm accepting pull requests :-) + +Having said that, I hope you find the project useful. + + +## Getting Started + +The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). +The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. + +You typically create one client for your app. Here's a complete example of +creating a client, creating an index, adding a document, executing a search etc. + +```go +// Create a context +ctx := context.Background() + +// Create a client +client, err := elastic.NewClient() +if err != nil { + // Handle error + panic(err) +} + +// Create an index +_, err = client.CreateIndex("twitter").Do(ctx) +if err != nil { + // Handle error + panic(err) +} + +// Add a document to the index +tweet := Tweet{User: "olivere", Message: "Take Five"} +_, err = client.Index(). + Index("twitter"). + Type("tweet"). + Id("1"). + BodyJson(tweet). + Refresh("true"). + Do(ctx) +if err != nil { + // Handle error + panic(err) +} + +// Search with a term query +termQuery := elastic.NewTermQuery("user", "olivere") +searchResult, err := client.Search(). + Index("twitter"). // search in index "twitter" + Query(termQuery). // specify the query + Sort("user", true). // sort by "user" field, ascending + From(0).Size(10). // take documents 0-9 + Pretty(true). // pretty print request and response JSON + Do(ctx) // execute +if err != nil { + // Handle error + panic(err) +} + +// searchResult is of type SearchResult and returns hits, suggestions, +// and all kinds of other information from Elasticsearch. +fmt.Printf("Query took %d milliseconds\n", searchResult.TookInMillis) + +// Each is a convenience function that iterates over hits in a search result. +// It makes sure you don't need to check for nil values in the response. +// However, it ignores errors in serialization. If you want full control +// over iterating the hits, see below. +var ttyp Tweet +for _, item := range searchResult.Each(reflect.TypeOf(ttyp)) { + if t, ok := item.(Tweet); ok { + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} +// TotalHits is another convenience function that works even when something goes wrong. +fmt.Printf("Found a total of %d tweets\n", searchResult.TotalHits()) + +// Here's how you iterate through results with full control over each step. +if searchResult.Hits.TotalHits > 0 { + fmt.Printf("Found a total of %d tweets\n", searchResult.Hits.TotalHits) + + // Iterate through results + for _, hit := range searchResult.Hits.Hits { + // hit.Index contains the name of the index + + // Deserialize hit.Source into a Tweet (could also be just a map[string]interface{}). 
+ var t Tweet + err := json.Unmarshal(*hit.Source, &t) + if err != nil { + // Deserialization failed + } + + // Work with tweet + fmt.Printf("Tweet by %s: %s\n", t.User, t.Message) + } +} else { + // No hits + fmt.Print("Found no tweets\n") +} + +// Delete the index again +_, err = client.DeleteIndex("twitter").Do(ctx) +if err != nil { + // Handle error + panic(err) +} +``` + +Here's a [link to a complete working example](https://gist.github.com/olivere/114347ff9d9cfdca7bdc0ecea8b82263). + +See the [wiki](https://github.com/olivere/elastic/wiki) for more details. + + +## API Status + +### Document APIs + +- [x] Index API +- [x] Get API +- [x] Delete API +- [x] Delete By Query API +- [x] Update API +- [x] Update By Query API +- [x] Multi Get API +- [x] Bulk API +- [x] Reindex API +- [x] Term Vectors +- [x] Multi termvectors API + +### Search APIs + +- [x] Search +- [x] Search Template +- [ ] Multi Search Template +- [ ] Search Shards API +- [x] Suggesters + - [x] Term Suggester + - [x] Phrase Suggester + - [x] Completion Suggester + - [x] Context Suggester +- [x] Multi Search API +- [x] Count API +- [ ] Search Exists API +- [ ] Validate API +- [x] Explain API +- [ ] Profile API +- [x] Field Stats API + +### Aggregations + +- Metrics Aggregations + - [x] Avg + - [x] Cardinality + - [x] Extended Stats + - [x] Geo Bounds + - [ ] Geo Centroid + - [x] Max + - [x] Min + - [x] Percentiles + - [x] Percentile Ranks + - [ ] Scripted Metric + - [x] Stats + - [x] Sum + - [x] Top Hits + - [x] Value Count +- Bucket Aggregations + - [x] Children + - [x] Date Histogram + - [x] Date Range + - [x] Filter + - [x] Filters + - [x] Geo Distance + - [ ] GeoHash Grid + - [x] Global + - [x] Histogram + - [x] IP Range + - [x] Missing + - [x] Nested + - [x] Range + - [x] Reverse Nested + - [x] Sampler + - [x] Significant Terms + - [x] Terms +- Pipeline Aggregations + - [x] Avg Bucket + - [x] Derivative + - [x] Max Bucket + - [x] Min Bucket + - [x] Sum Bucket + - [x] Stats Bucket + - [ ] Extended Stats Bucket + - [ ] Percentiles Bucket + - [x] Moving Average + - [x] Cumulative Sum + - [x] Bucket Script + - [x] Bucket Selector + - [x] Serial Differencing +- [ ] Matrix Aggregations + - [ ] Matrix Stats +- [x] Aggregation Metadata + +### Indices APIs + +- [x] Create Index +- [x] Delete Index +- [x] Get Index +- [x] Indices Exists +- [x] Open / Close Index +- [x] Shrink Index +- [ ] Rollover Index +- [x] Put Mapping +- [x] Get Mapping +- [ ] Get Field Mapping +- [ ] Types Exists +- [x] Index Aliases +- [x] Update Indices Settings +- [x] Get Settings +- [x] Analyze +- [x] Index Templates +- [ ] Shadow Replica Indices +- [x] Indices Stats +- [ ] Indices Segments +- [ ] Indices Recovery +- [ ] Indices Shard Stores +- [ ] Clear Cache +- [x] Flush +- [x] Refresh +- [x] Force Merge +- [ ] Upgrade + +### cat APIs + +The cat APIs are not implemented as of now. We think they are better suited for operating with Elasticsearch on the command line. 
+ +- [ ] cat aliases +- [ ] cat allocation +- [ ] cat count +- [ ] cat fielddata +- [ ] cat health +- [ ] cat indices +- [ ] cat master +- [ ] cat nodeattrs +- [ ] cat nodes +- [ ] cat pending tasks +- [ ] cat plugins +- [ ] cat recovery +- [ ] cat repositories +- [ ] cat thread pool +- [ ] cat shards +- [ ] cat segments +- [ ] cat snapshots + +### Cluster APIs + +- [x] Cluster Health +- [x] Cluster State +- [x] Cluster Stats +- [ ] Pending Cluster Tasks +- [ ] Cluster Reroute +- [ ] Cluster Update Settings +- [x] Nodes Stats +- [x] Nodes Info +- [x] Task Management API +- [ ] Nodes hot_threads +- [ ] Cluster Allocation Explain API + +### Query DSL + +- [x] Match All Query +- [x] Inner hits +- Full text queries + - [x] Match Query + - [x] Match Phrase Query + - [x] Match Phrase Prefix Query + - [x] Multi Match Query + - [x] Common Terms Query + - [x] Query String Query + - [x] Simple Query String Query +- Term level queries + - [x] Term Query + - [x] Terms Query + - [x] Range Query + - [x] Exists Query + - [x] Prefix Query + - [x] Wildcard Query + - [x] Regexp Query + - [x] Fuzzy Query + - [x] Type Query + - [x] Ids Query +- Compound queries + - [x] Constant Score Query + - [x] Bool Query + - [x] Dis Max Query + - [x] Function Score Query + - [x] Boosting Query + - [x] Indices Query +- Joining queries + - [x] Nested Query + - [x] Has Child Query + - [x] Has Parent Query + - [x] Parent Id Query +- Geo queries + - [ ] GeoShape Query + - [x] Geo Bounding Box Query + - [x] Geo Distance Query + - [ ] Geo Distance Range Query + - [x] Geo Polygon Query + - [ ] Geohash Cell Query +- Specialized queries + - [x] More Like This Query + - [x] Template Query + - [x] Script Query + - [x] Percolate Query +- Span queries + - [ ] Span Term Query + - [ ] Span Multi Term Query + - [ ] Span First Query + - [ ] Span Near Query + - [ ] Span Or Query + - [ ] Span Not Query + - [ ] Span Containing Query + - [ ] Span Within Query + - [ ] Span Field Masking Query +- [ ] Minimum Should Match +- [ ] Multi Term Query Rewrite + +### Modules + +- [ ] Snapshot and Restore + +### Sorting + +- [x] Sort by score +- [x] Sort by field +- [x] Sort by geo distance +- [x] Sort by script +- [x] Sort by doc + +### Scrolling + +Scrolling is supported via a `ScrollService`. It supports an iterator-like interface. +The `ClearScroll` API is implemented as well. + +A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel) +is described in the [Wiki](https://github.com/olivere/elastic/wiki). + +## How to contribute + +Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md). + +## Credits + +Thanks a lot for the great folks working hard on +[Elasticsearch](https://www.elastic.co/products/elasticsearch) +and +[Go](https://golang.org/). + +Elastic uses portions of the +[uritemplates](https://github.com/jtacoma/uritemplates) library +by Joshua Tacoma and +[backoff](https://github.com/cenkalti/backoff) by Cenk Altı. + +## LICENSE + +MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/) +or the LICENSE file provided in the repository for details. diff --git a/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go b/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go new file mode 100644 index 0000000000000000000000000000000000000000..83f954f44ce84e1f8c3b1d5154dedc732f20d141 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/acknowledged_response.go @@ -0,0 +1,11 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AcknowledgedResponse is returned from various APIs. It simply indicates +// whether the operation is ack'd or not. +type AcknowledgedResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/backoff.go b/vendor/gopkg.in/olivere/elastic.v5/backoff.go new file mode 100644 index 0000000000000000000000000000000000000000..2b4f874989537d61ae56bc1e6ae6cc1113822967 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/backoff.go @@ -0,0 +1,152 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "math" + "math/rand" + "sync" + "time" +) + +// BackoffFunc specifies the signature of a function that returns the +// time to wait before the next call to a resource. To stop retrying +// return false in the 2nd return value. +type BackoffFunc func(retry int) (time.Duration, bool) + +// Backoff allows callers to implement their own Backoff strategy. +type Backoff interface { + // Next implements a BackoffFunc. + Next(retry int) (time.Duration, bool) +} + +// -- ZeroBackoff -- + +// ZeroBackoff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, +// indefinitely. +type ZeroBackoff struct{} + +// Next implements BackoffFunc for ZeroBackoff. +func (b ZeroBackoff) Next(retry int) (time.Duration, bool) { + return 0, true +} + +// -- StopBackoff -- + +// StopBackoff is a fixed backoff policy that always returns false for +// Next(), meaning that the operation should never be retried. +type StopBackoff struct{} + +// Next implements BackoffFunc for StopBackoff. +func (b StopBackoff) Next(retry int) (time.Duration, bool) { + return 0, false +} + +// -- ConstantBackoff -- + +// ConstantBackoff is a backoff policy that always returns the same delay. +type ConstantBackoff struct { + interval time.Duration +} + +// NewConstantBackoff returns a new ConstantBackoff. +func NewConstantBackoff(interval time.Duration) *ConstantBackoff { + return &ConstantBackoff{interval: interval} +} + +// Next implements BackoffFunc for ConstantBackoff. +func (b *ConstantBackoff) Next(retry int) (time.Duration, bool) { + return b.interval, true +} + +// -- Exponential -- + +// ExponentialBackoff implements the simple exponential backoff described by +// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html. +type ExponentialBackoff struct { + sync.Mutex + t float64 // initial timeout (in msec) + f float64 // exponential factor (e.g. 2) + m float64 // maximum timeout (in msec) +} + +// NewExponentialBackoff returns a ExponentialBackoff backoff policy. +// Use initialTimeout to set the first/minimal interval +// and maxTimeout to set the maximum wait interval. +func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff { + return &ExponentialBackoff{ + t: float64(int64(initialTimeout / time.Millisecond)), + f: 2.0, + m: float64(int64(maxTimeout / time.Millisecond)), + } +} + +// Next implements BackoffFunc for ExponentialBackoff. 
+func (b *ExponentialBackoff) Next(retry int) (time.Duration, bool) { + b.Lock() + defer b.Unlock() + + r := 1.0 + rand.Float64() // random number in [1..2] + m := math.Min(r*b.t*math.Pow(b.f, float64(retry)), b.m) + if m >= b.m { + return 0, false + } + d := time.Duration(int64(m)) * time.Millisecond + return d, true +} + +// -- Simple Backoff -- + +// SimpleBackoff takes a list of fixed values for backoff intervals. +// Each call to Next returns the next value from that fixed list. +// After each value is returned, subsequent calls to Next will only return +// the last element. The values are optionally "jittered" (off by default). +type SimpleBackoff struct { + sync.Mutex + ticks []int + jitter bool +} + +// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified +// list of fixed intervals in milliseconds. +func NewSimpleBackoff(ticks ...int) *SimpleBackoff { + return &SimpleBackoff{ + ticks: ticks, + jitter: false, + } +} + +// Jitter enables or disables jittering values. +func (b *SimpleBackoff) Jitter(flag bool) *SimpleBackoff { + b.Lock() + b.jitter = flag + b.Unlock() + return b +} + +// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis]. +func jitter(millis int) int { + if millis <= 0 { + return 0 + } + return millis/2 + rand.Intn(millis) +} + +// Next implements BackoffFunc for SimpleBackoff. +func (b *SimpleBackoff) Next(retry int) (time.Duration, bool) { + b.Lock() + defer b.Unlock() + + if retry >= len(b.ticks) { + return 0, false + } + + ms := b.ticks[retry] + if b.jitter { + ms = jitter(ms) + } + return time.Duration(ms) * time.Millisecond, true +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk.go b/vendor/gopkg.in/olivere/elastic.v5/bulk.go new file mode 100644 index 0000000000000000000000000000000000000000..6b6020f37e3eb30a7e634a67ff4f5115f213c523 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk.go @@ -0,0 +1,397 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// BulkService allows for batching bulk requests and sending them to +// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest, +// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch, +// then use Do to send them to Elasticsearch. +// +// BulkService will be reset after each Do call. In other words, you can +// reuse BulkService to send many batches. You do not have to create a new +// BulkService for each batch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html +// for more details. +type BulkService struct { + client *Client + + index string + typ string + requests []BulkableRequest + pipeline string + timeout string + refresh string + routing string + waitForActiveShards string + pretty bool + + // estimated bulk size in bytes, up to the request index sizeInBytesCursor + sizeInBytes int64 + sizeInBytesCursor int +} + +// NewBulkService initializes a new BulkService. +func NewBulkService(client *Client) *BulkService { + builder := &BulkService{ + client: client, + } + return builder +} + +func (s *BulkService) reset() { + s.requests = make([]BulkableRequest, 0) + s.sizeInBytes = 0 + s.sizeInBytesCursor = 0 +} + +// Index specifies the index to use for all batches. 
You may also leave +// this blank and specify the index in the individual bulk requests. +func (s *BulkService) Index(index string) *BulkService { + s.index = index + return s +} + +// Type specifies the type to use for all batches. You may also leave +// this blank and specify the type in the individual bulk requests. +func (s *BulkService) Type(typ string) *BulkService { + s.typ = typ + return s +} + +// Timeout is a global timeout for processing bulk requests. This is a +// server-side timeout, i.e. it tells Elasticsearch the time after which +// it should stop processing. +func (s *BulkService) Timeout(timeout string) *BulkService { + s.timeout = timeout + return s +} + +// Refresh controls when changes made by this request are made visible +// to search. The allowed values are: "true" (refresh the relevant +// primary and replica shards immediately), "wait_for" (wait for the +// changes to be made visible by a refresh before applying), or "false" +// (no refresh related actions). +func (s *BulkService) Refresh(refresh string) *BulkService { + s.refresh = refresh + return s +} + +// Routing specifies the routing value. +func (s *BulkService) Routing(routing string) *BulkService { + s.routing = routing + return s +} + +// Pipeline specifies the pipeline id to preprocess incoming documents with. +func (s *BulkService) Pipeline(pipeline string) *BulkService { + s.pipeline = pipeline + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the bulk operation. Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pretty tells Elasticsearch whether to return a formatted JSON response. +func (s *BulkService) Pretty(pretty bool) *BulkService { + s.pretty = pretty + return s +} + +// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest, +// and/or BulkDeleteRequest. +func (s *BulkService) Add(requests ...BulkableRequest) *BulkService { + for _, r := range requests { + s.requests = append(s.requests, r) + } + return s +} + +// EstimatedSizeInBytes returns the estimated size of all bulkable +// requests added via Add. +func (s *BulkService) EstimatedSizeInBytes() int64 { + if s.sizeInBytesCursor == len(s.requests) { + return s.sizeInBytes + } + for _, r := range s.requests[s.sizeInBytesCursor:] { + s.sizeInBytes += s.estimateSizeInBytes(r) + s.sizeInBytesCursor++ + } + return s.sizeInBytes +} + +// estimateSizeInBytes returns the estimates size of the given +// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and +// BulkDeleteRequest. +func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 { + lines, _ := r.Source() + size := 0 + for _, line := range lines { + // +1 for the \n + size += len(line) + 1 + } + return int64(size) +} + +// NumberOfActions returns the number of bulkable requests that need to +// be sent to Elasticsearch on the next batch. 
+func (s *BulkService) NumberOfActions() int { + return len(s.requests) +} + +func (s *BulkService) bodyAsString() (string, error) { + var buf bytes.Buffer + + for _, req := range s.requests { + source, err := req.Source() + if err != nil { + return "", err + } + for _, line := range source { + buf.WriteString(line) + buf.WriteByte('\n') + } + } + + return buf.String(), nil +} + +// Do sends the batched requests to Elasticsearch. Note that, when successful, +// you can reuse the BulkService for the next batch as the list of bulk +// requests is cleared on success. +func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) { + // No actions? + if s.NumberOfActions() == 0 { + return nil, errors.New("elastic: No bulk actions to commit") + } + + // Get body + body, err := s.bodyAsString() + if err != nil { + return nil, err + } + + // Build url + path := "/" + if len(s.index) > 0 { + index, err := uritemplates.Expand("{index}", map[string]string{ + "index": s.index, + }) + if err != nil { + return nil, err + } + path += index + "/" + } + if len(s.typ) > 0 { + typ, err := uritemplates.Expand("{type}", map[string]string{ + "type": s.typ, + }) + if err != nil { + return nil, err + } + path += typ + "/" + } + path += "_bulk" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(BulkResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + + // Reset so the request can be reused + s.reset() + + return ret, nil +} + +// BulkResponse is a response to a bulk execution. +// +// Example: +// { +// "took":3, +// "errors":false, +// "items":[{ +// "index":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":3, +// "status":201 +// } +// },{ +// "index":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":3, +// "status":200 +// } +// },{ +// "delete":{ +// "_index":"index1", +// "_type":"tweet", +// "_id":"1", +// "_version":4, +// "status":200, +// "found":true +// } +// },{ +// "update":{ +// "_index":"index2", +// "_type":"tweet", +// "_id":"2", +// "_version":4, +// "status":200 +// } +// }] +// } +type BulkResponse struct { + Took int `json:"took,omitempty"` + Errors bool `json:"errors,omitempty"` + Items []map[string]*BulkResponseItem `json:"items,omitempty"` +} + +// BulkResponseItem is the result of a single bulk request. +type BulkResponseItem struct { + Index string `json:"_index,omitempty"` + Type string `json:"_type,omitempty"` + Id string `json:"_id,omitempty"` + Version int64 `json:"_version,omitempty"` + Status int `json:"status,omitempty"` + Found bool `json:"found,omitempty"` + Error *ErrorDetails `json:"error,omitempty"` +} + +// Indexed returns all bulk request results of "index" actions. +func (r *BulkResponse) Indexed() []*BulkResponseItem { + return r.ByAction("index") +} + +// Created returns all bulk request results of "create" actions. 
+func (r *BulkResponse) Created() []*BulkResponseItem { + return r.ByAction("create") +} + +// Updated returns all bulk request results of "update" actions. +func (r *BulkResponse) Updated() []*BulkResponseItem { + return r.ByAction("update") +} + +// Deleted returns all bulk request results of "delete" actions. +func (r *BulkResponse) Deleted() []*BulkResponseItem { + return r.ByAction("delete") +} + +// ByAction returns all bulk request results of a certain action, +// e.g. "index" or "delete". +func (r *BulkResponse) ByAction(action string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + var items []*BulkResponseItem + for _, item := range r.Items { + if result, found := item[action]; found { + items = append(items, result) + } + } + return items +} + +// ById returns all bulk request results of a given document id, +// regardless of the action ("index", "delete" etc.). +func (r *BulkResponse) ById(id string) []*BulkResponseItem { + if r.Items == nil { + return nil + } + var items []*BulkResponseItem + for _, item := range r.Items { + for _, result := range item { + if result.Id == id { + items = append(items, result) + } + } + } + return items +} + +// Failed returns those items of a bulk response that have errors, +// i.e. those that don't have a status code between 200 and 299. +func (r *BulkResponse) Failed() []*BulkResponseItem { + if r.Items == nil { + return nil + } + var errors []*BulkResponseItem + for _, item := range r.Items { + for _, result := range item { + if !(result.Status >= 200 && result.Status <= 299) { + errors = append(errors, result) + } + } + } + return errors +} + +// Succeeded returns those items of a bulk response that have no errors, +// i.e. those have a status code between 200 and 299. +func (r *BulkResponse) Succeeded() []*BulkResponseItem { + if r.Items == nil { + return nil + } + var succeeded []*BulkResponseItem + for _, item := range r.Items { + for _, result := range item { + if result.Status >= 200 && result.Status <= 299 { + succeeded = append(succeeded, result) + } + } + } + return succeeded +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go new file mode 100644 index 0000000000000000000000000000000000000000..5d4a2a5a7a059b3c81cc25e5fc5fcebddc37c6d6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_delete_request.go @@ -0,0 +1,145 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// -- Bulk delete request -- + +// BulkDeleteRequest is a request to remove a document from Elasticsearch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html +// for details. +type BulkDeleteRequest struct { + BulkableRequest + index string + typ string + id string + parent string + routing string + version int64 // default is MATCH_ANY + versionType string // default is "internal" + + source []string +} + +// NewBulkDeleteRequest returns a new BulkDeleteRequest. +func NewBulkDeleteRequest() *BulkDeleteRequest { + return &BulkDeleteRequest{} +} + +// Index specifies the Elasticsearch index to use for this delete request. +// If unspecified, the index set on the BulkService will be used. 
+func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this delete request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to delete. +func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest { + r.id = id + r.source = nil + return r +} + +// Parent specifies the parent of the request, which is used in parent/child +// mappings. +func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest { + r.parent = parent + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest { + r.routing = routing + r.source = nil + return r +} + +// Version indicates the version to be deleted as part of an optimistic +// concurrency model. +func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest { + r.version = version + r.source = nil + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". +func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest { + r.versionType = versionType + r.source = nil + return r +} + +// String returns the on-wire representation of the delete request, +// concatenated as a single string. +func (r *BulkDeleteRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +// Source returns the on-wire representation of the delete request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html +// for details. +func (r *BulkDeleteRequest) Source() ([]string, error) { + if r.source != nil { + return r.source, nil + } + lines := make([]string, 1) + + source := make(map[string]interface{}) + deleteCommand := make(map[string]interface{}) + if r.index != "" { + deleteCommand["_index"] = r.index + } + if r.typ != "" { + deleteCommand["_type"] = r.typ + } + if r.id != "" { + deleteCommand["_id"] = r.id + } + if r.parent != "" { + deleteCommand["_parent"] = r.parent + } + if r.routing != "" { + deleteCommand["_routing"] = r.routing + } + if r.version > 0 { + deleteCommand["_version"] = r.version + } + if r.versionType != "" { + deleteCommand["_version_type"] = r.versionType + } + source["delete"] = deleteCommand + + body, err := json.Marshal(source) + if err != nil { + return nil, err + } + + lines[0] = string(body) + r.source = lines + + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go new file mode 100644 index 0000000000000000000000000000000000000000..1c9302881a33429ed0c79b6f8aab44f7758b96bb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_index_request.go @@ -0,0 +1,225 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// BulkIndexRequest is a request to add a document to Elasticsearch. 
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html +// for details. +type BulkIndexRequest struct { + BulkableRequest + index string + typ string + id string + opType string + routing string + parent string + version int64 // default is MATCH_ANY + versionType string // default is "internal" + doc interface{} + pipeline string + retryOnConflict *int + ttl string + + source []string +} + +// NewBulkIndexRequest returns a new BulkIndexRequest. +// The operation type is "index" by default. +func NewBulkIndexRequest() *BulkIndexRequest { + return &BulkIndexRequest{ + opType: "index", + } +} + +// Index specifies the Elasticsearch index to use for this index request. +// If unspecified, the index set on the BulkService will be used. +func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this index request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to index. +func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest { + r.id = id + r.source = nil + return r +} + +// OpType specifies if this request should follow create-only or upsert +// behavior. This follows the OpType of the standard document index API. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#operation-type +// for details. +func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest { + r.opType = opType + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest { + r.routing = routing + r.source = nil + return r +} + +// Parent specifies the identifier of the parent document (if available). +func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { + r.parent = parent + r.source = nil + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { + r.version = version + r.source = nil + return r +} + +// VersionType specifies how versions are created. It can be e.g. internal, +// external, external_gte, or force. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-versioning +// for details. +func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest { + r.versionType = versionType + r.source = nil + return r +} + +// Doc specifies the document to index. +func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { + r.doc = doc + r.source = nil + return r +} + +// RetryOnConflict specifies how often to retry in case of a version conflict. +func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest { + r.retryOnConflict = &retryOnConflict + r.source = nil + return r +} + +// TTL is an expiration time for the document. +func (r *BulkIndexRequest) TTL(ttl string) *BulkIndexRequest { + r.ttl = ttl + r.source = nil + return r +} + +// Pipeline to use while processing the request. +func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest { + r.pipeline = pipeline + r.source = nil + return r +} + +// String returns the on-wire representation of the index request, +// concatenated as a single string. 
+func (r *BulkIndexRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +// Source returns the on-wire representation of the index request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html +// for details. +func (r *BulkIndexRequest) Source() ([]string, error) { + // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } + // { "field1" : "value1" } + + if r.source != nil { + return r.source, nil + } + + lines := make([]string, 2) + + // "index" ... + command := make(map[string]interface{}) + indexCommand := make(map[string]interface{}) + if r.index != "" { + indexCommand["_index"] = r.index + } + if r.typ != "" { + indexCommand["_type"] = r.typ + } + if r.id != "" { + indexCommand["_id"] = r.id + } + if r.routing != "" { + indexCommand["_routing"] = r.routing + } + if r.parent != "" { + indexCommand["_parent"] = r.parent + } + if r.version > 0 { + indexCommand["_version"] = r.version + } + if r.versionType != "" { + indexCommand["_version_type"] = r.versionType + } + if r.retryOnConflict != nil { + indexCommand["_retry_on_conflict"] = *r.retryOnConflict + } + if r.ttl != "" { + indexCommand["_ttl"] = r.ttl + } + if r.pipeline != "" { + indexCommand["pipeline"] = r.pipeline + } + command[r.opType] = indexCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // "field1" ... + if r.doc != nil { + switch t := r.doc.(type) { + default: + body, err := json.Marshal(r.doc) + if err != nil { + return nil, err + } + lines[1] = string(body) + case json.RawMessage: + lines[1] = string(t) + case *json.RawMessage: + lines[1] = string(*t) + case string: + lines[1] = t + case *string: + lines[1] = *t + } + } else { + lines[1] = "{}" + } + + r.source = lines + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go new file mode 100644 index 0000000000000000000000000000000000000000..523ca9fdad23b8baac78804929ef875e7b81f041 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_processor.go @@ -0,0 +1,546 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" +) + +// BulkProcessorService allows to easily process bulk requests. It allows setting +// policies when to flush new bulk requests, e.g. based on a number of actions, +// on the size of the actions, and/or to flush periodically. It also allows +// to control the number of concurrent bulk requests allowed to be executed +// in parallel. +// +// BulkProcessorService, by default, commits either every 1000 requests or when the +// (estimated) size of the bulk requests exceeds 5 MB. However, it does not +// commit periodically. BulkProcessorService also does retry by default, using +// an exponential backoff algorithm. +// +// The caller is responsible for setting the index and type on every +// bulk request added to BulkProcessorService. +// +// BulkProcessorService takes ideas from the BulkProcessor of the +// Elasticsearch Java API as documented in +// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html. 
+type BulkProcessorService struct { + c *Client + beforeFn BulkBeforeFunc + afterFn BulkAfterFunc + name string // name of processor + numWorkers int // # of workers (>= 1) + bulkActions int // # of requests after which to commit + bulkSize int // # of bytes after which to commit + flushInterval time.Duration // periodic flush interval + wantStats bool // indicates whether to gather statistics + initialTimeout time.Duration // initial wait time before retry on errors + maxTimeout time.Duration // max time to wait for retry on errors +} + +// NewBulkProcessorService creates a new BulkProcessorService. +func NewBulkProcessorService(client *Client) *BulkProcessorService { + return &BulkProcessorService{ + c: client, + numWorkers: 1, + bulkActions: 1000, + bulkSize: 5 << 20, // 5 MB + initialTimeout: time.Duration(200) * time.Millisecond, + maxTimeout: time.Duration(10000) * time.Millisecond, + } +} + +// BulkBeforeFunc defines the signature of callbacks that are executed +// before a commit to Elasticsearch. +type BulkBeforeFunc func(executionId int64, requests []BulkableRequest) + +// BulkAfterFunc defines the signature of callbacks that are executed +// after a commit to Elasticsearch. The err parameter signals an error. +type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) + +// Before specifies a function to be executed before bulk requests get comitted +// to Elasticsearch. +func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService { + s.beforeFn = fn + return s +} + +// After specifies a function to be executed when bulk requests have been +// comitted to Elasticsearch. The After callback executes both when the +// commit was successful as well as on failures. +func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService { + s.afterFn = fn + return s +} + +// Name is an optional name to identify this bulk processor. +func (s *BulkProcessorService) Name(name string) *BulkProcessorService { + s.name = name + return s +} + +// Workers is the number of concurrent workers allowed to be +// executed. Defaults to 1 and must be greater or equal to 1. +func (s *BulkProcessorService) Workers(num int) *BulkProcessorService { + s.numWorkers = num + return s +} + +// BulkActions specifies when to flush based on the number of actions +// currently added. Defaults to 1000 and can be set to -1 to be disabled. +func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService { + s.bulkActions = bulkActions + return s +} + +// BulkSize specifies when to flush based on the size (in bytes) of the actions +// currently added. Defaults to 5 MB and can be set to -1 to be disabled. +func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService { + s.bulkSize = bulkSize + return s +} + +// FlushInterval specifies when to flush at the end of the given interval. +// This is disabled by default. If you want the bulk processor to +// operate completely asynchronously, set both BulkActions and BulkSize to +// -1 and set the FlushInterval to a meaningful interval. +func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService { + s.flushInterval = interval + return s +} + +// Stats tells bulk processor to gather stats while running. +// Use Stats to return the stats. This is disabled by default. +func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { + s.wantStats = wantStats + return s +} + +// Do creates a new BulkProcessor and starts it. 
+// Consider the BulkProcessor as a running instance that accepts bulk requests +// and commits them to Elasticsearch, spreading the work across one or more +// workers. +// +// You can interoperate with the BulkProcessor returned by Do, e.g. Start and +// Stop (or Close) it. +// +// Context is an optional context that is passed into the bulk request +// service calls. In contrast to other operations, this context is used in +// a long running process. You could use it to pass e.g. loggers, but you +// shouldn't use it for cancellation. +// +// Calling Do several times returns new BulkProcessors. You probably don't +// want to do this. BulkProcessorService implements just a builder pattern. +func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) { + p := newBulkProcessor( + s.c, + s.beforeFn, + s.afterFn, + s.name, + s.numWorkers, + s.bulkActions, + s.bulkSize, + s.flushInterval, + s.wantStats, + s.initialTimeout, + s.maxTimeout) + + err := p.Start(ctx) + if err != nil { + return nil, err + } + return p, nil +} + +// -- Bulk Processor Statistics -- + +// BulkProcessorStats contains various statistics of a bulk processor +// while it is running. Use the Stats func to return it while running. +type BulkProcessorStats struct { + Flushed int64 // number of times the flush interval has been invoked + Committed int64 // # of times workers committed bulk requests + Indexed int64 // # of requests indexed + Created int64 // # of requests that ES reported as creates (201) + Updated int64 // # of requests that ES reported as updates + Deleted int64 // # of requests that ES reported as deletes + Succeeded int64 // # of requests that ES reported as successful + Failed int64 // # of requests that ES reported as failed + + Workers []*BulkProcessorWorkerStats // stats for each worker +} + +// BulkProcessorWorkerStats represents per-worker statistics. +type BulkProcessorWorkerStats struct { + Queued int64 // # of requests queued in this worker + LastDuration time.Duration // duration of last commit +} + +// newBulkProcessorStats initializes and returns a BulkProcessorStats struct. +func newBulkProcessorStats(workers int) *BulkProcessorStats { + stats := &BulkProcessorStats{ + Workers: make([]*BulkProcessorWorkerStats, workers), + } + for i := 0; i < workers; i++ { + stats.Workers[i] = &BulkProcessorWorkerStats{} + } + return stats +} + +func (st *BulkProcessorStats) dup() *BulkProcessorStats { + dst := new(BulkProcessorStats) + dst.Flushed = st.Flushed + dst.Committed = st.Committed + dst.Indexed = st.Indexed + dst.Created = st.Created + dst.Updated = st.Updated + dst.Deleted = st.Deleted + dst.Succeeded = st.Succeeded + dst.Failed = st.Failed + for _, src := range st.Workers { + dst.Workers = append(dst.Workers, src.dup()) + } + return dst +} + +func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats { + dst := new(BulkProcessorWorkerStats) + dst.Queued = st.Queued + dst.LastDuration = st.LastDuration + return dst +} + +// -- Bulk Processor -- + +// BulkProcessor encapsulates a task that accepts bulk requests and +// orchestrates committing them to Elasticsearch via one or more workers. +// +// BulkProcessor is returned by setting up a BulkProcessorService and +// calling the Do method. 
+type BulkProcessor struct { + c *Client + beforeFn BulkBeforeFunc + afterFn BulkAfterFunc + name string + bulkActions int + bulkSize int + numWorkers int + executionId int64 + requestsC chan BulkableRequest + workerWg sync.WaitGroup + workers []*bulkWorker + flushInterval time.Duration + flusherStopC chan struct{} + wantStats bool + initialTimeout time.Duration // initial wait time before retry on errors + maxTimeout time.Duration // max time to wait for retry on errors + + startedMu sync.Mutex // guards the following block + started bool + + statsMu sync.Mutex // guards the following block + stats *BulkProcessorStats +} + +func newBulkProcessor( + client *Client, + beforeFn BulkBeforeFunc, + afterFn BulkAfterFunc, + name string, + numWorkers int, + bulkActions int, + bulkSize int, + flushInterval time.Duration, + wantStats bool, + initialTimeout time.Duration, + maxTimeout time.Duration) *BulkProcessor { + return &BulkProcessor{ + c: client, + beforeFn: beforeFn, + afterFn: afterFn, + name: name, + numWorkers: numWorkers, + bulkActions: bulkActions, + bulkSize: bulkSize, + flushInterval: flushInterval, + wantStats: wantStats, + initialTimeout: initialTimeout, + maxTimeout: maxTimeout, + } +} + +// Start starts the bulk processor. If the processor is already started, +// nil is returned. +func (p *BulkProcessor) Start(ctx context.Context) error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + if p.started { + return nil + } + + // We must have at least one worker. + if p.numWorkers < 1 { + p.numWorkers = 1 + } + + p.requestsC = make(chan BulkableRequest) + p.executionId = 0 + p.stats = newBulkProcessorStats(p.numWorkers) + + // Create and start up workers. + p.workers = make([]*bulkWorker, p.numWorkers) + for i := 0; i < p.numWorkers; i++ { + p.workerWg.Add(1) + p.workers[i] = newBulkWorker(p, i) + go p.workers[i].work(ctx) + } + + // Start the ticker for flush (if enabled) + if int64(p.flushInterval) > 0 { + p.flusherStopC = make(chan struct{}) + go p.flusher(p.flushInterval) + } + + p.started = true + + return nil +} + +// Stop is an alias for Close. +func (p *BulkProcessor) Stop() error { + return p.Close() +} + +// Close stops the bulk processor previously started with Do. +// If it is already stopped, this is a no-op and nil is returned. +// +// By implementing Close, BulkProcessor implements the io.Closer interface. +func (p *BulkProcessor) Close() error { + p.startedMu.Lock() + defer p.startedMu.Unlock() + + // Already stopped? Do nothing. + if !p.started { + return nil + } + + // Stop flusher (if enabled) + if p.flusherStopC != nil { + p.flusherStopC <- struct{}{} + <-p.flusherStopC + close(p.flusherStopC) + p.flusherStopC = nil + } + + // Stop all workers. + close(p.requestsC) + p.workerWg.Wait() + + p.started = false + + return nil +} + +// Stats returns the latest bulk processor statistics. +// Collecting stats must be enabled first by calling Stats(true) on +// the service that created this processor. +func (p *BulkProcessor) Stats() BulkProcessorStats { + p.statsMu.Lock() + defer p.statsMu.Unlock() + return *p.stats.dup() +} + +// Add adds a single request to commit by the BulkProcessorService. +// +// The caller is responsible for setting the index and type on the request. +func (p *BulkProcessor) Add(request BulkableRequest) { + p.requestsC <- request +} + +// Flush manually asks all workers to commit their outstanding requests. +// It returns only when all workers acknowledge completion. 
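+//
+// A minimal sketch, assuming p was obtained from BulkProcessorService.Do and
+// req is any BulkableRequest:
+//
+//    p.Add(req)
+//    if err := p.Flush(); err != nil {
+//        // handle error
+//    }
+//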
+func (p *BulkProcessor) Flush() error { + p.statsMu.Lock() + p.stats.Flushed++ + p.statsMu.Unlock() + + for _, w := range p.workers { + w.flushC <- struct{}{} + <-w.flushAckC // wait for completion + } + return nil +} + +// flusher is a single goroutine that periodically asks all workers to +// commit their outstanding bulk requests. It is only started if +// FlushInterval is greater than 0. +func (p *BulkProcessor) flusher(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: // Periodic flush + p.Flush() // TODO swallow errors here? + + case <-p.flusherStopC: + p.flusherStopC <- struct{}{} + return + } + } +} + +// -- Bulk Worker -- + +// bulkWorker encapsulates a single worker, running in a goroutine, +// receiving bulk requests and eventually committing them to Elasticsearch. +// It is strongly bound to a BulkProcessor. +type bulkWorker struct { + p *BulkProcessor + i int + bulkActions int + bulkSize int + service *BulkService + flushC chan struct{} + flushAckC chan struct{} +} + +// newBulkWorker creates a new bulkWorker instance. +func newBulkWorker(p *BulkProcessor, i int) *bulkWorker { + return &bulkWorker{ + p: p, + i: i, + bulkActions: p.bulkActions, + bulkSize: p.bulkSize, + service: NewBulkService(p.c), + flushC: make(chan struct{}), + flushAckC: make(chan struct{}), + } +} + +// work waits for bulk requests and manual flush calls on the respective +// channels and is invoked as a goroutine when the bulk processor is started. +func (w *bulkWorker) work(ctx context.Context) { + defer func() { + w.p.workerWg.Done() + close(w.flushAckC) + close(w.flushC) + }() + + var stop bool + for !stop { + select { + case req, open := <-w.p.requestsC: + if open { + // Received a new request + w.service.Add(req) + if w.commitRequired() { + w.commit(ctx) // TODO swallow errors here? + } + } else { + // Channel closed: Stop. + stop = true + if w.service.NumberOfActions() > 0 { + w.commit(ctx) // TODO swallow errors here? + } + } + + case <-w.flushC: + // Commit outstanding requests + if w.service.NumberOfActions() > 0 { + w.commit(ctx) // TODO swallow errors here? + } + w.flushAckC <- struct{}{} + } + } +} + +// commit commits the bulk requests in the given service, +// invoking callbacks as specified. 
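+//
+// The callbacks registered via BulkProcessorService.Before and After are
+// invoked from here; a rough sketch of such callbacks, where svc is the
+// BulkProcessorService (logging only, purely illustrative):
+//
+//    svc.Before(func(id int64, reqs []BulkableRequest) {
+//        log.Printf("commit %d: %d requests", id, len(reqs))
+//    })
+//    svc.After(func(id int64, reqs []BulkableRequest, res *BulkResponse, err error) {
+//        if err != nil {
+//            log.Printf("commit %d failed: %v", id, err)
+//        }
+//    })
+//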
+func (w *bulkWorker) commit(ctx context.Context) error { + var res *BulkResponse + + // commitFunc will commit bulk requests and, on failure, be retried + // via exponential backoff + commitFunc := func() error { + var err error + res, err = w.service.Do(ctx) + return err + } + // notifyFunc will be called if retry fails + notifyFunc := func(err error) { + w.p.c.errorf("elastic: bulk processor %q failed but will retry: %v", w.p.name, err) + } + + id := atomic.AddInt64(&w.p.executionId, 1) + + // Update # documents in queue before eventual retries + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + } + w.p.statsMu.Unlock() + + // Save requests because they will be reset in commitFunc + reqs := w.service.requests + + // Invoke before callback + if w.p.beforeFn != nil { + w.p.beforeFn(id, reqs) + } + + // Commit bulk requests + policy := NewExponentialBackoff(w.p.initialTimeout, w.p.maxTimeout) + err := RetryNotify(commitFunc, policy, notifyFunc) + w.updateStats(res) + if err != nil { + w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err) + } + + // Invoke after callback + if w.p.afterFn != nil { + w.p.afterFn(id, reqs, res, err) + } + + return err +} + +func (w *bulkWorker) updateStats(res *BulkResponse) { + // Update stats + if res != nil { + w.p.statsMu.Lock() + if w.p.wantStats { + w.p.stats.Committed++ + if res != nil { + w.p.stats.Indexed += int64(len(res.Indexed())) + w.p.stats.Created += int64(len(res.Created())) + w.p.stats.Updated += int64(len(res.Updated())) + w.p.stats.Deleted += int64(len(res.Deleted())) + w.p.stats.Succeeded += int64(len(res.Succeeded())) + w.p.stats.Failed += int64(len(res.Failed())) + } + w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) + w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond + } + w.p.statsMu.Unlock() + } +} + +// commitRequired returns true if the service has to commit its +// bulk requests. This can be either because the number of actions +// or the estimated size in bytes is larger than specified in the +// BulkProcessorService. +func (w *bulkWorker) commitRequired() bool { + if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions { + return true + } + if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) { + return true + } + return false +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_request.go new file mode 100644 index 0000000000000000000000000000000000000000..ce3bf0768b704d5cf1dadde6ee22b9e2069008e0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_request.go @@ -0,0 +1,17 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// -- Bulkable request (index/update/delete) -- + +// BulkableRequest is a generic interface to bulkable requests. +type BulkableRequest interface { + fmt.Stringer + Source() ([]string, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go new file mode 100644 index 0000000000000000000000000000000000000000..80637cc3eed9cdf943d1f4ea034e59338a838e20 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/bulk_update_request.go @@ -0,0 +1,270 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "strings" +) + +// BulkUpdateRequest is a request to update a document in Elasticsearch. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html +// for details. +type BulkUpdateRequest struct { + BulkableRequest + index string + typ string + id string + + routing string + parent string + script *Script + scriptedUpsert *bool + version int64 // default is MATCH_ANY + versionType string // default is "internal" + retryOnConflict *int + upsert interface{} + docAsUpsert *bool + detectNoop *bool + doc interface{} + + source []string +} + +// NewBulkUpdateRequest returns a new BulkUpdateRequest. +func NewBulkUpdateRequest() *BulkUpdateRequest { + return &BulkUpdateRequest{} +} + +// Index specifies the Elasticsearch index to use for this update request. +// If unspecified, the index set on the BulkService will be used. +func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest { + r.index = index + r.source = nil + return r +} + +// Type specifies the Elasticsearch type to use for this update request. +// If unspecified, the type set on the BulkService will be used. +func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest { + r.typ = typ + r.source = nil + return r +} + +// Id specifies the identifier of the document to update. +func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest { + r.id = id + r.source = nil + return r +} + +// Routing specifies a routing value for the request. +func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest { + r.routing = routing + r.source = nil + return r +} + +// Parent specifies the identifier of the parent document (if available). +func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest { + r.parent = parent + r.source = nil + return r +} + +// Script specifies an update script. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html#bulk-update +// and https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html +// for details. +func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest { + r.script = script + r.source = nil + return r +} + +// ScripedUpsert specifies if your script will run regardless of +// whether the document exists or not. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_scripted_upsert_literal +func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest { + r.scriptedUpsert = &upsert + r.source = nil + return r +} + +// RetryOnConflict specifies how often to retry in case of a version conflict. +func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest { + r.retryOnConflict = &retryOnConflict + r.source = nil + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest { + r.version = version + r.source = nil + return r +} + +// VersionType can be "internal" (default), "external", "external_gte", +// "external_gt", or "force". +func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { + r.versionType = versionType + r.source = nil + return r +} + +// Doc specifies the updated document. 
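+//
+// A rough sketch of building an update request with a partial document;
+// the index, type, id, and field values are illustrative assumptions:
+//
+//    req := NewBulkUpdateRequest().Index("idx").Type("doc").Id("1").
+//        Doc(map[string]interface{}{"retweets": 42}).
+//        DocAsUpsert(true)
+//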
+func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { + r.doc = doc + r.source = nil + return r +} + +// DocAsUpsert indicates whether the contents of Doc should be used as +// the Upsert value. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html#_literal_doc_as_upsert_literal +// for details. +func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest { + r.docAsUpsert = &docAsUpsert + r.source = nil + return r +} + +// DetectNoop specifies whether changes that don't affect the document +// should be ignored (true) or unignored (false). This is enabled by default +// in Elasticsearch. +func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest { + r.detectNoop = &detectNoop + r.source = nil + return r +} + +// Upsert specifies the document to use for upserts. It will be used for +// create if the original document does not exist. +func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { + r.upsert = doc + r.source = nil + return r +} + +// String returns the on-wire representation of the update request, +// concatenated as a single string. +func (r *BulkUpdateRequest) String() string { + lines, err := r.Source() + if err != nil { + return fmt.Sprintf("error: %v", err) + } + return strings.Join(lines, "\n") +} + +func (r *BulkUpdateRequest) getSourceAsString(data interface{}) (string, error) { + switch t := data.(type) { + default: + body, err := json.Marshal(data) + if err != nil { + return "", err + } + return string(body), nil + case json.RawMessage: + return string(t), nil + case *json.RawMessage: + return string(*t), nil + case string: + return t, nil + case *string: + return *t, nil + } +} + +// Source returns the on-wire representation of the update request, +// split into an action-and-meta-data line and an (optional) source line. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-bulk.html +// for details. +func (r BulkUpdateRequest) Source() ([]string, error) { + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "doc" : { "field1" : "value1", ... } } + // or + // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } + // { "script" : { ... } } + + if r.source != nil { + return r.source, nil + } + + lines := make([]string, 2) + + // "update" ... + command := make(map[string]interface{}) + updateCommand := make(map[string]interface{}) + if r.index != "" { + updateCommand["_index"] = r.index + } + if r.typ != "" { + updateCommand["_type"] = r.typ + } + if r.id != "" { + updateCommand["_id"] = r.id + } + if r.routing != "" { + updateCommand["_routing"] = r.routing + } + if r.parent != "" { + updateCommand["_parent"] = r.parent + } + if r.version > 0 { + updateCommand["_version"] = r.version + } + if r.versionType != "" { + updateCommand["_version_type"] = r.versionType + } + if r.retryOnConflict != nil { + updateCommand["_retry_on_conflict"] = *r.retryOnConflict + } + command["update"] = updateCommand + line, err := json.Marshal(command) + if err != nil { + return nil, err + } + lines[0] = string(line) + + // 2nd line: {"doc" : { ... 
}} or {"script": {...}} + source := make(map[string]interface{}) + if r.docAsUpsert != nil { + source["doc_as_upsert"] = *r.docAsUpsert + } + if r.detectNoop != nil { + source["detect_noop"] = *r.detectNoop + } + if r.upsert != nil { + source["upsert"] = r.upsert + } + if r.scriptedUpsert != nil { + source["scripted_upsert"] = *r.scriptedUpsert + } + if r.doc != nil { + // {"doc":{...}} + source["doc"] = r.doc + } else if r.script != nil { + // {"script":...} + src, err := r.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + lines[1], err = r.getSourceAsString(source) + if err != nil { + return nil, err + } + + r.source = lines + return lines, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/canonicalize.go b/vendor/gopkg.in/olivere/elastic.v5/canonicalize.go new file mode 100644 index 0000000000000000000000000000000000000000..a436f03b6a70dedf747de04b095cd79295ba3761 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/canonicalize.go @@ -0,0 +1,34 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "net/url" + +// canonicalize takes a list of URLs and returns its canonicalized form, i.e. +// remove anything but scheme, userinfo, host, path, and port. +// It also removes all trailing slashes. Invalid URLs or URLs that do not +// use protocol http or https are skipped. +// +// Example: +// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200 +// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1 +func canonicalize(rawurls ...string) []string { + var canonicalized []string + for _, rawurl := range rawurls { + u, err := url.Parse(rawurl) + if err == nil { + if u.Scheme == "http" || u.Scheme == "https" { + // Trim trailing slashes + for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' { + u.Path = u.Path[0 : len(u.Path)-1] + } + u.Fragment = "" + u.RawQuery = "" + canonicalized = append(canonicalized, u.String()) + } + } + } + return canonicalized +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go new file mode 100644 index 0000000000000000000000000000000000000000..9c9fa4f7c74bc329285cd13b8f3f6709c33be570 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/clear_scroll.go @@ -0,0 +1,103 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// ClearScrollService clears one or more scroll contexts by their ids. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#_clear_scroll_api +// for details. +type ClearScrollService struct { + client *Client + pretty bool + scrollId []string +} + +// NewClearScrollService creates a new ClearScrollService. +func NewClearScrollService(client *Client) *ClearScrollService { + return &ClearScrollService{ + client: client, + scrollId: make([]string, 0), + } +} + +// ScrollId is a list of scroll IDs to clear. +// Use _all to clear all search contexts. +func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService { + s.scrollId = append(s.scrollId, scrollIds...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
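+//
+// A minimal sketch of clearing a scroll context through the client, where
+// scrollID is a placeholder for an ID returned by a previous scroll request:
+//
+//    _, err := client.ClearScroll(scrollID).Do(context.Background())
+//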
+func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClearScrollService) buildURL() (string, url.Values, error) { + // Build URL + path := "/_search/scroll/" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClearScrollService) Validate() error { + var invalid []string + if len(s.scrollId) == 0 { + invalid = append(invalid, "ScrollId") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body := strings.Join(s.scrollId, ",") + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClearScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClearScrollResponse is the response of ClearScrollService.Do. +type ClearScrollResponse struct { +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/client.go b/vendor/gopkg.in/olivere/elastic.v5/client.go new file mode 100644 index 0000000000000000000000000000000000000000..4876f04d98bd306786b400662192d31c6221306d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/client.go @@ -0,0 +1,1695 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/http/httputil" + "net/url" + "regexp" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // Version is the current version of Elastic. + Version = "5.0.30" + + // DefaultURL is the default endpoint of Elasticsearch on the local machine. + // It is used e.g. when initializing a new Client without a specific URL. + DefaultURL = "http://127.0.0.1:9200" + + // DefaultScheme is the default protocol scheme to use when sniffing + // the Elasticsearch cluster. + DefaultScheme = "http" + + // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default. + DefaultHealthcheckEnabled = true + + // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits + // for a response from Elasticsearch on startup, i.e. when creating a + // client. After the client is started, a shorter timeout is commonly used + // (its default is specified in DefaultHealthcheckTimeout). + DefaultHealthcheckTimeoutStartup = 5 * time.Second + + // DefaultHealthcheckTimeout specifies the time a running client waits for + // a response from Elasticsearch. Notice that the healthcheck timeout + // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup). + DefaultHealthcheckTimeout = 1 * time.Second + + // DefaultHealthcheckInterval is the default interval between + // two health checks of the nodes in the cluster. 
+ DefaultHealthcheckInterval = 60 * time.Second + + // DefaultSnifferEnabled specifies if the sniffer is enabled by default. + DefaultSnifferEnabled = true + + // DefaultSnifferInterval is the interval between two sniffing procedures, + // i.e. the lookup of all nodes in the cluster and their addition/removal + // from the list of actual connections. + DefaultSnifferInterval = 15 * time.Minute + + // DefaultSnifferTimeoutStartup is the default timeout for the sniffing + // process that is initiated while creating a new client. For subsequent + // sniffing processes, DefaultSnifferTimeout is used (by default). + DefaultSnifferTimeoutStartup = 5 * time.Second + + // DefaultSnifferTimeout is the default timeout after which the + // sniffing process times out. Notice that for the initial sniffing + // process, DefaultSnifferTimeoutStartup is used. + DefaultSnifferTimeout = 2 * time.Second + + // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending + // a GET request with a body. + DefaultSendGetBodyAs = "GET" + + // DefaultGzipEnabled specifies if gzip compression is enabled by default. + DefaultGzipEnabled = false + + // off is used to disable timeouts. + off = -1 * time.Second +) + +var ( + // ErrNoClient is raised when no Elasticsearch node is available. + ErrNoClient = errors.New("no Elasticsearch node available") + + // ErrRetry is raised when a request cannot be executed after the configured + // number of retries. + ErrRetry = errors.New("cannot connect after several retries") + + // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus + // didn't return in time. + ErrTimeout = errors.New("timeout") + + // noRetries is a retrier that does not retry. + noRetries = NewStopRetrier() +) + +// ClientOptionFunc is a function that configures a Client. +// It is used in NewClient. +type ClientOptionFunc func(*Client) error + +// Client is an Elasticsearch client. Create one by calling NewClient. +type Client struct { + c *http.Client // net/http Client to use for requests + + connsMu sync.RWMutex // connsMu guards the next block + conns []*conn // all connections + cindex int // index into conns + + mu sync.RWMutex // guards the next block + urls []string // set of URLs passed initially to the client + running bool // true if the client's background processes are running + errorlog Logger // error log for critical messages + infolog Logger // information log for e.g. 
response times + tracelog Logger // trace log for debugging + scheme string // http or https + healthcheckEnabled bool // healthchecks enabled or disabled + healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup + healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch + healthcheckInterval time.Duration // interval between healthchecks + healthcheckStop chan bool // notify healthchecker to stop, and notify back + snifferEnabled bool // sniffer enabled or disabled + snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup + snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API + snifferInterval time.Duration // interval between sniffing + snifferCallback SnifferCallback // callback to modify the sniffing decision + snifferStop chan bool // notify sniffer to stop, and notify back + decoder Decoder // used to decode data sent from Elasticsearch + basicAuth bool // indicates whether to send HTTP Basic Auth credentials + basicAuthUsername string // username for HTTP Basic Auth + basicAuthPassword string // password for HTTP Basic Auth + sendGetBodyAs string // override for when sending a GET with a body + requiredPlugins []string // list of required plugins + gzipEnabled bool // gzip compression enabled or disabled (default) + retrier Retrier // strategy for retries +} + +// NewClient creates a new client to work with Elasticsearch. +// +// NewClient, by default, is meant to be long-lived and shared across +// your application. If you need a short-lived client, e.g. for request-scope, +// consider using NewSimpleClient instead. +// +// The caller can configure the new client by passing configuration options +// to the func. +// +// Example: +// +// client, err := elastic.NewClient( +// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"), +// elastic.SetBasicAuth("user", "secret")) +// +// If no URL is configured, Elastic uses DefaultURL by default. +// +// If the sniffer is enabled (the default), the new client then sniffes +// the cluster via the Nodes Info API +// (see https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-info.html#cluster-nodes-info). +// It uses the URLs specified by the caller. The caller is responsible +// to only pass a list of URLs of nodes that belong to the same cluster. +// This sniffing process is run on startup and periodically. +// Use SnifferInterval to set the interval between two sniffs (default is +// 15 minutes). In other words: By default, the client will find new nodes +// in the cluster and remove those that are no longer available every +// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient. +// +// The list of nodes found in the sniffing process will be used to make +// connections to the REST API of Elasticsearch. These nodes are also +// periodically checked in a shorter time frame. This process is called +// a health check. By default, a health check is done every 60 seconds. +// You can set a shorter or longer interval by SetHealthcheckInterval. +// Disabling health checks is not recommended, but can be done by +// SetHealthcheck(false). +// +// Connections are automatically marked as dead or healthy while +// making requests to Elasticsearch. When a request fails, Elastic will +// call into the Retry strategy which can be specified with SetRetry. 
+// The Retry strategy is also responsible for handling backoff i.e. the time +// to wait before starting the next request. There are various standard +// backoff implementations, e.g. ExponentialBackoff or SimpleBackoff. +// Retries are disabled by default. +// +// If no HttpClient is configured, then http.DefaultClient is used. +// You can use your own http.Client with some http.Transport for +// advanced scenarios. +// +// An error is also returned when some configuration option is invalid or +// the new client cannot sniff the cluster (if enabled). +func NewClient(options ...ClientOptionFunc) (*Client, error) { + // Set up the client + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + healthcheckEnabled: DefaultHealthcheckEnabled, + healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup, + healthcheckTimeout: DefaultHealthcheckTimeout, + healthcheckInterval: DefaultHealthcheckInterval, + healthcheckStop: make(chan bool), + snifferEnabled: DefaultSnifferEnabled, + snifferTimeoutStartup: DefaultSnifferTimeoutStartup, + snifferTimeout: DefaultSnifferTimeout, + snifferInterval: DefaultSnifferInterval, + snifferCallback: nopSnifferCallback, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + retrier: noRetries, // no retries by default + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + // Use a default URL and normalize them + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + // If the URLs have auth info, use them here as an alternative to SetBasicAuth + if !c.basicAuth { + for _, urlStr := range c.urls { + u, err := url.Parse(urlStr) + if err == nil && u.User != nil { + c.basicAuth = true + c.basicAuthUsername = u.User.Username() + c.basicAuthPassword, _ = u.User.Password() + break + } + } + } + + // Check if we can make a request to any of the specified URLs + if c.healthcheckEnabled { + if err := c.startupHealthcheck(c.healthcheckTimeoutStartup); err != nil { + return nil, err + } + } + + if c.snifferEnabled { + // Sniff the cluster initially + if err := c.sniff(c.snifferTimeoutStartup); err != nil { + return nil, err + } + } else { + // Do not sniff the cluster initially. Use the provided URLs instead. + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + } + + if c.healthcheckEnabled { + // Perform an initial health check + c.healthcheck(c.healthcheckTimeoutStartup, true) + } + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + if c.snifferEnabled { + go c.sniffer() // periodically update cluster information + } + if c.healthcheckEnabled { + go c.healthchecker() // start goroutine periodically ping all nodes of the cluster + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// NewSimpleClient creates a new short-lived Client that can be used in +// use cases where you need e.g. one client per request. +// +// While NewClient by default sets up e.g. 
periodic health checks +// and sniffing for new nodes in separate goroutines, NewSimpleClient does +// not and is meant as a simple replacement where you don't need all the +// heavy lifting of NewClient. +// +// NewSimpleClient does the following by default: First, all health checks +// are disabled, including timeouts and periodic checks. Second, sniffing +// is disabled, including timeouts and periodic checks. The number of retries +// is set to 1. NewSimpleClient also does not start any goroutines. +// +// Notice that you can still override settings by passing additional options, +// just like with NewClient. +func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) { + c := &Client{ + c: http.DefaultClient, + conns: make([]*conn, 0), + cindex: -1, + scheme: DefaultScheme, + decoder: &DefaultDecoder{}, + healthcheckEnabled: false, + healthcheckTimeoutStartup: off, + healthcheckTimeout: off, + healthcheckInterval: off, + healthcheckStop: make(chan bool), + snifferEnabled: false, + snifferTimeoutStartup: off, + snifferTimeout: off, + snifferInterval: off, + snifferCallback: nopSnifferCallback, + snifferStop: make(chan bool), + sendGetBodyAs: DefaultSendGetBodyAs, + gzipEnabled: DefaultGzipEnabled, + retrier: noRetries, // no retries by default + } + + // Run the options on it + for _, option := range options { + if err := option(c); err != nil { + return nil, err + } + } + + // Use a default URL and normalize them + if len(c.urls) == 0 { + c.urls = []string{DefaultURL} + } + c.urls = canonicalize(c.urls...) + + // If the URLs have auth info, use them here as an alternative to SetBasicAuth + if !c.basicAuth { + for _, urlStr := range c.urls { + u, err := url.Parse(urlStr) + if err == nil && u.User != nil { + c.basicAuth = true + c.basicAuthUsername = u.User.Username() + c.basicAuthPassword, _ = u.User.Password() + break + } + } + } + + for _, url := range c.urls { + c.conns = append(c.conns, newConn(url, url)) + } + + // Ensure that we have at least one connection available + if err := c.mustActiveConn(); err != nil { + return nil, err + } + + // Check the required plugins + for _, plugin := range c.requiredPlugins { + found, err := c.HasPlugin(plugin) + if err != nil { + return nil, err + } + if !found { + return nil, fmt.Errorf("elastic: plugin %s not found", plugin) + } + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + return c, nil +} + +// SetHttpClient can be used to specify the http.Client to use when making +// HTTP requests to Elasticsearch. +func SetHttpClient(httpClient *http.Client) ClientOptionFunc { + return func(c *Client) error { + if httpClient != nil { + c.c = httpClient + } else { + c.c = http.DefaultClient + } + return nil + } +} + +// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to +// use when making HTTP requests to Elasticsearch. +func SetBasicAuth(username, password string) ClientOptionFunc { + return func(c *Client) error { + c.basicAuthUsername = username + c.basicAuthPassword = password + c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != "" + return nil + } +} + +// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that +// when sniffing is enabled, these URLs are used to initially sniff the +// cluster on startup. +func SetURL(urls ...string) ClientOptionFunc { + return func(c *Client) error { + switch len(urls) { + case 0: + c.urls = []string{DefaultURL} + default: + c.urls = urls + } + return nil + } +} + +// SetScheme sets the HTTP scheme to look for when sniffing (http or https). 
+// This is http by default. +func SetScheme(scheme string) ClientOptionFunc { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// SetSniff enables or disables the sniffer (enabled by default). +func SetSniff(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.snifferEnabled = enabled + return nil + } +} + +// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used +// when creating a new client. The default is 5 seconds. Notice that the +// timeout being used for subsequent sniffing processes is set with +// SetSnifferTimeout. +func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeoutStartup = timeout + return nil + } +} + +// SetSnifferTimeout sets the timeout for the sniffer that finds the +// nodes in a cluster. The default is 2 seconds. Notice that the timeout +// used when creating a new client on startup is usually greater and can +// be set with SetSnifferTimeoutStartup. +func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferTimeout = timeout + return nil + } +} + +// SetSnifferInterval sets the interval between two sniffing processes. +// The default interval is 15 minutes. +func SetSnifferInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.snifferInterval = interval + return nil + } +} + +// SnifferCallback defines the protocol for sniffing decisions. +type SnifferCallback func(*NodesInfoNode) bool + +// nopSnifferCallback is the default sniffer callback: It accepts +// all nodes the sniffer finds. +var nopSnifferCallback = func(*NodesInfoNode) bool { return true } + +// SetSnifferCallback allows the caller to modify sniffer decisions. +// When setting the callback, the given SnifferCallback is called for +// each (healthy) node found during the sniffing process. +// If the callback returns false, the node is ignored: No requests +// are routed to it. +func SetSnifferCallback(f SnifferCallback) ClientOptionFunc { + return func(c *Client) error { + if f != nil { + c.snifferCallback = f + } + return nil + } +} + +// SetHealthcheck enables or disables healthchecks (enabled by default). +func SetHealthcheck(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckEnabled = enabled + return nil + } +} + +// SetHealthcheckTimeoutStartup sets the timeout for the initial health check. +// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup). +// Notice that timeouts for subsequent health checks can be modified with +// SetHealthcheckTimeout. +func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeoutStartup = timeout + return nil + } +} + +// SetHealthcheckTimeout sets the timeout for periodic health checks. +// The default timeout is 1 second (see DefaultHealthcheckTimeout). +// Notice that a different (usually larger) timeout is used for the initial +// healthcheck, which is initiated while creating a new client. +// The startup timeout can be modified with SetHealthcheckTimeoutStartup. +func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckTimeout = timeout + return nil + } +} + +// SetHealthcheckInterval sets the interval between two health checks. +// The default interval is 60 seconds. 
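+//
+// A hedged configuration sketch combining this option with others defined
+// above; the values are illustrative, not recommendations:
+//
+//    client, err := NewClient(
+//        SetURL("http://127.0.0.1:9200"),
+//        SetSniff(false),
+//        SetHealthcheckInterval(30*time.Second),
+//    )
+//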
+func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc { + return func(c *Client) error { + c.healthcheckInterval = interval + return nil + } +} + +// SetMaxRetries sets the maximum number of retries before giving up when +// performing a HTTP request to Elasticsearch. +// +// Deprecated: Replace with a Retry implementation. +func SetMaxRetries(maxRetries int) ClientOptionFunc { + return func(c *Client) error { + if maxRetries < 0 { + return errors.New("MaxRetries must be greater than or equal to 0") + } else if maxRetries == 0 { + c.retrier = noRetries + } else { + // Create a Retrier that will wait for 100ms (+/- jitter) between requests. + // This resembles the old behavior with maxRetries. + ticks := make([]int, maxRetries) + for i := 0; i < len(ticks); i++ { + ticks[i] = 100 + } + backoff := NewSimpleBackoff(ticks...) + c.retrier = NewBackoffRetrier(backoff) + } + return nil + } +} + +// SetGzip enables or disables gzip compression (disabled by default). +func SetGzip(enabled bool) ClientOptionFunc { + return func(c *Client) error { + c.gzipEnabled = enabled + return nil + } +} + +// SetDecoder sets the Decoder to use when decoding data from Elasticsearch. +// DefaultDecoder is used by default. +func SetDecoder(decoder Decoder) ClientOptionFunc { + return func(c *Client) error { + if decoder != nil { + c.decoder = decoder + } else { + c.decoder = &DefaultDecoder{} + } + return nil + } +} + +// SetRequiredPlugins can be used to indicate that some plugins are required +// before a Client will be created. +func SetRequiredPlugins(plugins ...string) ClientOptionFunc { + return func(c *Client) error { + if c.requiredPlugins == nil { + c.requiredPlugins = make([]string, 0) + } + c.requiredPlugins = append(c.requiredPlugins, plugins...) + return nil + } +} + +// SetErrorLog sets the logger for critical messages like nodes joining +// or leaving the cluster or failing requests. It is nil by default. +func SetErrorLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.errorlog = logger + return nil + } +} + +// SetInfoLog sets the logger for informational messages, e.g. requests +// and their response times. It is nil by default. +func SetInfoLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.infolog = logger + return nil + } +} + +// SetTraceLog specifies the log.Logger to use for output of HTTP requests +// and responses which is helpful during debugging. It is nil by default. +func SetTraceLog(logger Logger) ClientOptionFunc { + return func(c *Client) error { + c.tracelog = logger + return nil + } +} + +// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request +// with a body. It is GET by default. +func SetSendGetBodyAs(httpMethod string) ClientOptionFunc { + return func(c *Client) error { + c.sendGetBodyAs = httpMethod + return nil + } +} + +// SetRetrier specifies the retry strategy that handles errors during +// HTTP request/response with Elasticsearch. +func SetRetrier(retrier Retrier) ClientOptionFunc { + return func(c *Client) error { + if retrier == nil { + retrier = noRetries // no retries by default + } + c.retrier = retrier + return nil + } +} + +// String returns a string representation of the client status. 
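+//
+// A sketch of wiring the loggers configured via SetErrorLog and SetInfoLog
+// above, assuming the standard library *log.Logger (which provides the
+// required Printf method):
+//
+//    errlog := log.New(os.Stderr, "ELASTIC ", log.LstdFlags)
+//    infolog := log.New(os.Stdout, "", log.LstdFlags)
+//    client, err := NewClient(SetErrorLog(errlog), SetInfoLog(infolog))
+//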
+func (c *Client) String() string { + c.connsMu.Lock() + conns := c.conns + c.connsMu.Unlock() + + var buf bytes.Buffer + for i, conn := range conns { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(conn.String()) + } + return buf.String() +} + +// IsRunning returns true if the background processes of the client are +// running, false otherwise. +func (c *Client) IsRunning() bool { + c.mu.RLock() + defer c.mu.RUnlock() + return c.running +} + +// Start starts the background processes like sniffing the cluster and +// periodic health checks. You don't need to run Start when creating a +// client with NewClient; the background processes are run by default. +// +// If the background processes are already running, this is a no-op. +func (c *Client) Start() { + c.mu.RLock() + if c.running { + c.mu.RUnlock() + return + } + c.mu.RUnlock() + + if c.snifferEnabled { + go c.sniffer() + } + if c.healthcheckEnabled { + go c.healthchecker() + } + + c.mu.Lock() + c.running = true + c.mu.Unlock() + + c.infof("elastic: client started") +} + +// Stop stops the background processes that the client is running, +// i.e. sniffing the cluster periodically and running health checks +// on the nodes. +// +// If the background processes are not running, this is a no-op. +func (c *Client) Stop() { + c.mu.RLock() + if !c.running { + c.mu.RUnlock() + return + } + c.mu.RUnlock() + + if c.healthcheckEnabled { + c.healthcheckStop <- true + <-c.healthcheckStop + } + + if c.snifferEnabled { + c.snifferStop <- true + <-c.snifferStop + } + + c.mu.Lock() + c.running = false + c.mu.Unlock() + + c.infof("elastic: client stopped") +} + +// errorf logs to the error log. +func (c *Client) errorf(format string, args ...interface{}) { + if c.errorlog != nil { + c.errorlog.Printf(format, args...) + } +} + +// infof logs informational messages. +func (c *Client) infof(format string, args ...interface{}) { + if c.infolog != nil { + c.infolog.Printf(format, args...) + } +} + +// tracef logs to the trace log. +func (c *Client) tracef(format string, args ...interface{}) { + if c.tracelog != nil { + c.tracelog.Printf(format, args...) + } +} + +// dumpRequest dumps the given HTTP request to the trace log. +func (c *Client) dumpRequest(r *http.Request) { + if c.tracelog != nil { + out, err := httputil.DumpRequestOut(r, true) + if err == nil { + c.tracef("%s\n", string(out)) + } + } +} + +// dumpResponse dumps the given HTTP response to the trace log. +func (c *Client) dumpResponse(resp *http.Response) { + if c.tracelog != nil { + out, err := httputil.DumpResponse(resp, true) + if err == nil { + c.tracef("%s\n", string(out)) + } + } +} + +// sniffer periodically runs sniff. +func (c *Client) sniffer() { + c.mu.RLock() + timeout := c.snifferTimeout + interval := c.snifferInterval + c.mu.RUnlock() + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-c.snifferStop: + // we are asked to stop, so we signal back that we're stopping now + c.snifferStop <- true + return + case <-ticker.C: + c.sniff(timeout) + } + } +} + +// sniff uses the Node Info API to return the list of nodes in the cluster. +// It uses the list of URLs passed on startup plus the list of URLs found +// by the preceding sniffing process (if sniffing is enabled). +// +// If sniffing is disabled, this is a no-op. +func (c *Client) sniff(timeout time.Duration) error { + c.mu.RLock() + if !c.snifferEnabled { + c.mu.RUnlock() + return nil + } + + // Use all available URLs provided to sniff the cluster. 
+ var urls []string + urlsMap := make(map[string]bool) + + // Add all URLs provided on startup + for _, url := range c.urls { + urlsMap[url] = true + urls = append(urls, url) + } + c.mu.RUnlock() + + // Add all URLs found by sniffing + c.connsMu.RLock() + for _, conn := range c.conns { + if !conn.IsDead() { + url := conn.URL() + if _, found := urlsMap[url]; !found { + urls = append(urls, url) + } + } + } + c.connsMu.RUnlock() + + if len(urls) == 0 { + return ErrNoClient + } + + // Start sniffing on all found URLs + ch := make(chan []*conn, len(urls)) + for _, url := range urls { + go func(url string) { ch <- c.sniffNode(url) }(url) + } + + // Wait for the results to come back, or the process times out. + for { + select { + case conns := <-ch: + if len(conns) > 0 { + c.updateConns(conns) + return nil + } + case <-time.After(timeout): + // We get here if no cluster responds in time + return ErrNoClient + } + } +} + +// sniffNode sniffs a single node. This method is run as a goroutine +// in sniff. If successful, it returns the list of node URLs extracted +// from the result of calling Nodes Info API. Otherwise, an empty array +// is returned. +func (c *Client) sniffNode(url string) []*conn { + var nodes []*conn + + // Call the Nodes Info API at /_nodes/http + req, err := NewRequest("GET", url+"/_nodes/http") + if err != nil { + return nodes + } + + c.mu.RLock() + if c.basicAuth { + req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword) + } + c.mu.RUnlock() + + res, err := c.c.Do((*http.Request)(req)) + if err != nil { + return nodes + } + if res == nil { + return nodes + } + + if res.Body != nil { + defer res.Body.Close() + } + + var info NodesInfoResponse + if err := json.NewDecoder(res.Body).Decode(&info); err == nil { + if len(info.Nodes) > 0 { + for nodeID, node := range info.Nodes { + if c.snifferCallback(node) { + if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 { + url := c.extractHostname(c.scheme, node.HTTP.PublishAddress) + if url != "" { + nodes = append(nodes, newConn(nodeID, url)) + } + } + } + } + } + } + return nodes +} + +// reSniffHostAndPort is used to extract hostname and port from a result +// from a Nodes Info API (example: "inet[/127.0.0.1:9200]"). +var reSniffHostAndPort = regexp.MustCompile(`\/([^:]*):([0-9]+)\]`) + +func (c *Client) extractHostname(scheme, address string) string { + if strings.HasPrefix(address, "inet") { + m := reSniffHostAndPort.FindStringSubmatch(address) + if len(m) == 3 { + return fmt.Sprintf("%s://%s:%s", scheme, m[1], m[2]) + } + } + s := address + if idx := strings.Index(s, "/"); idx >= 0 { + s = s[idx+1:] + } + if strings.Index(s, ":") < 0 { + return "" + } + return fmt.Sprintf("%s://%s", scheme, s) +} + +// updateConns updates the clients' connections with new information +// gather by a sniff operation. +func (c *Client) updateConns(conns []*conn) { + c.connsMu.Lock() + + // Build up new connections: + // If we find an existing connection, use that (including no. of failures etc.). + // If we find a new connection, add it. + var newConns []*conn + for _, conn := range conns { + var found bool + for _, oldConn := range c.conns { + if oldConn.NodeID() == conn.NodeID() { + // Take over the old connection + newConns = append(newConns, oldConn) + found = true + break + } + } + if !found { + // New connection didn't exist, so add it to our list of new conns. 
+ c.infof("elastic: %s joined the cluster", conn.URL()) + newConns = append(newConns, conn) + } + } + + c.conns = newConns + c.cindex = -1 + c.connsMu.Unlock() +} + +// healthchecker periodically runs healthcheck. +func (c *Client) healthchecker() { + c.mu.RLock() + timeout := c.healthcheckTimeout + interval := c.healthcheckInterval + c.mu.RUnlock() + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-c.healthcheckStop: + // we are asked to stop, so we signal back that we're stopping now + c.healthcheckStop <- true + return + case <-ticker.C: + c.healthcheck(timeout, false) + } + } +} + +// healthcheck does a health check on all nodes in the cluster. Depending on +// the node state, it marks connections as dead, sets them alive etc. +// If healthchecks are disabled and force is false, this is a no-op. +// The timeout specifies how long to wait for a response from Elasticsearch. +func (c *Client) healthcheck(timeout time.Duration, force bool) { + c.mu.RLock() + if !c.healthcheckEnabled && !force { + c.mu.RUnlock() + return + } + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.RUnlock() + + c.connsMu.RLock() + conns := c.conns + c.connsMu.RUnlock() + + for _, conn := range conns { + // Run the HEAD request against ES with a timeout + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Goroutine executes the HTTP request, returns an error and sets status + var status int + errc := make(chan error, 1) + go func(url string) { + req, err := NewRequest("HEAD", url) + if err != nil { + errc <- err + return + } + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := c.c.Do((*http.Request)(req)) + if res != nil { + status = res.StatusCode + if res.Body != nil { + res.Body.Close() + } + } + errc <- err + }(conn.URL()) + + // Wait for the Goroutine (or its timeout) + select { + case <-ctx.Done(): // timeout + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + break + case err := <-errc: + if err != nil { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + break + } + if status >= 200 && status < 300 { + conn.MarkAsAlive() + } else { + conn.MarkAsDead() + c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status) + } + break + } + } +} + +// startupHealthcheck is used at startup to check if the server is available +// at all. +func (c *Client) startupHealthcheck(timeout time.Duration) error { + c.mu.Lock() + urls := c.urls + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + c.mu.Unlock() + + // If we don't get a connection after "timeout", we bail. + start := time.Now() + for { + // Make a copy of the HTTP client provided via options to respect + // settings like Basic Auth or a user-specified http.Transport. + cl := new(http.Client) + *cl = *c.c + cl.Timeout = timeout + for _, url := range urls { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return err + } + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + res, err := cl.Do(req) + if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 { + return nil + } + } + time.Sleep(1 * time.Second) + if time.Now().Sub(start) > timeout { + break + } + } + return ErrNoClient +} + +// next returns the next available connection, or ErrNoClient. +func (c *Client) next() (*conn, error) { + // We do round-robin here. 
+ // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients. + c.connsMu.Lock() + defer c.connsMu.Unlock() + + i := 0 + numConns := len(c.conns) + for { + i++ + if i > numConns { + break // we visited all conns: they all seem to be dead + } + c.cindex++ + if c.cindex >= numConns { + c.cindex = 0 + } + conn := c.conns[c.cindex] + if !conn.IsDead() { + return conn, nil + } + } + + // We have a deadlock here: All nodes are marked as dead. + // If sniffing is disabled, connections will never be marked alive again. + // So we are marking them as alive--if sniffing is disabled. + // They'll then be picked up in the next call to PerformRequest. + if !c.snifferEnabled { + c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns)) + for _, conn := range c.conns { + conn.MarkAsAlive() + } + } + + // We tried hard, but there is no node available + return nil, ErrNoClient +} + +// mustActiveConn returns nil if there is an active connection, +// otherwise ErrNoClient is returned. +func (c *Client) mustActiveConn() error { + c.connsMu.Lock() + defer c.connsMu.Unlock() + + for _, c := range c.conns { + if !c.IsDead() { + return nil + } + } + return ErrNoClient +} + +// PerformRequest does a HTTP request to Elasticsearch. +// It returns a response (which might be nil) and an error on failure. +// +// Optionally, a list of HTTP error codes to ignore can be passed. +// This is necessary for services that expect e.g. HTTP status 404 as a +// valid outcome (Exists, IndicesExists, IndicesTypeExists). +func (c *Client) PerformRequest(ctx context.Context, method, path string, params url.Values, body interface{}, ignoreErrors ...int) (*Response, error) { + start := time.Now().UTC() + + c.mu.RLock() + timeout := c.healthcheckTimeout + basicAuth := c.basicAuth + basicAuthUsername := c.basicAuthUsername + basicAuthPassword := c.basicAuthPassword + sendGetBodyAs := c.sendGetBodyAs + gzipEnabled := c.gzipEnabled + c.mu.RUnlock() + + var err error + var conn *conn + var req *Request + var resp *Response + var retried bool + var n int + + // Change method if sendGetBodyAs is specified. + if method == "GET" && body != nil && sendGetBodyAs != "GET" { + method = sendGetBodyAs + } + + for { + pathWithParams := path + if len(params) > 0 { + pathWithParams += "?" + params.Encode() + } + + // Get a connection + conn, err = c.next() + if err == ErrNoClient { + n++ + if !retried { + // Force a healtcheck as all connections seem to be dead. 
+ c.healthcheck(timeout, false) + } + wait, ok, rerr := c.retrier.Retry(ctx, n, nil, nil, err) + if rerr != nil { + return nil, rerr + } + if !ok { + return nil, err + } + retried = true + time.Sleep(wait) + continue // try again + } + if err != nil { + c.errorf("elastic: cannot get connection from pool") + return nil, err + } + + req, err = NewRequest(method, conn.URL()+pathWithParams) + if err != nil { + c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(method), conn.URL()+pathWithParams, err) + return nil, err + } + + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + + // Set body + if body != nil { + err = req.SetBody(body, gzipEnabled) + if err != nil { + c.errorf("elastic: couldn't set body %+v for request: %v", body, err) + return nil, err + } + } + + // Tracing + c.dumpRequest((*http.Request)(req)) + + // Get response + res, err := ctxhttp.Do(ctx, c.c, (*http.Request)(req)) + if err != nil { + n++ + wait, ok, rerr := c.retrier.Retry(ctx, n, (*http.Request)(req), res, err) + if rerr != nil { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + return nil, rerr + } + if !ok { + c.errorf("elastic: %s is dead", conn.URL()) + conn.MarkAsDead() + return nil, err + } + retried = true + time.Sleep(wait) + continue // try again + } + if res.Body != nil { + defer res.Body.Close() + } + + // Check for errors + if err := checkResponse((*http.Request)(req), res, ignoreErrors...); err != nil { + // No retry if request succeeded + // We still try to return a response. + resp, _ = c.newResponse(res) + return resp, err + } + + // Tracing + c.dumpResponse(res) + + // We successfully made a request with this connection + conn.MarkAsHealthy() + + resp, err = c.newResponse(res) + if err != nil { + return nil, err + } + + break + } + + duration := time.Now().UTC().Sub(start) + c.infof("%s %s [status:%d, request:%.3fs]", + strings.ToUpper(method), + req.URL, + resp.StatusCode, + float64(int64(duration/time.Millisecond))/1000) + + return resp, nil +} + +// -- Document APIs -- + +// Index a document. +func (c *Client) Index() *IndexService { + return NewIndexService(c) +} + +// Get a document. +func (c *Client) Get() *GetService { + return NewGetService(c) +} + +// MultiGet retrieves multiple documents in one roundtrip. +func (c *Client) MultiGet() *MgetService { + return NewMgetService(c) +} + +// Mget retrieves multiple documents in one roundtrip. +func (c *Client) Mget() *MgetService { + return NewMgetService(c) +} + +// Delete a document. +func (c *Client) Delete() *DeleteService { + return NewDeleteService(c) +} + +// DeleteByQuery deletes documents as found by a query. +func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService { + return NewDeleteByQueryService(c).Index(indices...) +} + +// Update a document. +func (c *Client) Update() *UpdateService { + return NewUpdateService(c) +} + +// UpdateByQuery performs an update on a set of documents. +func (c *Client) UpdateByQuery(indices ...string) *UpdateByQueryService { + return NewUpdateByQueryService(c).Index(indices...) +} + +// Bulk is the entry point to mass insert/update/delete documents. +func (c *Client) Bulk() *BulkService { + return NewBulkService(c) +} + +// BulkProcessor allows setting up a concurrent processor of bulk requests. +func (c *Client) BulkProcessor() *BulkProcessorService { + return NewBulkProcessorService(c) +} + +// Reindex copies data from a source index into a destination index. 
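+//
+// A hedged sketch; the index names are placeholders and SourceIndex /
+// DestinationIndex are assumed to be available on ReindexService:
+//
+//    res, err := client.Reindex().
+//        SourceIndex("old-index").
+//        DestinationIndex("new-index").
+//        Do(context.Background())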
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-reindex.html +// for details on the Reindex API. +func (c *Client) Reindex() *ReindexService { + return NewReindexService(c) +} + +// TermVectors returns information and statistics on terms in the fields +// of a particular document. +func (c *Client) TermVectors(index, typ string) *TermvectorsService { + builder := NewTermvectorsService(c) + builder = builder.Index(index).Type(typ) + return builder +} + +// MultiTermVectors returns information and statistics on terms in the fields +// of multiple documents. +func (c *Client) MultiTermVectors() *MultiTermvectorService { + return NewMultiTermvectorService(c) +} + +// -- Search APIs -- + +// Search is the entry point for searches. +func (c *Client) Search(indices ...string) *SearchService { + return NewSearchService(c).Index(indices...) +} + +// Suggest returns a service to return suggestions. +func (c *Client) Suggest(indices ...string) *SuggestService { + return NewSuggestService(c).Index(indices...) +} + +// MultiSearch is the entry point for multi searches. +func (c *Client) MultiSearch() *MultiSearchService { + return NewMultiSearchService(c) +} + +// Count documents. +func (c *Client) Count(indices ...string) *CountService { + return NewCountService(c).Index(indices...) +} + +// Explain computes a score explanation for a query and a specific document. +func (c *Client) Explain(index, typ, id string) *ExplainService { + return NewExplainService(c).Index(index).Type(typ).Id(id) +} + +// TODO Search Template +// TODO Search Shards API +// TODO Search Exists API +// TODO Validate API + +// FieldStats returns statistical information about fields in indices. +func (c *Client) FieldStats(indices ...string) *FieldStatsService { + return NewFieldStatsService(c).Index(indices...) +} + +// Exists checks if a document exists. +func (c *Client) Exists() *ExistsService { + return NewExistsService(c) +} + +// Scroll through documents. Use this to efficiently scroll through results +// while returning the results to a client. +func (c *Client) Scroll(indices ...string) *ScrollService { + return NewScrollService(c).Index(indices...) +} + +// ClearScroll can be used to clear search contexts manually. +func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService { + return NewClearScrollService(c).ScrollId(scrollIds...) +} + +// -- Indices APIs -- + +// CreateIndex returns a service to create a new index. +func (c *Client) CreateIndex(name string) *IndicesCreateService { + return NewIndicesCreateService(c).Index(name) +} + +// DeleteIndex returns a service to delete an index. +func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService { + return NewIndicesDeleteService(c).Index(indices) +} + +// IndexExists allows to check if an index exists. +func (c *Client) IndexExists(indices ...string) *IndicesExistsService { + return NewIndicesExistsService(c).Index(indices) +} + +// ShrinkIndex returns a service to shrink one index into another. +func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService { + return NewIndicesShrinkService(c).Source(source).Target(target) +} + +// RolloverIndex rolls an alias over to a new index when the existing index +// is considered to be too large or too old. +func (c *Client) RolloverIndex(alias string) *IndicesRolloverService { + return NewIndicesRolloverService(c).Alias(alias) +} + +// TypeExists allows to check if one or more types exist in one or more indices. 
+func (c *Client) TypeExists() *IndicesExistsTypeService { + return NewIndicesExistsTypeService(c) +} + +// IndexStats provides statistics on different operations happining +// in one or more indices. +func (c *Client) IndexStats(indices ...string) *IndicesStatsService { + return NewIndicesStatsService(c).Index(indices...) +} + +// OpenIndex opens an index. +func (c *Client) OpenIndex(name string) *IndicesOpenService { + return NewIndicesOpenService(c).Index(name) +} + +// CloseIndex closes an index. +func (c *Client) CloseIndex(name string) *IndicesCloseService { + return NewIndicesCloseService(c).Index(name) +} + +// IndexGet retrieves information about one or more indices. +// IndexGet is only available for Elasticsearch 1.4 or later. +func (c *Client) IndexGet(indices ...string) *IndicesGetService { + return NewIndicesGetService(c).Index(indices...) +} + +// IndexGetSettings retrieves settings of all, one or more indices. +func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService { + return NewIndicesGetSettingsService(c).Index(indices...) +} + +// IndexPutSettings sets settings for all, one or more indices. +func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService { + return NewIndicesPutSettingsService(c).Index(indices...) +} + +// IndexAnalyze performs the analysis process on a text and returns the +// token breakdown of the text. +func (c *Client) IndexAnalyze() *IndicesAnalyzeService { + return NewIndicesAnalyzeService(c) +} + +// Forcemerge optimizes one or more indices. +// It replaces the deprecated Optimize API. +func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService { + return NewIndicesForcemergeService(c).Index(indices...) +} + +// Refresh asks Elasticsearch to refresh one or more indices. +func (c *Client) Refresh(indices ...string) *RefreshService { + return NewRefreshService(c).Index(indices...) +} + +// Flush asks Elasticsearch to free memory from the index and +// flush data to disk. +func (c *Client) Flush(indices ...string) *IndicesFlushService { + return NewIndicesFlushService(c).Index(indices...) +} + +// Alias enables the caller to add and/or remove aliases. +func (c *Client) Alias() *AliasService { + return NewAliasService(c) +} + +// Aliases returns aliases by index name(s). +func (c *Client) Aliases() *AliasesService { + return NewAliasesService(c) +} + +// GetTemplate gets a search template. +// Use IndexXXXTemplate funcs to manage index templates. +func (c *Client) GetTemplate() *GetTemplateService { + return NewGetTemplateService(c) +} + +// PutTemplate creates or updates a search template. +// Use IndexXXXTemplate funcs to manage index templates. +func (c *Client) PutTemplate() *PutTemplateService { + return NewPutTemplateService(c) +} + +// DeleteTemplate deletes a search template. +// Use IndexXXXTemplate funcs to manage index templates. +func (c *Client) DeleteTemplate() *DeleteTemplateService { + return NewDeleteTemplateService(c) +} + +// IndexGetTemplate gets an index template. +// Use XXXTemplate funcs to manage search templates. +func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService { + return NewIndicesGetTemplateService(c).Name(names...) +} + +// IndexTemplateExists gets check if an index template exists. +// Use XXXTemplate funcs to manage search templates. +func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService { + return NewIndicesExistsTemplateService(c).Name(name) +} + +// IndexPutTemplate creates or updates an index template. 
+// Use XXXTemplate funcs to manage search templates. +func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService { + return NewIndicesPutTemplateService(c).Name(name) +} + +// IndexDeleteTemplate deletes an index template. +// Use XXXTemplate funcs to manage search templates. +func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService { + return NewIndicesDeleteTemplateService(c).Name(name) +} + +// GetMapping gets a mapping. +func (c *Client) GetMapping() *IndicesGetMappingService { + return NewIndicesGetMappingService(c) +} + +// PutMapping registers a mapping. +func (c *Client) PutMapping() *IndicesPutMappingService { + return NewIndicesPutMappingService(c) +} + +// -- cat APIs -- + +// TODO cat aliases +// TODO cat allocation +// TODO cat count +// TODO cat fielddata +// TODO cat health +// TODO cat indices +// TODO cat master +// TODO cat nodes +// TODO cat pending tasks +// TODO cat plugins +// TODO cat recovery +// TODO cat thread pool +// TODO cat shards +// TODO cat segments + +// -- Ingest APIs -- + +// IngestPutPipeline adds pipelines and updates existing pipelines in +// the cluster. +func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService { + return NewIngestPutPipelineService(c).Id(id) +} + +// IngestGetPipeline returns pipelines based on ID. +func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService { + return NewIngestGetPipelineService(c).Id(ids...) +} + +// IngestDeletePipeline deletes a pipeline by ID. +func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService { + return NewIngestDeletePipelineService(c).Id(id) +} + +// IngestSimulatePipeline executes a specific pipeline against the set of +// documents provided in the body of the request. +func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService { + return NewIngestSimulatePipelineService(c) +} + +// -- Cluster APIs -- + +// ClusterHealth retrieves the health of the cluster. +func (c *Client) ClusterHealth() *ClusterHealthService { + return NewClusterHealthService(c) +} + +// ClusterState retrieves the state of the cluster. +func (c *Client) ClusterState() *ClusterStateService { + return NewClusterStateService(c) +} + +// ClusterStats retrieves cluster statistics. +func (c *Client) ClusterStats() *ClusterStatsService { + return NewClusterStatsService(c) +} + +// NodesInfo retrieves one or more or all of the cluster nodes information. +func (c *Client) NodesInfo() *NodesInfoService { + return NewNodesInfoService(c) +} + +// NodesStats retrieves one or more or all of the cluster nodes statistics. +func (c *Client) NodesStats() *NodesStatsService { + return NewNodesStatsService(c) +} + +// TasksCancel cancels tasks running on the specified nodes. +func (c *Client) TasksCancel() *TasksCancelService { + return NewTasksCancelService(c) +} + +// TasksList retrieves the list of tasks running on the specified nodes. 
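+//
+// Minimal illustrative sketch only: it assumes TasksListService
+// follows the same builder pattern as the other services in this
+// package and exposes a Do(ctx) method:
+//
+//   tasks, err := client.TasksList().Do(context.Background())
+//   if err != nil {
+//       // handle error
+//   }
+//   _ = tasks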
+func (c *Client) TasksList() *TasksListService { + return NewTasksListService(c) +} + +// TODO Pending cluster tasks +// TODO Cluster Reroute +// TODO Cluster Update Settings +// TODO Nodes Stats +// TODO Nodes hot_threads + +// -- Snapshot and Restore -- + +// TODO Snapshot Create +// TODO Snapshot Create Repository +// TODO Snapshot Delete +// TODO Snapshot Delete Repository +// TODO Snapshot Get +// TODO Snapshot Get Repository +// TODO Snapshot Restore +// TODO Snapshot Status +// TODO Snapshot Verify Repository + +// -- Helpers and shortcuts -- + +// ElasticsearchVersion returns the version number of Elasticsearch +// running on the given URL. +func (c *Client) ElasticsearchVersion(url string) (string, error) { + res, _, err := c.Ping(url).Do(context.Background()) + if err != nil { + return "", err + } + return res.Version.Number, nil +} + +// IndexNames returns the names of all indices in the cluster. +func (c *Client) IndexNames() ([]string, error) { + res, err := c.IndexGetSettings().Index("_all").Do(context.Background()) + if err != nil { + return nil, err + } + var names []string + for name := range res { + names = append(names, name) + } + return names, nil +} + +// Ping checks if a given node in a cluster exists and (optionally) +// returns some basic information about the Elasticsearch server, +// e.g. the Elasticsearch version number. +// +// Notice that you need to specify a URL here explicitly. +func (c *Client) Ping(url string) *PingService { + return NewPingService(c).URL(url) +} + +// WaitForStatus waits for the cluster to have the given status. +// This is a shortcut method for the ClusterHealth service. +// +// WaitForStatus waits for the specified timeout, e.g. "10s". +// If the cluster will have the given state within the timeout, nil is returned. +// If the request timed out, ErrTimeout is returned. +func (c *Client) WaitForStatus(status string, timeout string) error { + health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background()) + if err != nil { + return err + } + if health.TimedOut { + return ErrTimeout + } + return nil +} + +// WaitForGreenStatus waits for the cluster to have the "green" status. +// See WaitForStatus for more details. +func (c *Client) WaitForGreenStatus(timeout string) error { + return c.WaitForStatus("green", timeout) +} + +// WaitForYellowStatus waits for the cluster to have the "yellow" status. +// See WaitForStatus for more details. +func (c *Client) WaitForYellowStatus(timeout string) error { + return c.WaitForStatus("yellow", timeout) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go new file mode 100644 index 0000000000000000000000000000000000000000..caf15fe871dc26cc41c8c7c6be642d6b1350d55e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_health.go @@ -0,0 +1,245 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ClusterHealthService allows to get a very simple status on the health of the cluster. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-health.html +// for details. 
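+//
+// A typical, illustrative use through the client (ctx and client are
+// assumed to exist; the index name is a placeholder):
+//
+//   res, err := client.ClusterHealth().
+//       Index("my-index").
+//       WaitForYellowStatus().
+//       Timeout("10s").
+//       Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   fmt.Printf("cluster %q is %s\n", res.ClusterName, res.Status)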
+type ClusterHealthService struct { + client *Client + pretty bool + indices []string + level string + local *bool + masterTimeout string + timeout string + waitForActiveShards *int + waitForNodes string + waitForNoRelocatingShards *bool + waitForStatus string +} + +// NewClusterHealthService creates a new ClusterHealthService. +func NewClusterHealthService(client *Client) *ClusterHealthService { + return &ClusterHealthService{ + client: client, + indices: make([]string, 0), + } +} + +// Index limits the information returned to specific indices. +func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService { + s.indices = append(s.indices, indices...) + return s +} + +// Level specifies the level of detail for returned information. +func (s *ClusterHealthService) Level(level string) *ClusterHealthService { + s.level = level + return s +} + +// Local indicates whether to return local information. If it is true, +// we do not retrieve the state from master node (default: false). +func (s *ClusterHealthService) Local(local bool) *ClusterHealthService { + s.local = &local + return s +} + +// MasterTimeout specifies an explicit operation timeout for connection to master node. +func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout specifies an explicit operation timeout. +func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService { + s.timeout = timeout + return s +} + +// WaitForActiveShards can be used to wait until the specified number of shards are active. +func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService { + s.waitForActiveShards = &waitForActiveShards + return s +} + +// WaitForNodes can be used to wait until the specified number of nodes are available. +// Example: "12" to wait for exact values, ">12" and "<12" for ranges. +func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService { + s.waitForNodes = waitForNodes + return s +} + +// WaitForNoRelocatingShards can be used to wait until all shard relocations are finished. +func (s *ClusterHealthService) WaitForNoRelocatingShards(waitForNoRelocatingShards bool) *ClusterHealthService { + s.waitForNoRelocatingShards = &waitForNoRelocatingShards + return s +} + +// WaitForStatus can be used to wait until the cluster is in a specific state. +// Valid values are: green, yellow, or red. +func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService { + s.waitForStatus = waitForStatus + return s +} + +// WaitForGreenStatus will wait for the "green" state. +func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService { + return s.WaitForStatus("green") +} + +// WaitForYellowStatus will wait for the "yellow" state. +func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService { + return s.WaitForStatus("yellow") +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
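+//
+// For example, Index("a", "b") combined with Level("indices") and
+// WaitForStatus("yellow") produces the path "/_cluster/health/a,b"
+// with the query string "level=indices&wait_for_status=yellow".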
+func (s *ClusterHealthService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.indices) > 0 { + path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{ + "index": strings.Join(s.indices, ","), + }) + } else { + path = "/_cluster/health" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.level != "" { + params.Set("level", s.level) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != nil { + params.Set("wait_for_active_shards", fmt.Sprintf("%v", s.waitForActiveShards)) + } + if s.waitForNodes != "" { + params.Set("wait_for_nodes", s.waitForNodes) + } + if s.waitForNoRelocatingShards != nil { + params.Set("wait_for_no_relocating_shards", fmt.Sprintf("%v", *s.waitForNoRelocatingShards)) + } + if s.waitForStatus != "" { + params.Set("wait_for_status", s.waitForStatus) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterHealthService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterHealthResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterHealthResponse is the response of ClusterHealthService.Do. +type ClusterHealthResponse struct { + ClusterName string `json:"cluster_name"` + Status string `json:"status"` + TimedOut bool `json:"timed_out"` + NumberOfNodes int `json:"number_of_nodes"` + NumberOfDataNodes int `json:"number_of_data_nodes"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + DelayedUnassignedShards int `json:"delayed_unassigned_shards"` + NumberOfPendingTasks int `json:"number_of_pending_tasks"` + NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` + TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"` + ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` + + // Validation failures -> index name -> array of validation failures + ValidationFailures []map[string][]string `json:"validation_failures"` + + // Index name -> index health + Indices map[string]*ClusterIndexHealth `json:"indices"` +} + +// ClusterIndexHealth will be returned as part of ClusterHealthResponse. 
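+//
+// Illustrative traversal: given res, a *ClusterHealthResponse from
+// ClusterHealthService.Do, the per-index health (typically populated
+// when the request asked for Level("indices") or Level("shards"))
+// can be inspected like this:
+//
+//   for name, idx := range res.Indices {
+//       fmt.Printf("%s: %s (%d unassigned shards)\n", name, idx.Status, idx.UnassignedShards)
+//   }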
+type ClusterIndexHealth struct { + Status string `json:"status"` + NumberOfShards int `json:"number_of_shards"` + NumberOfReplicas int `json:"number_of_replicas"` + ActivePrimaryShards int `json:"active_primary_shards"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` + // Validation failures + ValidationFailures []string `json:"validation_failures"` + // Shards by id, e.g. "0" or "1" + Shards map[string]*ClusterShardHealth `json:"shards"` +} + +// ClusterShardHealth will be returned as part of ClusterHealthResponse. +type ClusterShardHealth struct { + Status string `json:"status"` + PrimaryActive bool `json:"primary_active"` + ActiveShards int `json:"active_shards"` + RelocatingShards int `json:"relocating_shards"` + InitializingShards int `json:"initializing_shards"` + UnassignedShards int `json:"unassigned_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go new file mode 100644 index 0000000000000000000000000000000000000000..3458efc48cedaf72954c062146cd850dcf3906eb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_state.go @@ -0,0 +1,285 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ClusterStateService allows to get a comprehensive state information of the whole cluster. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-state.html +// for details. +type ClusterStateService struct { + client *Client + pretty bool + indices []string + metrics []string + allowNoIndices *bool + expandWildcards string + flatSettings *bool + ignoreUnavailable *bool + local *bool + masterTimeout string +} + +// NewClusterStateService creates a new ClusterStateService. +func NewClusterStateService(client *Client) *ClusterStateService { + return &ClusterStateService{ + client: client, + indices: make([]string, 0), + metrics: make([]string, 0), + } +} + +// Index is a list of index names. Use _all or an empty string to +// perform the operation on all indices. +func (s *ClusterStateService) Index(indices ...string) *ClusterStateService { + s.indices = append(s.indices, indices...) + return s +} + +// Metric limits the information returned to the specified metric. +// It can be one of: version, master_node, nodes, routing_table, metadata, +// blocks, or customs. +func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService { + s.metrics = append(s.metrics, metrics...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. 
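+// Typical values are "open", "closed", "none" and "all".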
+func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings, when set, returns settings in flat format (default: false). +func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService { + s.flatSettings = &flatSettings + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Local indicates whether to return local information. When set, it does not +// retrieve the state from master node (default: false). +func (s *ClusterStateService) Local(local bool) *ClusterStateService { + s.local = &local + return s +} + +// MasterTimeout specifies timeout for connection to master. +func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ClusterStateService) buildURL() (string, url.Values, error) { + // Build URL + metrics := strings.Join(s.metrics, ",") + if metrics == "" { + metrics = "_all" + } + indices := strings.Join(s.indices, ",") + if indices == "" { + indices = "_all" + } + path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{ + "metrics": metrics, + "indices": indices, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStateResponse is the response of ClusterStateService.Do. 
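+//
+// Illustrative example: request only the metadata metric for all
+// indices and read a few top-level fields from the result (ctx and
+// client are assumed to exist):
+//
+//   state, err := client.ClusterState().Metric("metadata").Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   fmt.Println(state.ClusterName, state.StateUUID, state.Version)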
+type ClusterStateResponse struct { + ClusterName string `json:"cluster_name"` + Version int64 `json:"version"` + StateUUID string `json:"state_uuid"` + MasterNode string `json:"master_node"` + Blocks map[string]*clusterBlocks `json:"blocks"` + Nodes map[string]*discoveryNode `json:"nodes"` + Metadata *clusterStateMetadata `json:"metadata"` + RoutingTable map[string]*clusterStateRoutingTable `json:"routing_table"` + RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type clusterBlocks struct { + Global map[string]*clusterBlock `json:"global"` // id -> cluster block + Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block +} + +type clusterBlock struct { + Description string `json:"description"` + Retryable bool `json:"retryable"` + DisableStatePersistence bool `json:"disable_state_persistence"` + Levels []string `json:"levels"` +} + +type clusterStateMetadata struct { + ClusterUUID string `json:"cluster_uuid"` + Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata + Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data + RoutingTable struct { + Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table + } `json:"routing_table"` + RoutingNodes struct { + Unassigned []*shardRouting `json:"unassigned"` + Nodes []*shardRouting `json:"nodes"` + } `json:"routing_nodes"` + Customs map[string]interface{} `json:"customs"` +} + +type discoveryNode struct { + Name string `json:"name"` // server name, e.g. "es1" + TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300] + Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true } +} + +type clusterStateRoutingTable struct { + Indices map[string]interface{} `json:"indices"` +} + +type clusterStateRoutingNode struct { + Unassigned []*shardRouting `json:"unassigned"` + // Node Id -> shardRouting + Nodes map[string][]*shardRouting `json:"nodes"` +} + +type indexTemplateMetaData struct { + Template string `json:"template"` // e.g. "store-*" + Order int `json:"order"` + Settings map[string]interface{} `json:"settings"` // index settings + Mappings map[string]interface{} `json:"mappings"` // type name -> mapping +} + +type indexMetaData struct { + State string `json:"state"` + Settings map[string]interface{} `json:"settings"` + Mappings map[string]interface{} `json:"mappings"` + Aliases []string `json:"aliases"` // e.g. 
[ "alias1", "alias2" ] +} + +type indexRoutingTable struct { + Shards map[string]*shardRouting `json:"shards"` +} + +type shardRouting struct { + State string `json:"state"` + Primary bool `json:"primary"` + Node string `json:"node"` + RelocatingNode string `json:"relocating_node"` + Shard int `json:"shard"` + Index string `json:"index"` + Version int64 `json:"version"` + RestoreSource *RestoreSource `json:"restore_source"` + AllocationId *allocationId `json:"allocation_id"` + UnassignedInfo *unassignedInfo `json:"unassigned_info"` +} + +type RestoreSource struct { + Repository string `json:"repository"` + Snapshot string `json:"snapshot"` + Version string `json:"version"` + Index string `json:"index"` +} + +type allocationId struct { + Id string `json:"id"` + RelocationId string `json:"relocation_id"` +} + +type unassignedInfo struct { + Reason string `json:"reason"` + At string `json:"at"` + Details string `json:"details"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..e71a561fc75e6bd1baf52b5e14f2edf0d348fbed --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/cluster_stats.go @@ -0,0 +1,351 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ClusterStatsService is documented at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-stats.html. +type ClusterStatsService struct { + client *Client + pretty bool + nodeId []string + flatSettings *bool + human *bool +} + +// NewClusterStatsService creates a new ClusterStatsService. +func NewClusterStatsService(client *Client) *ClusterStatsService { + return &ClusterStatsService{ + client: client, + nodeId: make([]string, 0), + } +} + +// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. +func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService { + s.nodeId = nodeId + return s +} + +// FlatSettings is documented as: Return settings in flat format (default: false). +func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService { + s.flatSettings = &flatSettings + return s +} + +// Human is documented as: Whether to return time and byte values in human-readable format.. +func (s *ClusterStatsService) Human(human bool) *ClusterStatsService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *ClusterStatsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.nodeId) > 0 { + path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + }) + if err != nil { + return "", url.Values{}, err + } + } else { + path, err = uritemplates.Expand("/_cluster/stats", map[string]string{}) + if err != nil { + return "", url.Values{}, err + } + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ClusterStatsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ClusterStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ClusterStatsResponse is the response of ClusterStatsService.Do. +type ClusterStatsResponse struct { + Timestamp int64 `json:"timestamp"` + ClusterName string `json:"cluster_name"` + ClusterUUID string `json:"uuid"` + Status string `json:"status"` + Indices *ClusterStatsIndices `json:"indices"` + Nodes *ClusterStatsNodes `json:"nodes"` +} + +type ClusterStatsIndices struct { + Count int `json:"count"` + Shards *ClusterStatsIndicesShards `json:"shards"` + Docs *ClusterStatsIndicesDocs `json:"docs"` + Store *ClusterStatsIndicesStore `json:"store"` + FieldData *ClusterStatsIndicesFieldData `json:"fielddata"` + FilterCache *ClusterStatsIndicesFilterCache `json:"filter_cache"` + IdCache *ClusterStatsIndicesIdCache `json:"id_cache"` + Completion *ClusterStatsIndicesCompletion `json:"completion"` + Segments *ClusterStatsIndicesSegments `json:"segments"` + Percolate *ClusterStatsIndicesPercolate `json:"percolate"` +} + +type ClusterStatsIndicesShards struct { + Total int `json:"total"` + Primaries int `json:"primaries"` + Replication float64 `json:"replication"` + Index *ClusterStatsIndicesShardsIndex `json:"index"` +} + +type ClusterStatsIndicesShardsIndex struct { + Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"` + Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"` + Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"` +} + +type ClusterStatsIndicesShardsIndexIntMinMax struct { + Min int `json:"min"` + Max int `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesShardsIndexFloat64MinMax struct { + Min float64 `json:"min"` + Max float64 `json:"max"` + Avg float64 `json:"avg"` +} + +type ClusterStatsIndicesDocs struct { + Count int `json:"count"` + Deleted int `json:"deleted"` +} + +type ClusterStatsIndicesStore struct { + Size string `json:"size"` // e.g. "5.3gb" + SizeInBytes int64 `json:"size_in_bytes"` + ThrottleTime string `json:"throttle_time"` // e.g. 
"0s" + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type ClusterStatsIndicesFieldData struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + Fields map[string]struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesFilterCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` +} + +type ClusterStatsIndicesIdCache struct { + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` +} + +type ClusterStatsIndicesCompletion struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]struct { + Size string `json:"size"` // e.g. "61.3kb" + SizeInBytes int64 `json:"size_in_bytes"` + } `json:"fields"` +} + +type ClusterStatsIndicesSegments struct { + Count int64 `json:"count"` + Memory string `json:"memory"` // e.g. "61.3kb" + MemoryInBytes int64 `json:"memory_in_bytes"` + IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb" + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + IndexWriterMaxMemory string `json:"index_writer_max_memory"` // e.g. "61.3kb" + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` + VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb" + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` + FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb" + FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` +} + +type ClusterStatsIndicesPercolate struct { + Total int64 `json:"total"` + // TODO(oe) The JSON tag here is wrong as of ES 1.5.2 it seems + Time string `json:"get_time"` // e.g. "1s" + TimeInBytes int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySize string `json:"memory_size"` // e.g. "61.3kb" + MemorySizeInBytes int64 `json:"memory_sitze_in_bytes"` + Queries int64 `json:"queries"` +} + +// --- + +type ClusterStatsNodes struct { + Count *ClusterStatsNodesCount `json:"count"` + Versions []string `json:"versions"` + OS *ClusterStatsNodesOsStats `json:"os"` + Process *ClusterStatsNodesProcessStats `json:"process"` + JVM *ClusterStatsNodesJvmStats `json:"jvm"` + FS *ClusterStatsNodesFsStats `json:"fs"` + Plugins []*ClusterStatsNodesPlugin `json:"plugins"` +} + +type ClusterStatsNodesCount struct { + Total int `json:"total"` + MasterOnly int `json:"master_only"` + DataOnly int `json:"data_only"` + MasterData int `json:"master_data"` + Client int `json:"client"` +} + +type ClusterStatsNodesOsStats struct { + AvailableProcessors int `json:"available_processors"` + Mem *ClusterStatsNodesOsStatsMem `json:"mem"` + CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"` +} + +type ClusterStatsNodesOsStatsMem struct { + Total string `json:"total"` // e.g. "16gb" + TotalInBytes int64 `json:"total_in_bytes"` +} + +type ClusterStatsNodesOsStatsCPU struct { + Vendor string `json:"vendor"` + Model string `json:"model"` + MHz int `json:"mhz"` + TotalCores int `json:"total_cores"` + TotalSockets int `json:"total_sockets"` + CoresPerSocket int `json:"cores_per_socket"` + CacheSize string `json:"cache_size"` // e.g. 
"256b" + CacheSizeInBytes int64 `json:"cache_size_in_bytes"` + Count int `json:"count"` +} + +type ClusterStatsNodesProcessStats struct { + CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"` + OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"` +} + +type ClusterStatsNodesProcessStatsCPU struct { + Percent float64 `json:"percent"` +} + +type ClusterStatsNodesProcessStatsOpenFileDescriptors struct { + Min int64 `json:"min"` + Max int64 `json:"max"` + Avg int64 `json:"avg"` +} + +type ClusterStatsNodesJvmStats struct { + MaxUptime string `json:"max_uptime"` // e.g. "5h" + MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` + Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"` + Mem *ClusterStatsNodesJvmStatsMem `json:"mem"` + Threads int64 `json:"threads"` +} + +type ClusterStatsNodesJvmStatsVersion struct { + Version string `json:"version"` // e.g. "1.8.0_45" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.45-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + Count int `json:"count"` +} + +type ClusterStatsNodesJvmStatsMem struct { + HeapUsed string `json:"heap_used"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapMax string `json:"heap_max"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` +} + +type ClusterStatsNodesFsStats struct { + Path string `json:"path"` + Mount string `json:"mount"` + Dev string `json:"dev"` + Total string `json:"total"` // e.g. "930.7gb"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` // e.g. "930.7gb"` + FreeInBytes int64 `json:"free_in_bytes"` + Available string `json:"available"` // e.g. "930.7gb"` + AvailableInBytes int64 `json:"available_in_bytes"` + DiskReads int64 `json:"disk_reads"` + DiskWrites int64 `json:"disk_writes"` + DiskIOOp int64 `json:"disk_io_op"` + DiskReadSize string `json:"disk_read_size"` // e.g. "0b"` + DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"` + DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"` + DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"` + DiskIOSize string `json:"disk_io_size"` // e.g. "0b"` + DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"` + DiskQueue string `json:"disk_queue"` + DiskServiceTime string `json:"disk_service_time"` +} + +type ClusterStatsNodesPlugin struct { + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + URL string `json:"url"` + JVM bool `json:"jvm"` + Site bool `json:"site"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/connection.go b/vendor/gopkg.in/olivere/elastic.v5/connection.go new file mode 100644 index 0000000000000000000000000000000000000000..0f27a8756b2c1ce5922bcb2efc6b97e81e11a171 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/connection.go @@ -0,0 +1,90 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "sync" + "time" +) + +// conn represents a single connection to a node in a cluster. +type conn struct { + sync.RWMutex + nodeID string // node ID + url string + failures int + dead bool + deadSince *time.Time +} + +// newConn creates a new connection to the given URL. 
+func newConn(nodeID, url string) *conn { + c := &conn{ + nodeID: nodeID, + url: url, + } + return c +} + +// String returns a representation of the connection status. +func (c *conn) String() string { + c.RLock() + defer c.RUnlock() + return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince) +} + +// NodeID returns the ID of the node of this connection. +func (c *conn) NodeID() string { + c.RLock() + defer c.RUnlock() + return c.nodeID +} + +// URL returns the URL of this connection. +func (c *conn) URL() string { + c.RLock() + defer c.RUnlock() + return c.url +} + +// IsDead returns true if this connection is marked as dead, i.e. a previous +// request to the URL has been unsuccessful. +func (c *conn) IsDead() bool { + c.RLock() + defer c.RUnlock() + return c.dead +} + +// MarkAsDead marks this connection as dead, increments the failures +// counter and stores the current time in dead since. +func (c *conn) MarkAsDead() { + c.Lock() + c.dead = true + if c.deadSince == nil { + utcNow := time.Now().UTC() + c.deadSince = &utcNow + } + c.failures += 1 + c.Unlock() +} + +// MarkAsAlive marks this connection as eligible to be returned from the +// pool of connections by the selector. +func (c *conn) MarkAsAlive() { + c.Lock() + c.dead = false + c.Unlock() +} + +// MarkAsHealthy marks this connection as healthy, i.e. a request has been +// successfully performed with it. +func (c *conn) MarkAsHealthy() { + c.Lock() + c.dead = false + c.deadSince = nil + c.failures = 0 + c.Unlock() +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/count.go b/vendor/gopkg.in/olivere/elastic.v5/count.go new file mode 100644 index 0000000000000000000000000000000000000000..459ea0bffaa04210f3dc18d215ecf1c458552331 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/count.go @@ -0,0 +1,311 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// CountService is a convenient service for determining the +// number of documents in an index. Use SearchService with +// a SearchType of count for counting with queries etc. +type CountService struct { + client *Client + pretty bool + index []string + typ []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + defaultOperator string + df string + expandWildcards string + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + minScore interface{} + preference string + q string + query Query + routing string + bodyJson interface{} + bodyString string +} + +// NewCountService creates a new CountService. +func NewCountService(client *Client) *CountService { + return &CountService{ + client: client, + } +} + +// Index sets the names of the indices to restrict the results. +func (s *CountService) Index(index ...string) *CountService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// Type sets the types to use to restrict the results. +func (s *CountService) Type(typ ...string) *CountService { + if s.typ == nil { + s.typ = make([]string, 0) + } + s.typ = append(s.typ, typ...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. 
(This includes "_all" string +// or when no indices have been specified). +func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *CountService) Analyzer(analyzer string) *CountService { + s.analyzer = analyzer + return s +} + +// DefaultOperator specifies the default operator for query string query (AND or OR). +func (s *CountService) DefaultOperator(defaultOperator string) *CountService { + s.defaultOperator = defaultOperator + return s +} + +// Df specifies the field to use as default where no field prefix is given +// in the query string. +func (s *CountService) Df(df string) *CountService { + s.df = df + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *CountService) ExpandWildcards(expandWildcards string) *CountService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures (such as +// providing text to a numeric field) should be ignored. +func (s *CountService) Lenient(lenient bool) *CountService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// MinScore indicates to include only documents with a specific `_score` +// value in the result. +func (s *CountService) MinScore(minScore interface{}) *CountService { + s.minScore = minScore + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *CountService) Preference(preference string) *CountService { + s.preference = preference + return s +} + +// Q in the Lucene query string syntax. You can also use Query to pass +// a Query struct. +func (s *CountService) Q(q string) *CountService { + s.q = q + return s +} + +// Query specifies the query to pass. You can also pass a query string with Q. +func (s *CountService) Query(query Query) *CountService { + s.query = query + return s +} + +// Routing specifies the routing value. +func (s *CountService) Routing(routing string) *CountService { + s.routing = routing + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *CountService) Pretty(pretty bool) *CountService { + s.pretty = pretty + return s +} + +// BodyJson specifies the query to restrict the results specified with the +// Query DSL (optional). The interface{} will be serialized to a JSON document, +// so use a map[string]interface{}. +func (s *CountService) BodyJson(body interface{}) *CountService { + s.bodyJson = body + return s +} + +// Body specifies a query to restrict the results specified with +// the Query DSL (optional). 
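+//
+// Alternatively, a count can be expressed entirely through the fluent
+// setters above (illustrative; index name and query string are
+// placeholders):
+//
+//   n, err := client.Count("blobs").Q("type:blob").Do(ctx)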
+func (s *CountService) BodyString(body string) *CountService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *CountService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_count", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_all/_count" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.minScore != nil { + params.Set("min_score", fmt.Sprintf("%v", s.minScore)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *CountService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *CountService) Do(ctx context.Context) (int64, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return 0, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return 0, err + } + + // Setup HTTP request body + var body interface{} + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return 0, err + } + query := make(map[string]interface{}) + query["query"] = src + body = query + } else if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return 0, err + } + + // Return result + ret := new(CountResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return 0, err + } + if ret != nil { + return ret.Count, nil + } + + return int64(0), nil +} + +// CountResponse is the response of using the Count API. 
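+// Note that CountService.Do returns only the Count value to the
+// caller; shard information is decoded into this struct but not
+// exposed through Do.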
+type CountResponse struct { + Count int64 `json:"count"` + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/decoder.go b/vendor/gopkg.in/olivere/elastic.v5/decoder.go new file mode 100644 index 0000000000000000000000000000000000000000..9cd2cf720db7a2c7e2c1009806337233f22f5a0c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/decoder.go @@ -0,0 +1,26 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" +) + +// Decoder is used to decode responses from Elasticsearch. +// Users of elastic can implement their own marshaler for advanced purposes +// and set them per Client (see SetDecoder). If none is specified, +// DefaultDecoder is used. +type Decoder interface { + Decode(data []byte, v interface{}) error +} + +// DefaultDecoder uses json.Unmarshal from the Go standard library +// to decode JSON data. +type DefaultDecoder struct{} + +// Decode decodes with json.Unmarshal from the Go standard library. +func (u *DefaultDecoder) Decode(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete.go b/vendor/gopkg.in/olivere/elastic.v5/delete.go new file mode 100644 index 0000000000000000000000000000000000000000..01ca5e289ae8b88ca783adb7b44ad950ca4b8fdb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete.go @@ -0,0 +1,209 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// DeleteService allows to delete a typed JSON document from a specified +// index based on its id. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete.html +// for details. +type DeleteService struct { + client *Client + pretty bool + id string + index string + typ string + routing string + timeout string + version interface{} + versionType string + waitForActiveShards string + parent string + refresh string +} + +// NewDeleteService creates a new DeleteService. +func NewDeleteService(client *Client) *DeleteService { + return &DeleteService{ + client: client, + } +} + +// Type is the type of the document. +func (s *DeleteService) Type(typ string) *DeleteService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *DeleteService) Id(id string) *DeleteService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *DeleteService) Index(index string) *DeleteService { + s.index = index + return s +} + +// Routing is a specific routing value. +func (s *DeleteService) Routing(routing string) *DeleteService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *DeleteService) Timeout(timeout string) *DeleteService { + s.timeout = timeout + return s +} + +// Version is an explicit version number for concurrency control. +func (s *DeleteService) Version(version interface{}) *DeleteService { + s.version = version + return s +} + +// VersionType is a specific version type. 
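+// Typical values include "internal", "external" and "external_gte".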
+func (s *DeleteService) VersionType(versionType string) *DeleteService { + s.versionType = versionType + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the delete operation. Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Parent is the ID of parent document. +func (s *DeleteService) Parent(parent string) *DeleteService { + s.parent = parent + return s +} + +// Refresh the index after performing the operation. +func (s *DeleteService) Refresh(refresh string) *DeleteService { + s.refresh = refresh + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *DeleteService) Pretty(pretty bool) *DeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "index": s.index, + "type": s.typ, + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(DeleteResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete request. + +// DeleteResponse is the outcome of running DeleteService.Do. 
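+//
+// Illustrative example (index, type and id are placeholders; ctx and
+// client are assumed to exist):
+//
+//   res, err := client.Delete().Index("blobs").Type("blob").Id("1").Do(ctx)
+//   if err != nil {
+//       // handle error
+//   }
+//   if res.Found {
+//       fmt.Printf("deleted version %d\n", res.Version)
+//   }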
+type DeleteResponse struct { + // TODO _shards { total, failed, successful } + Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int64 `json:"_version"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go new file mode 100644 index 0000000000000000000000000000000000000000..25ae53ba1149b36fbaf5b53cfa8f38ded78acf5e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete_by_query.go @@ -0,0 +1,649 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// DeleteByQueryService deletes documents that match a query. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-delete-by-query.html. +type DeleteByQueryService struct { + client *Client + index []string + typ []string + query Query + body interface{} + xSource []string + xSourceExclude []string + xSourceInclude []string + analyzer string + analyzeWildcard *bool + allowNoIndices *bool + conflicts string + defaultOperator string + df string + docvalueFields []string + expandWildcards string + explain *bool + from *int + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + preference string + q string + refresh string + requestCache *bool + requestsPerSecond *int + routing []string + scroll string + scrollSize *int + searchTimeout string + searchType string + size *int + sort []string + stats []string + storedFields []string + suggestField string + suggestMode string + suggestSize *int + suggestText string + terminateAfter *int + timeout string + trackScores *bool + version *bool + waitForActiveShards string + waitForCompletion *bool + pretty bool +} + +// NewDeleteByQueryService creates a new DeleteByQueryService. +// You typically use the client's DeleteByQuery to get a reference to +// the service. +func NewDeleteByQueryService(client *Client) *DeleteByQueryService { + builder := &DeleteByQueryService{ + client: client, + } + return builder +} + +// Index sets the indices on which to perform the delete operation. +func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService { + s.index = append(s.index, index...) + return s +} + +// Type limits the delete operation to the given types. +func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService { + s.typ = append(s.typ, typ...) + return s +} + +// XSource is true or false to return the _source field or not, +// or a list of fields to return. +func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// XSourceExclude represents a list of fields to exclude from the returned _source field. +func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// XSourceInclude represents a list of fields to extract and return from the _source field. +func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// Analyzer to use for the query string. 
+func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { + s.analyzer = analyzer + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices (including the _all string +// or when no indices have been specified). +func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { + s.allowNoIndices = &allow + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). +func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService { + s.conflicts = "abort" + return s +} + +// ProceedOnVersionConflict proceeds with the request in case of version conflicts. +// It is an alias to setting Conflicts("proceed"). +func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService { + s.conflicts = "proceed" + return s +} + +// DefaultOperator for query string query (AND or OR). +func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { + s.defaultOperator = defaultOperator + return s +} + +// DF is the field to use as default where no field prefix is given in the query string. +func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// DefaultField is the field to use as default where no field prefix is given in the query string. +// It is an alias to the DF func. +func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService { + s.df = defaultField + return s +} + +// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. +func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService { + s.docvalueFields = docvalueFields + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. It can be "open" or "closed". +func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { + s.expandWildcards = expand + return s +} + +// Explain specifies whether to return detailed information about score +// computation as part of a hit. +func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService { + s.explain = &explain + return s +} + +// From is the starting offset (default: 0). +func (s *DeleteByQueryService) From(from int) *DeleteByQueryService { + s.from = &from + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService { + s.ignoreUnavailable = &ignore + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored.
+func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// Preference specifies the node or shard the operation should be performed on +// (default: random). +func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService { + s.preference = preference + return s +} + +// Q specifies the query in Lucene query string syntax. You can also use +// Query to programmatically specify the query. +func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService { + s.q = query + return s +} + +// QueryString is an alias to Q. Notice that you can also use Query to +// programmatically set the query. +func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { + s.q = query + return s +} + +// Query sets the query programmatically. +func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { + s.query = query + return s +} + +// Refresh indicates whether the effected indexes should be refreshed. +func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService { + s.refresh = refresh + return s +} + +// RequestCache specifies if request cache should be used for this request +// or not, defaults to index level setting. +func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService { + s.requestCache = &requestCache + return s +} + +// RequestsPerSecond sets the throttle on this request in sub-requests per second. +// -1 means set no throttle as does "unlimited" which is the only non-float this accepts. +func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService { + s.requestsPerSecond = &requestsPerSecond + return s +} + +// Routing is a list of specific routing values. +func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService { + s.routing = append(s.routing, routing...) + return s +} + +// Scroll specifies how long a consistent view of the index should be maintained +// for scrolled search. +func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService { + s.scroll = scroll + return s +} + +// ScrollSize is the size on the scroll request powering the update_by_query. +func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService { + s.scrollSize = &scrollSize + return s +} + +// SearchTimeout defines an explicit timeout for each search request. +// Defaults to no timeout. +func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService { + s.searchTimeout = searchTimeout + return s +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch". +func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService { + s.searchType = searchType + return s +} + +// Size represents the number of hits to return (default: 10). +func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService { + s.size = &size + return s +} + +// Sort is a list of <field>:<direction> pairs. +func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService { + s.sort = append(s.sort, sort...) + return s +} + +// SortByField adds a sort order. 
+func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService { + if ascending { + s.sort = append(s.sort, fmt.Sprintf("%s:asc", field)) + } else { + s.sort = append(s.sort, fmt.Sprintf("%s:desc", field)) + } + return s +} + +// Stats specifies specific tag(s) of the request for logging and statistical purposes. +func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService { + s.stats = append(s.stats, stats...) + return s +} + +// StoredFields specifies the list of stored fields to return as part of a hit. +func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService { + s.storedFields = storedFields + return s +} + +// SuggestField specifies which field to use for suggestions. +func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService { + s.suggestField = suggestField + return s +} + +// SuggestMode specifies the suggest mode. Possible values are +// "missing", "popular", and "always". +func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService { + s.suggestMode = suggestMode + return s +} + +// SuggestSize specifies how many suggestions to return in response. +func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService { + s.suggestSize = &suggestSize + return s +} + +// SuggestText specifies the source text for which the suggestions should be returned. +func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService { + s.suggestText = suggestText + return s +} + +// TerminateAfter indicates the maximum number of documents to collect +// for each shard, upon reaching which the query execution will terminate early. +func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService { + s.terminateAfter = &terminateAfter + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. +func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { + s.timeout = timeout + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService { + s.timeout = fmt.Sprintf("%dms", timeoutInMillis) + return s +} + +// TrackScores indicates whether to calculate and return scores even if +// they are not used for sorting. +func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService { + s.trackScores = &trackScores + return s +} + +// Version specifies whether to return document version as part of a hit. +func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService { + s.version = &version + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active before proceeding +// with the update by query operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal +// to the total number of copies for the shard (number of replicas + 1). +func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// WaitForCompletion indicates if the request should block until the reindex is complete. +func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indents the JSON output from Elasticsearch. 
+func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { + s.pretty = pretty + return s +} + +// Body specifies the body of the request. It overrides data being specified via SearchService. +func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService { + s.body = body + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteByQueryService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else { + path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.explain != nil { + params.Set("explain", fmt.Sprintf("%v", *s.explain)) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if len(s.docvalueFields) > 0 { + params.Set("docvalue_fields", strings.Join(s.docvalueFields, ",")) + } + if s.from != nil { + params.Set("from", fmt.Sprintf("%d", *s.from)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.conflicts != "" { + params.Set("conflicts", s.conflicts) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.scroll != "" { + params.Set("scroll", s.scroll) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.searchTimeout != "" { + params.Set("search_timeout", s.searchTimeout) + } + if s.size != nil { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if len(s.sort) > 0 { + params.Set("sort", strings.Join(s.sort, ",")) + } + if s.terminateAfter != nil { + params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) + } + if len(s.stats) > 0 { + params.Set("stats", strings.Join(s.stats, ",")) + } + if s.suggestField != "" { + params.Set("suggest_field", s.suggestField) + } + if s.suggestMode != "" { + params.Set("suggest_mode", s.suggestMode) + } + if s.suggestSize != nil { + params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) + } + if s.suggestText != "" { + params.Set("suggest_text", s.suggestText) + } + if 
s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.trackScores != nil { + params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", *s.version)) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.scrollSize != nil { + params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *DeleteByQueryService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the delete-by-query operation. +func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Set body if there is a query set + var body interface{} + if s.body != nil { + body = s.body + } else if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + body = map[string]interface{}{ + "query": src, + } + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(BulkIndexByScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// BulkIndexByScrollResponse is the outcome of executing Do with +// DeleteByQueryService and UpdateByQueryService. 
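For orientation on the fields of BulkIndexByScrollResponse below, here is a brief, illustrative sketch of a delete-by-query call; it is a usage sketch rather than upstream code, the index name and query string are hypothetical, and `client`/`ctx` are assumed to be an initialized *elastic.Client and a context.Context.

    // Delete every document in the hypothetical "twitter" index whose
    // "user" field matches "olivere", proceeding past version conflicts.
    res, err := elastic.NewDeleteByQueryService(client).
        Index("twitter").
        Q("user:olivere").
        ProceedOnVersionConflict().
        Do(ctx)
    if err != nil {
        // Handle error
    }
    // res.Deleted, res.Batches and res.VersionConflicts summarize the run;
    // res.Failures lists per-document problems, if any.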
+type BulkIndexByScrollResponse struct { + Took int64 `json:"took"` + TimedOut bool `json:"timed_out"` + Total int64 `json:"total"` + Updated int64 `json:"updated"` + Created int64 `json:"created"` + Deleted int64 `json:"deleted"` + Batches int64 `json:"batches"` + VersionConflicts int64 `json:"version_conflicts"` + Noops int64 `json:"noops"` + Retries struct { + Bulk int64 `json:"bulk"` + Search int64 `json:"search"` + } `json:"retries"` + Throttled string `json:"throttled"` + ThrottledMillis int64 `json:"throttled_millis"` + RequestsPerSecond float64 `json:"requests_per_second"` + Canceled string `json:"canceled"` + ThrottledUntil string `json:"throttled_until"` + ThrottledUntilMillis int64 `json:"throttled_until_millis"` + Failures []bulkIndexByScrollResponseFailure `json:"failures"` +} + +type bulkIndexByScrollResponseFailure struct { + Index string `json:"index,omitempty"` + Type string `json:"type,omitempty"` + Id string `json:"id,omitempty"` + Status int `json:"status,omitempty"` + Shard int `json:"shard,omitempty"` + Node int `json:"node,omitempty"` + // TOOD "cause" contains exception details + // TOOD "reason" contains exception details +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/delete_template.go b/vendor/gopkg.in/olivere/elastic.v5/delete_template.go new file mode 100644 index 0000000000000000000000000000000000000000..87c2312bfb09bfa1504f90eded2d528733693aa6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/delete_template.go @@ -0,0 +1,110 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// DeleteTemplateService deletes a search template. More information can +// be found at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html. +type DeleteTemplateService struct { + client *Client + pretty bool + id string + version *int + versionType string +} + +// NewDeleteTemplateService creates a new DeleteTemplateService. +func NewDeleteTemplateService(client *Client) *DeleteTemplateService { + return &DeleteTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *DeleteTemplateService) Id(id string) *DeleteTemplateService { + s.id = id + return s +} + +// Version an explicit version number for concurrency control. +func (s *DeleteTemplateService) Version(version int) *DeleteTemplateService { + s.version = &version + return s +} + +// VersionType specifies a version type. +func (s *DeleteTemplateService) VersionType(versionType string) *DeleteTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *DeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *DeleteTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *DeleteTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(AcknowledgedResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/doc.go b/vendor/gopkg.in/olivere/elastic.v5/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..ea16d66982ba541d122f426371422681faae0305 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/doc.go @@ -0,0 +1,51 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +/* +Package elastic provides an interface to the Elasticsearch server +(https://www.elastic.co/products/elasticsearch). + +The first thing you do is to create a Client. If you have Elasticsearch +installed and running with its default settings +(i.e. available at http://127.0.0.1:9200), all you need to do is: + + client, err := elastic.NewClient() + if err != nil { + // Handle error + } + +If your Elasticsearch server is running on a different IP and/or port, +just provide a URL to NewClient: + + // Create a client and connect to http://192.168.2.10:9201 + client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201")) + if err != nil { + // Handle error + } + +You can pass many more configuration parameters to NewClient. Review the +documentation of NewClient for more information. + +If no Elasticsearch server is available, services will fail when creating +a new request and will return ErrNoClient. + +A Client provides services. The services usually come with a variety of +methods to prepare the query and a Do function to execute it against the +Elasticsearch REST interface and return a response. Here is an example +of the IndexExists service that checks if a given index already exists. + + exists, err := client.IndexExists("twitter").Do(context.Background()) + if err != nil { + // Handle error + } + if !exists { + // Index does not exist yet. + } + +Look up the documentation for Client to get an idea of the services provided +and what kinds of responses you get when executing the Do function of a service. +Also see the wiki on Github for more details. + +*/ +package elastic diff --git a/vendor/gopkg.in/olivere/elastic.v5/errors.go b/vendor/gopkg.in/olivere/elastic.v5/errors.go new file mode 100644 index 0000000000000000000000000000000000000000..009123531905d26b22d778b6bf4613e84a914132 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/errors.go @@ -0,0 +1,141 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" +) + +// checkResponse will return an error if the request/response indicates +// an error returned from Elasticsearch. +// +// HTTP status codes between in the range [200..299] are considered successful. +// All other errors are considered errors except they are specified in +// ignoreErrors. This is necessary because for some services, HTTP status 404 +// is a valid response from Elasticsearch (e.g. the Exists service). +// +// The func tries to parse error details as returned from Elasticsearch +// and encapsulates them in type elastic.Error. +func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error { + // 200-299 are valid status codes + if res.StatusCode >= 200 && res.StatusCode <= 299 { + return nil + } + // Ignore certain errors? + for _, code := range ignoreErrors { + if code == res.StatusCode { + return nil + } + } + return createResponseError(res) +} + +// createResponseError creates an Error structure from the HTTP response, +// its status code and the error information sent by Elasticsearch. +func createResponseError(res *http.Response) error { + if res.Body == nil { + return &Error{Status: res.StatusCode} + } + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return &Error{Status: res.StatusCode} + } + errReply := new(Error) + err = json.Unmarshal(data, errReply) + if err != nil { + return &Error{Status: res.StatusCode} + } + if errReply != nil { + if errReply.Status == 0 { + errReply.Status = res.StatusCode + } + return errReply + } + return &Error{Status: res.StatusCode} +} + +// Error encapsulates error details as returned from Elasticsearch. +type Error struct { + Status int `json:"status"` + Details *ErrorDetails `json:"error,omitempty"` +} + +// ErrorDetails encapsulate error details from Elasticsearch. +// It is used in e.g. elastic.Error and elastic.BulkResponseItem. +type ErrorDetails struct { + Type string `json:"type"` + Reason string `json:"reason"` + ResourceType string `json:"resource.type,omitempty"` + ResourceId string `json:"resource.id,omitempty"` + Index string `json:"index,omitempty"` + Phase string `json:"phase,omitempty"` + Grouped bool `json:"grouped,omitempty"` + CausedBy map[string]interface{} `json:"caused_by,omitempty"` + RootCause []*ErrorDetails `json:"root_cause,omitempty"` + FailedShards []map[string]interface{} `json:"failed_shards,omitempty"` +} + +// Error returns a string representation of the error. +func (e *Error) Error() string { + if e.Details != nil && e.Details.Reason != "" { + return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type) + } else { + return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status)) + } +} + +// IsNotFound returns true if the given error indicates that Elasticsearch +// returned HTTP status 404. The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). +func IsNotFound(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusNotFound + case *Error: + return e.Status == http.StatusNotFound + case Error: + return e.Status == http.StatusNotFound + case int: + return e == http.StatusNotFound + } + return false +} + +// IsTimeout returns true if the given error indicates that Elasticsearch +// returned HTTP status 408. 
The err parameter can be of type *elastic.Error, +// elastic.Error, *http.Response or int (indicating the HTTP status code). +func IsTimeout(err interface{}) bool { + switch e := err.(type) { + case *http.Response: + return e.StatusCode == http.StatusRequestTimeout + case *Error: + return e.Status == http.StatusRequestTimeout + case Error: + return e.Status == http.StatusRequestTimeout + case int: + return e == http.StatusRequestTimeout + } + return false +} + +// -- General errors -- + +// shardsInfo represents information from a shard. +type shardsInfo struct { + Total int `json:"total"` + Successful int `json:"successful"` + Failed int `json:"failed"` +} + +// shardOperationFailure represents a shard failure. +type shardOperationFailure struct { + Shard int `json:"shard"` + Index string `json:"index"` + Status string `json:"status"` + // "reason" +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/exists.go b/vendor/gopkg.in/olivere/elastic.v5/exists.go new file mode 100644 index 0000000000000000000000000000000000000000..f9e9a1679e9b9369b709fd4c07aac77dd422de82 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/exists.go @@ -0,0 +1,177 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ExistsService checks for the existence of a document using HEAD. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html +// for details. +type ExistsService struct { + client *Client + pretty bool + id string + index string + typ string + preference string + realtime *bool + refresh string + routing string + parent string +} + +// NewExistsService creates a new ExistsService. +func NewExistsService(client *Client) *ExistsService { + return &ExistsService{ + client: client, + } +} + +// Id is the document ID. +func (s *ExistsService) Id(id string) *ExistsService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *ExistsService) Index(index string) *ExistsService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *ExistsService) Type(typ string) *ExistsService { + s.typ = typ + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExistsService) Preference(preference string) *ExistsService { + s.preference = preference + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *ExistsService) Realtime(realtime bool) *ExistsService { + s.realtime = &realtime + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *ExistsService) Refresh(refresh string) *ExistsService { + s.refresh = refresh + return s +} + +// Routing is a specific routing value. +func (s *ExistsService) Routing(routing string) *ExistsService { + s.routing = routing + return s +} + +// Parent is the ID of the parent document. +func (s *ExistsService) Parent(parent string) *ExistsService { + s.parent = parent + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
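Taken together, the ExistsService options above are typically combined as in the following sketch (illustrative only, not upstream code; index, type and ID are hypothetical, and `client`/`ctx` are assumed):

    // Check for the existence of a document via a HEAD request.
    found, err := elastic.NewExistsService(client).
        Index("twitter").
        Type("tweet").
        Id("1").
        Do(ctx)
    if err != nil {
        // Handle error
    }
    if !found {
        // The document does not exist (Elasticsearch answered HTTP 404).
    }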
+func (s *ExistsService) Pretty(pretty bool) *ExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *ExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExistsService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *ExistsService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/explain.go b/vendor/gopkg.in/olivere/elastic.v5/explain.go new file mode 100644 index 0000000000000000000000000000000000000000..72e60a9d7432fbd786efcc0195e7ddf340292efe --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/explain.go @@ -0,0 +1,322 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// ExplainService computes a score explanation for a query and +// a specific document. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-explain.html. +type ExplainService struct { + client *Client + pretty bool + id string + index string + typ string + q string + routing string + lenient *bool + analyzer string + df string + fields []string + lowercaseExpandedTerms *bool + xSourceInclude []string + analyzeWildcard *bool + parent string + preference string + xSource []string + defaultOperator string + xSourceExclude []string + source string + bodyJson interface{} + bodyString string +} + +// NewExplainService creates a new ExplainService. 
+func NewExplainService(client *Client) *ExplainService { + return &ExplainService{ + client: client, + xSource: make([]string, 0), + xSourceExclude: make([]string, 0), + fields: make([]string, 0), + xSourceInclude: make([]string, 0), + } +} + +// Id is the document ID. +func (s *ExplainService) Id(id string) *ExplainService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *ExplainService) Index(index string) *ExplainService { + s.index = index + return s +} + +// Type is the type of the document. +func (s *ExplainService) Type(typ string) *ExplainService { + s.typ = typ + return s +} + +// Source is the URL-encoded query definition (instead of using the request body). +func (s *ExplainService) Source(source string) *ExplainService { + s.source = source + return s +} + +// XSourceExclude is a list of fields to exclude from the returned _source field. +func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *ExplainService) Lenient(lenient bool) *ExplainService { + s.lenient = &lenient + return s +} + +// Query in the Lucene query string syntax. +func (s *ExplainService) Q(q string) *ExplainService { + s.q = q + return s +} + +// Routing sets a specific routing value. +func (s *ExplainService) Routing(routing string) *ExplainService { + s.routing = routing + return s +} + +// AnalyzeWildcard specifies whether wildcards and prefix queries +// in the query string query should be analyzed (default: false). +func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer is the analyzer for the query string query. +func (s *ExplainService) Analyzer(analyzer string) *ExplainService { + s.analyzer = analyzer + return s +} + +// Df is the default field for query string query (default: _all). +func (s *ExplainService) Df(df string) *ExplainService { + s.df = df + return s +} + +// Fields is a list of fields to return in the response. +func (s *ExplainService) Fields(fields ...string) *ExplainService { + s.fields = append(s.fields, fields...) + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// XSourceInclude is a list of fields to extract and return from the _source field. +func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// DefaultOperator is the default operator for query string query (AND or OR). +func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService { + s.defaultOperator = defaultOperator + return s +} + +// Parent is the ID of the parent document. +func (s *ExplainService) Parent(parent string) *ExplainService { + s.parent = parent + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *ExplainService) Preference(preference string) *ExplainService { + s.preference = preference + return s +} + +// XSource is true or false to return the _source field or not, or a list of fields to return. 
+func (s *ExplainService) XSource(xSource ...string) *ExplainService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ExplainService) Pretty(pretty bool) *ExplainService { + s.pretty = pretty + return s +} + +// Query sets a query definition using the Query DSL. +func (s *ExplainService) Query(query Query) *ExplainService { + src, err := query.Source() + if err != nil { + // Do nothing in case of an error + return s + } + body := make(map[string]interface{}) + body["query"] = src + s.bodyJson = body + return s +} + +// BodyJson sets the query definition using the Query DSL. +func (s *ExplainService) BodyJson(body interface{}) *ExplainService { + s.bodyJson = body + return s +} + +// BodyString sets the query definition using the Query DSL as a string. +func (s *ExplainService) BodyString(body string) *ExplainService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *ExplainService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.source != "" { + params.Set("source", s.source) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.df != "" { + params.Set("df", s.df) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ExplainService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
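As a reference point for the Do implementation that follows, this sketch shows a complete explain request (illustrative only, not upstream code; the index, type, ID and query string are hypothetical, and `client`/`ctx` are assumed):

    // Ask Elasticsearch why document "1" does or does not match the query.
    res, err := elastic.NewExplainService(client).
        Index("twitter").
        Type("tweet").
        Id("1").
        Q("message:golang").
        Do(ctx)
    if err != nil {
        // Handle error
    }
    if res.Matched {
        // res.Explanation contains the score breakdown as a generic map.
    }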
+func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(ExplainResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// ExplainResponse is the response of ExplainService.Do. +type ExplainResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Matched bool `json:"matched"` + Explanation map[string]interface{} `json:"explanation"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go new file mode 100644 index 0000000000000000000000000000000000000000..59a453c9e4cd631c8ac738e2e89827f655d293c9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/fetch_source_context.go @@ -0,0 +1,74 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/url" + "strings" +) + +type FetchSourceContext struct { + fetchSource bool + transformSource bool + includes []string + excludes []string +} + +func NewFetchSourceContext(fetchSource bool) *FetchSourceContext { + return &FetchSourceContext{ + fetchSource: fetchSource, + includes: make([]string, 0), + excludes: make([]string, 0), + } +} + +func (fsc *FetchSourceContext) FetchSource() bool { + return fsc.fetchSource +} + +func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) { + fsc.fetchSource = fetchSource +} + +func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext { + fsc.includes = append(fsc.includes, includes...) + return fsc +} + +func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext { + fsc.excludes = append(fsc.excludes, excludes...) + return fsc +} + +func (fsc *FetchSourceContext) TransformSource(transformSource bool) *FetchSourceContext { + fsc.transformSource = transformSource + return fsc +} + +func (fsc *FetchSourceContext) Source() (interface{}, error) { + if !fsc.fetchSource { + return false, nil + } + return map[string]interface{}{ + "includes": fsc.includes, + "excludes": fsc.excludes, + }, nil +} + +// Query returns the parameters in a form suitable for a URL query string. +func (fsc *FetchSourceContext) Query() url.Values { + params := url.Values{} + if !fsc.fetchSource { + params.Add("_source", "false") + return params + } + if len(fsc.includes) > 0 { + params.Add("_source_include", strings.Join(fsc.includes, ",")) + } + if len(fsc.excludes) > 0 { + params.Add("_source_exclude", strings.Join(fsc.excludes, ",")) + } + return params +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/field_stats.go b/vendor/gopkg.in/olivere/elastic.v5/field_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..303122edc62a4556680bb3cb22182f924c1d41bb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/field_stats.go @@ -0,0 +1,260 @@ +// Copyright 2012-present Oliver Eilhard. 
All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +const ( + FieldStatsClusterLevel = "cluster" + FieldStatsIndicesLevel = "indices" +) + +// FieldStatsService allows finding statistical properties of a field without executing a search, +// but looking up measurements that are natively available in the Lucene index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-field-stats.html +// for details +type FieldStatsService struct { + client *Client + pretty bool + level string + index []string + allowNoIndices *bool + expandWildcards string + fields []string + ignoreUnavailable *bool + bodyJson interface{} + bodyString string +} + +// NewFieldStatsService creates a new FieldStatsService +func NewFieldStatsService(client *Client) *FieldStatsService { + return &FieldStatsService{ + client: client, + index: make([]string, 0), + fields: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *FieldStatsService) Index(index ...string) *FieldStatsService { + s.index = append(s.index, index...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *FieldStatsService) AllowNoIndices(allowNoIndices bool) *FieldStatsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *FieldStatsService) ExpandWildcards(expandWildcards string) *FieldStatsService { + s.expandWildcards = expandWildcards + return s +} + +// Fields is a list of fields for to get field statistics +// for (min value, max value, and more). +func (s *FieldStatsService) Fields(fields ...string) *FieldStatsService { + s.fields = append(s.fields, fields...) + return s +} + +// IgnoreUnavailable is documented as: Whether specified concrete indices should be ignored when unavailable (missing or closed). +func (s *FieldStatsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldStatsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Level sets if stats should be returned on a per index level or on a cluster wide level; +// should be one of 'cluster' or 'indices'; defaults to former +func (s *FieldStatsService) Level(level string) *FieldStatsService { + s.level = level + return s +} + +// ClusterLevel is a helper that sets Level to "cluster". +func (s *FieldStatsService) ClusterLevel() *FieldStatsService { + s.level = FieldStatsClusterLevel + return s +} + +// IndicesLevel is a helper that sets Level to "indices". +func (s *FieldStatsService) IndicesLevel() *FieldStatsService { + s.level = FieldStatsIndicesLevel + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *FieldStatsService) Pretty(pretty bool) *FieldStatsService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. 
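As an illustration of such a body, the sketch below uses the FieldStatsRequest, FieldStatsConstraints and FieldStatsComparison types defined later in this file (illustrative only, not upstream code; the field name and date bound are hypothetical, and `client`/`ctx` are assumed):

    // Restrict the stats to indices whose minimum "creation_date" is
    // on or after the given (hypothetical) timestamp.
    req := &elastic.FieldStatsRequest{
        Fields: []string{"creation_date"},
        IndexConstraints: map[string]*elastic.FieldStatsConstraints{
            "creation_date": {
                Min: &elastic.FieldStatsComparison{Gte: "2016-01-01T00:00:00.000Z"},
            },
        },
    }
    res, err := elastic.NewFieldStatsService(client).
        IndicesLevel().
        BodyJson(req).
        Do(ctx)
    if err != nil {
        // Handle error
    }
    // res.Indices maps index names to per-field statistics.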
+func (s *FieldStatsService) BodyJson(body interface{}) *FieldStatsService { + s.bodyJson = body + return s +} + +// BodyString is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. +func (s *FieldStatsService) BodyString(body string) *FieldStatsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *FieldStatsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_field_stats", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_field_stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.level != "" { + params.Set("level", s.level) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *FieldStatsService) Validate() error { + var invalid []string + if s.level != "" && (s.level != FieldStatsIndicesLevel && s.level != FieldStatsClusterLevel) { + invalid = append(invalid, "Level") + } + if len(invalid) != 0 { + return fmt.Errorf("missing or invalid required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *FieldStatsService) Do(ctx context.Context) (*FieldStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body, http.StatusNotFound) + if err != nil { + return nil, err + } + + // TODO(oe): Is 404 really a valid response here? + if res.StatusCode == http.StatusNotFound { + return &FieldStatsResponse{make(map[string]IndexFieldStats)}, nil + } + + // Return operation response + ret := new(FieldStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Request -- + +// FieldStatsRequest can be used to set up the body to be used in the +// Field Stats API. +type FieldStatsRequest struct { + Fields []string `json:"fields"` + IndexConstraints map[string]*FieldStatsConstraints `json:"index_constraints,omitempty"` +} + +// FieldStatsConstraints is a constraint on a field. +type FieldStatsConstraints struct { + Min *FieldStatsComparison `json:"min_value,omitempty"` + Max *FieldStatsComparison `json:"max_value,omitempty"` +} + +// FieldStatsComparison contain all comparison operations that can be used +// in FieldStatsConstraints. 
+type FieldStatsComparison struct { + Lte interface{} `json:"lte,omitempty"` + Lt interface{} `json:"lt,omitempty"` + Gte interface{} `json:"gte,omitempty"` + Gt interface{} `json:"gt,omitempty"` +} + +// -- Response -- + +// FieldStatsResponse is the response body content +type FieldStatsResponse struct { + Indices map[string]IndexFieldStats `json:"indices,omitempty"` +} + +// IndexFieldStats contains field stats for an index +type IndexFieldStats struct { + Fields map[string]FieldStats `json:"fields,omitempty"` +} + +// FieldStats contains stats of an individual field +type FieldStats struct { + Type string `json:"type"` + MaxDoc int64 `json:"max_doc"` + DocCount int64 `json:"doc_count"` + Density int64 `json:"density"` + SumDocFrequeny int64 `json:"sum_doc_freq"` + SumTotalTermFrequency int64 `json:"sum_total_term_freq"` + Searchable bool `json:"searchable"` + Aggregatable bool `json:"aggregatable"` + MinValue interface{} `json:"min_value"` + MinValueAsString string `json:"min_value_as_string"` + MaxValue interface{} `json:"max_value"` + MaxValueAsString string `json:"max_value_as_string"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/geo_point.go b/vendor/gopkg.in/olivere/elastic.v5/geo_point.go new file mode 100644 index 0000000000000000000000000000000000000000..fb243671d2c63242e87fcb9a8a680afbbd5c944d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/geo_point.go @@ -0,0 +1,48 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strconv" + "strings" +) + +// GeoPoint is a geographic position described via latitude and longitude. +type GeoPoint struct { + Lat float64 `json:"lat"` + Lon float64 `json:"lon"` +} + +// Source returns the object to be serialized in Elasticsearch DSL. +func (pt *GeoPoint) Source() map[string]float64 { + return map[string]float64{ + "lat": pt.Lat, + "lon": pt.Lon, + } +} + +// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude. +func GeoPointFromLatLon(lat, lon float64) *GeoPoint { + return &GeoPoint{Lat: lat, Lon: lon} +} + +// GeoPointFromString initializes a new GeoPoint by a string that is +// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091". +func GeoPointFromString(latLon string) (*GeoPoint, error) { + latlon := strings.SplitN(latLon, ",", 2) + if len(latlon) != 2 { + return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon) + } + lat, err := strconv.ParseFloat(latlon[0], 64) + if err != nil { + return nil, err + } + lon, err := strconv.ParseFloat(latlon[1], 64) + if err != nil { + return nil, err + } + return &GeoPoint{Lat: lat, Lon: lon}, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/get.go b/vendor/gopkg.in/olivere/elastic.v5/get.go new file mode 100644 index 0000000000000000000000000000000000000000..6d45aa575506338672c6394ff6f72215959d0db4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/get.go @@ -0,0 +1,257 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// GetService allows to get a typed JSON document from the index based +// on its id. 
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-get.html +// for details. +type GetService struct { + client *Client + pretty bool + index string + typ string + id string + routing string + preference string + storedFields []string + refresh string + realtime *bool + fsc *FetchSourceContext + version interface{} + versionType string + parent string + ignoreErrorsOnGeneratedFields *bool +} + +// NewGetService creates a new GetService. +func NewGetService(client *Client) *GetService { + return &GetService{ + client: client, + typ: "_all", + } +} + +// Index is the name of the index. +func (s *GetService) Index(index string) *GetService { + s.index = index + return s +} + +// Type is the type of the document (use `_all` to fetch the first document +// matching the ID across all types). +func (s *GetService) Type(typ string) *GetService { + s.typ = typ + return s +} + +// Id is the document ID. +func (s *GetService) Id(id string) *GetService { + s.id = id + return s +} + +// Parent is the ID of the parent document. +func (s *GetService) Parent(parent string) *GetService { + s.parent = parent + return s +} + +// Routing is the specific routing value. +func (s *GetService) Routing(routing string) *GetService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). +func (s *GetService) Preference(preference string) *GetService { + s.preference = preference + return s +} + +// StoredFields is a list of fields to return in the response. +func (s *GetService) StoredFields(storedFields ...string) *GetService { + s.storedFields = append(s.storedFields, storedFields...) + return s +} + +func (s *GetService) FetchSource(fetchSource bool) *GetService { + if s.fsc == nil { + s.fsc = NewFetchSourceContext(fetchSource) + } else { + s.fsc.SetFetchSource(fetchSource) + } + return s +} + +func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService { + s.fsc = fetchSourceContext + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *GetService) Refresh(refresh string) *GetService { + s.refresh = refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *GetService) Realtime(realtime bool) *GetService { + s.realtime = &realtime + return s +} + +// VersionType is the specific version type. +func (s *GetService) VersionType(versionType string) *GetService { + s.versionType = versionType + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetService) Version(version interface{}) *GetService { + s.version = version + return s +} + +// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that +// are generated if the transaction log is accessed. +func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService { + s.ignoreErrorsOnGeneratedFields = &ignore + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *GetService) Pretty(pretty bool) *GetService { + s.pretty = pretty + return s +} + +// Validate checks if the operation is valid. 
+func (s *GetService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// buildURL builds the URL for the operation. +func (s *GetService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.ignoreErrorsOnGeneratedFields != nil { + params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields)) + } + if s.fsc != nil { + for k, values := range s.fsc.Query() { + params.Add(k, strings.Join(values, ",")) + } + } + return path, params, nil +} + +// Do executes the operation. +func (s *GetService) Do(ctx context.Context) (*GetResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(GetResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a get request. + +// GetResult is the outcome of GetService.Do. +type GetResult struct { + Index string `json:"_index"` // index meta field + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // id meta field + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Source *json.RawMessage `json:"_source,omitempty"` + Found bool `json:"found,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + //Error string `json:"error,omitempty"` // used only in MultiGet + // TODO double-check that MultiGet now returns details error information + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/get_template.go b/vendor/gopkg.in/olivere/elastic.v5/get_template.go new file mode 100644 index 0000000000000000000000000000000000000000..96dfad2400e95673eaeb201ce8829c87f4ef4ef4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/get_template.go @@ -0,0 +1,114 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// GetTemplateService reads a search template. +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html. +type GetTemplateService struct { + client *Client + pretty bool + id string + version interface{} + versionType string +} + +// NewGetTemplateService creates a new GetTemplateService. +func NewGetTemplateService(client *Client) *GetTemplateService { + return &GetTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *GetTemplateService) Id(id string) *GetTemplateService { + s.id = id + return s +} + +// Version is an explicit version number for concurrency control. +func (s *GetTemplateService) Version(version interface{}) *GetTemplateService { + s.version = version + return s +} + +// VersionType is a specific version type. +func (s *GetTemplateService) VersionType(versionType string) *GetTemplateService { + s.versionType = versionType + return s +} + +// buildURL builds the URL for the operation. +func (s *GetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *GetTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation and returns the template. +func (s *GetTemplateService) Do(ctx context.Context) (*GetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(GetTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type GetTemplateResponse struct { + Template string `json:"template"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/highlight.go b/vendor/gopkg.in/olivere/elastic.v5/highlight.go new file mode 100644 index 0000000000000000000000000000000000000000..d28f03c3abc0f17bfe2c2ef66298b58fb0259261 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/highlight.go @@ -0,0 +1,451 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Highlight allows highlighting search results on one or more fields. 
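+//
+// A short sketch of building a highlight section for a search request
+// (field name and tags are illustrative):
+//
+//	hl := NewHighlight().
+//		Field("message").
+//		PreTags("<em>").
+//		PostTags("</em>").
+//		FragmentSize(150).
+//		NumOfFragments(3)
+//	src, err := hl.Source() // the map serialized under "highlight"
+//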
+// For details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-highlighting.html +type Highlight struct { + fields []*HighlighterField + tagsSchema *string + highlightFilter *bool + fragmentSize *int + numOfFragments *int + preTags []string + postTags []string + order *string + encoder *string + requireFieldMatch *bool + boundaryMaxScan *int + boundaryChars *string + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + phraseLimit *int + options map[string]interface{} + forceSource *bool + useExplicitFieldOrder bool +} + +func NewHighlight() *Highlight { + hl := &Highlight{ + options: make(map[string]interface{}), + } + return hl +} + +func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight { + hl.fields = append(hl.fields, fields...) + return hl +} + +func (hl *Highlight) Field(name string) *Highlight { + field := NewHighlighterField(name) + hl.fields = append(hl.fields, field) + return hl +} + +func (hl *Highlight) TagsSchema(schemaName string) *Highlight { + hl.tagsSchema = &schemaName + return hl +} + +func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight { + hl.highlightFilter = &highlightFilter + return hl +} + +func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight { + hl.fragmentSize = &fragmentSize + return hl +} + +func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight { + hl.numOfFragments = &numOfFragments + return hl +} + +func (hl *Highlight) Encoder(encoder string) *Highlight { + hl.encoder = &encoder + return hl +} + +func (hl *Highlight) PreTags(preTags ...string) *Highlight { + hl.preTags = append(hl.preTags, preTags...) + return hl +} + +func (hl *Highlight) PostTags(postTags ...string) *Highlight { + hl.postTags = append(hl.postTags, postTags...) + return hl +} + +func (hl *Highlight) Order(order string) *Highlight { + hl.order = &order + return hl +} + +func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight { + hl.requireFieldMatch = &requireFieldMatch + return hl +} + +func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight { + hl.boundaryMaxScan = &boundaryMaxScan + return hl +} + +func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight { + hl.boundaryChars = &boundaryChars + return hl +} + +func (hl *Highlight) HighlighterType(highlighterType string) *Highlight { + hl.highlighterType = &highlighterType + return hl +} + +func (hl *Highlight) Fragmenter(fragmenter string) *Highlight { + hl.fragmenter = &fragmenter + return hl +} + +func (hl *Highlight) HighlighQuery(highlightQuery Query) *Highlight { + hl.highlightQuery = highlightQuery + return hl +} + +func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight { + hl.noMatchSize = &noMatchSize + return hl +} + +func (hl *Highlight) Options(options map[string]interface{}) *Highlight { + hl.options = options + return hl +} + +func (hl *Highlight) ForceSource(forceSource bool) *Highlight { + hl.forceSource = &forceSource + return hl +} + +func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight { + hl.useExplicitFieldOrder = useExplicitFieldOrder + return hl +} + +// Creates the query source for the bool query. +func (hl *Highlight) Source() (interface{}, error) { + // Returns the map inside of "highlight": + // "highlight":{ + // ... this ... 
+ // } + source := make(map[string]interface{}) + if hl.tagsSchema != nil { + source["tags_schema"] = *hl.tagsSchema + } + if hl.preTags != nil && len(hl.preTags) > 0 { + source["pre_tags"] = hl.preTags + } + if hl.postTags != nil && len(hl.postTags) > 0 { + source["post_tags"] = hl.postTags + } + if hl.order != nil { + source["order"] = *hl.order + } + if hl.highlightFilter != nil { + source["highlight_filter"] = *hl.highlightFilter + } + if hl.fragmentSize != nil { + source["fragment_size"] = *hl.fragmentSize + } + if hl.numOfFragments != nil { + source["number_of_fragments"] = *hl.numOfFragments + } + if hl.encoder != nil { + source["encoder"] = *hl.encoder + } + if hl.requireFieldMatch != nil { + source["require_field_match"] = *hl.requireFieldMatch + } + if hl.boundaryMaxScan != nil { + source["boundary_max_scan"] = *hl.boundaryMaxScan + } + if hl.boundaryChars != nil { + source["boundary_chars"] = *hl.boundaryChars + } + if hl.highlighterType != nil { + source["type"] = *hl.highlighterType + } + if hl.fragmenter != nil { + source["fragmenter"] = *hl.fragmenter + } + if hl.highlightQuery != nil { + src, err := hl.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if hl.noMatchSize != nil { + source["no_match_size"] = *hl.noMatchSize + } + if hl.phraseLimit != nil { + source["phrase_limit"] = *hl.phraseLimit + } + if hl.options != nil && len(hl.options) > 0 { + source["options"] = hl.options + } + if hl.forceSource != nil { + source["force_source"] = *hl.forceSource + } + + if hl.fields != nil && len(hl.fields) > 0 { + if hl.useExplicitFieldOrder { + // Use a slice for the fields + var fields []map[string]interface{} + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fmap := make(map[string]interface{}) + fmap[field.Name] = src + fields = append(fields, fmap) + } + source["fields"] = fields + } else { + // Use a map for the fields + fields := make(map[string]interface{}, 0) + for _, field := range hl.fields { + src, err := field.Source() + if err != nil { + return nil, err + } + fields[field.Name] = src + } + source["fields"] = fields + } + } + + return source, nil +} + +// HighlighterField specifies a highlighted field. 
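+//
+// A field-specific configuration is attached to a Highlight via Fields,
+// for example (field name is illustrative):
+//
+//	f := NewHighlighterField("title").
+//		NumOfFragments(0).
+//		RequireFieldMatch(false)
+//	hl := NewHighlight().Fields(f)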
+type HighlighterField struct { + Name string + + preTags []string + postTags []string + fragmentSize int + fragmentOffset int + numOfFragments int + highlightFilter *bool + order *string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType *string + fragmenter *string + highlightQuery Query + noMatchSize *int + matchedFields []string + phraseLimit *int + options map[string]interface{} + forceSource *bool + + /* + Name string + preTags []string + postTags []string + fragmentSize int + numOfFragments int + fragmentOffset int + highlightFilter *bool + order string + requireFieldMatch *bool + boundaryMaxScan int + boundaryChars []rune + highlighterType string + fragmenter string + highlightQuery Query + noMatchSize *int + matchedFields []string + options map[string]interface{} + forceSource *bool + */ +} + +func NewHighlighterField(name string) *HighlighterField { + return &HighlighterField{ + Name: name, + preTags: make([]string, 0), + postTags: make([]string, 0), + fragmentSize: -1, + fragmentOffset: -1, + numOfFragments: -1, + boundaryMaxScan: -1, + boundaryChars: make([]rune, 0), + matchedFields: make([]string, 0), + options: make(map[string]interface{}), + } +} + +func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField { + f.preTags = append(f.preTags, preTags...) + return f +} + +func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField { + f.postTags = append(f.postTags, postTags...) + return f +} + +func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField { + f.fragmentSize = fragmentSize + return f +} + +func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField { + f.fragmentOffset = fragmentOffset + return f +} + +func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField { + f.numOfFragments = numOfFragments + return f +} + +func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField { + f.highlightFilter = &highlightFilter + return f +} + +func (f *HighlighterField) Order(order string) *HighlighterField { + f.order = &order + return f +} + +func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField { + f.requireFieldMatch = &requireFieldMatch + return f +} + +func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField { + f.boundaryMaxScan = boundaryMaxScan + return f +} + +func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField { + f.boundaryChars = append(f.boundaryChars, boundaryChars...) + return f +} + +func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField { + f.highlighterType = &highlighterType + return f +} + +func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField { + f.fragmenter = &fragmenter + return f +} + +func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField { + f.highlightQuery = highlightQuery + return f +} + +func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField { + f.noMatchSize = &noMatchSize + return f +} + +func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField { + f.options = options + return f +} + +func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField { + f.matchedFields = append(f.matchedFields, matchedFields...) 
+ return f +} + +func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField { + f.phraseLimit = &phraseLimit + return f +} + +func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField { + f.forceSource = &forceSource + return f +} + +func (f *HighlighterField) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if f.preTags != nil && len(f.preTags) > 0 { + source["pre_tags"] = f.preTags + } + if f.postTags != nil && len(f.postTags) > 0 { + source["post_tags"] = f.postTags + } + if f.fragmentSize != -1 { + source["fragment_size"] = f.fragmentSize + } + if f.numOfFragments != -1 { + source["number_of_fragments"] = f.numOfFragments + } + if f.fragmentOffset != -1 { + source["fragment_offset"] = f.fragmentOffset + } + if f.highlightFilter != nil { + source["highlight_filter"] = *f.highlightFilter + } + if f.order != nil { + source["order"] = *f.order + } + if f.requireFieldMatch != nil { + source["require_field_match"] = *f.requireFieldMatch + } + if f.boundaryMaxScan != -1 { + source["boundary_max_scan"] = f.boundaryMaxScan + } + if f.boundaryChars != nil && len(f.boundaryChars) > 0 { + source["boundary_chars"] = f.boundaryChars + } + if f.highlighterType != nil { + source["type"] = *f.highlighterType + } + if f.fragmenter != nil { + source["fragmenter"] = *f.fragmenter + } + if f.highlightQuery != nil { + src, err := f.highlightQuery.Source() + if err != nil { + return nil, err + } + source["highlight_query"] = src + } + if f.noMatchSize != nil { + source["no_match_size"] = *f.noMatchSize + } + if f.matchedFields != nil && len(f.matchedFields) > 0 { + source["matched_fields"] = f.matchedFields + } + if f.phraseLimit != nil { + source["phrase_limit"] = *f.phraseLimit + } + if f.options != nil && len(f.options) > 0 { + source["options"] = f.options + } + if f.forceSource != nil { + source["force_source"] = *f.forceSource + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/index.go b/vendor/gopkg.in/olivere/elastic.v5/index.go new file mode 100644 index 0000000000000000000000000000000000000000..86663c5b13624fbf2b2f2f25779e24dae8ed0676 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/index.go @@ -0,0 +1,289 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndexService adds or updates a typed JSON document in a specified index, +// making it searchable. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html +// for details. +type IndexService struct { + client *Client + pretty bool + id string + index string + typ string + parent string + routing string + timeout string + timestamp string + ttl string + version interface{} + opType string + versionType string + refresh string + waitForActiveShards string + pipeline string + bodyJson interface{} + bodyString string +} + +// NewIndexService creates a new IndexService. +func NewIndexService(client *Client) *IndexService { + return &IndexService{ + client: client, + } +} + +// Id is the document ID. +func (s *IndexService) Id(id string) *IndexService { + s.id = id + return s +} + +// Index is the name of the index. +func (s *IndexService) Index(index string) *IndexService { + s.index = index + return s +} + +// Type is the type of the document. 
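+//
+// A typical indexing call chains Index, Type and Id with a document body,
+// for example (client and ctx are assumed in scope; names and document are
+// illustrative):
+//
+//	doc := map[string]interface{}{"user": "olivere", "message": "Hello"}
+//	res, err := NewIndexService(client).
+//		Index("twitter").
+//		Type("tweet").
+//		Id("1").
+//		BodyJson(doc).
+//		Do(ctx)
+//	if err == nil && res.Created {
+//		// a new document was created (as opposed to updated)
+//	}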
+func (s *IndexService) Type(typ string) *IndexService { + s.typ = typ + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active +// before proceeding with the index operation. Defaults to 1, meaning the +// primary shard only. Set to `all` for all shard copies, otherwise set to +// any non-negative value less than or equal to the total number of copies +// for the shard (number of replicas + 1). +func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pipeline specifies the pipeline id to preprocess incoming documents with. +func (s *IndexService) Pipeline(pipeline string) *IndexService { + s.pipeline = pipeline + return s +} + +// Refresh the index after performing the operation. +func (s *IndexService) Refresh(refresh string) *IndexService { + s.refresh = refresh + return s +} + +// Ttl is an expiration time for the document. +func (s *IndexService) Ttl(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// TTL is an expiration time for the document (alias for Ttl). +func (s *IndexService) TTL(ttl string) *IndexService { + s.ttl = ttl + return s +} + +// Version is an explicit version number for concurrency control. +func (s *IndexService) Version(version interface{}) *IndexService { + s.version = version + return s +} + +// OpType is an explicit operation type, i.e. "create" or "index" (default). +func (s *IndexService) OpType(opType string) *IndexService { + s.opType = opType + return s +} + +// Parent is the ID of the parent document. +func (s *IndexService) Parent(parent string) *IndexService { + s.parent = parent + return s +} + +// Routing is a specific routing value. +func (s *IndexService) Routing(routing string) *IndexService { + s.routing = routing + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndexService) Timeout(timeout string) *IndexService { + s.timeout = timeout + return s +} + +// Timestamp is an explicit timestamp for the document. +func (s *IndexService) Timestamp(timestamp string) *IndexService { + s.timestamp = timestamp + return s +} + +// VersionType is a specific version type. +func (s *IndexService) VersionType(versionType string) *IndexService { + s.versionType = versionType + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndexService) Pretty(pretty bool) *IndexService { + s.pretty = pretty + return s +} + +// BodyJson is the document as a serializable JSON interface. +func (s *IndexService) BodyJson(body interface{}) *IndexService { + s.bodyJson = body + return s +} + +// BodyString is the document encoded as a string. +func (s *IndexService) BodyString(body string) *IndexService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
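+//
+// When an explicit document ID is set, the request becomes
+// PUT /{index}/{type}/{id}; without an ID it falls back to
+// POST /{index}/{type}/ so that Elasticsearch generates the ID automatically.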
+func (s *IndexService) buildURL() (string, string, url.Values, error) { + var err error + var method, path string + + if s.id != "" { + // Create document with manual id + method = "PUT" + path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ + "id": s.id, + "index": s.index, + "type": s.typ, + }) + } else { + // Automatic ID generation + // See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#index-creation + method = "POST" + path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } + if err != nil { + return "", "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.timestamp != "" { + params.Set("timestamp", s.timestamp) + } + if s.ttl != "" { + params.Set("ttl", s.ttl) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return method, path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndexService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + method, path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, method, path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndexResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndexResponse is the result of indexing a document in Elasticsearch. +type IndexResponse struct { + // TODO _shards { total, failed, successful } + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go b/vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go new file mode 100644 index 0000000000000000000000000000000000000000..8caea77186115df8635a3ce89ee4f804278c42c3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_analyze.go @@ -0,0 +1,280 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesAnalyzeService performs the analysis process on a text and returns +// the tokens breakdown of the text. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html +// for detail. +type IndicesAnalyzeService struct { + client *Client + pretty bool + index string + request *IndicesAnalyzeRequest + format string + preferLocal *bool + bodyJson interface{} + bodyString string +} + +// NewIndicesAnalyzeService creates a new IndicesAnalyzeService. +func NewIndicesAnalyzeService(client *Client) *IndicesAnalyzeService { + return &IndicesAnalyzeService{ + client: client, + request: new(IndicesAnalyzeRequest), + } +} + +// Index is the name of the index to scope the operation. +func (s *IndicesAnalyzeService) Index(index string) *IndicesAnalyzeService { + s.index = index + return s +} + +// Format of the output. +func (s *IndicesAnalyzeService) Format(format string) *IndicesAnalyzeService { + s.format = format + return s +} + +// PreferLocal, when true, specifies that a local shard should be used +// if available. When false, a random shard is used (default: true). +func (s *IndicesAnalyzeService) PreferLocal(preferLocal bool) *IndicesAnalyzeService { + s.preferLocal = &preferLocal + return s +} + +// Request passes the analyze request to use. +func (s *IndicesAnalyzeService) Request(request *IndicesAnalyzeRequest) *IndicesAnalyzeService { + if request == nil { + s.request = new(IndicesAnalyzeRequest) + } else { + s.request = request + } + return s +} + +// Analyzer is the name of the analyzer to use. +func (s *IndicesAnalyzeService) Analyzer(analyzer string) *IndicesAnalyzeService { + s.request.Analyzer = analyzer + return s +} + +// Attributes is a list of token attributes to output; this parameter works +// only with explain=true. +func (s *IndicesAnalyzeService) Attributes(attributes ...string) *IndicesAnalyzeService { + s.request.Attributes = attributes + return s +} + +// CharFilter is a list of character filters to use for the analysis. +func (s *IndicesAnalyzeService) CharFilter(charFilter ...string) *IndicesAnalyzeService { + s.request.CharFilter = charFilter + return s +} + +// Explain, when true, outputs more advanced details (default: false). +func (s *IndicesAnalyzeService) Explain(explain bool) *IndicesAnalyzeService { + s.request.Explain = explain + return s +} + +// Field specifies to use a specific analyzer configured for this field (instead of passing the analyzer name). +func (s *IndicesAnalyzeService) Field(field string) *IndicesAnalyzeService { + s.request.Field = field + return s +} + +// Filter is a list of filters to use for the analysis. +func (s *IndicesAnalyzeService) Filter(filter ...string) *IndicesAnalyzeService { + s.request.Filter = filter + return s +} + +// Text is the text on which the analysis should be performed (when request body is not used). +func (s *IndicesAnalyzeService) Text(text ...string) *IndicesAnalyzeService { + s.request.Text = text + return s +} + +// Tokenizer is the name of the tokenizer to use for the analysis. +func (s *IndicesAnalyzeService) Tokenizer(tokenizer string) *IndicesAnalyzeService { + s.request.Tokenizer = tokenizer + return s +} + +// Pretty indicates that the JSON response be indented and human readable. 
+func (s *IndicesAnalyzeService) Pretty(pretty bool) *IndicesAnalyzeService { + s.pretty = pretty + return s +} + +// BodyJson is the text on which the analysis should be performed. +func (s *IndicesAnalyzeService) BodyJson(body interface{}) *IndicesAnalyzeService { + s.bodyJson = body + return s +} + +// BodyString is the text on which the analysis should be performed. +func (s *IndicesAnalyzeService) BodyString(body string) *IndicesAnalyzeService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if s.index == "" { + path = "/_analyze" + } else { + path, err = uritemplates.Expand("/{index}/_analyze", map[string]string{ + "index": s.index, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.format != "" { + params.Set("format", s.format) + } + if s.preferLocal != nil { + params.Set("prefer_local", fmt.Sprintf("%v", *s.preferLocal)) + } + + return path, params, nil +} + +// Do will execute the request with the given context. +func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } else { + // Request parameters are deprecated in 5.1.1, and we must use a JSON + // structure in the body to pass the parameters. + // See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-analyze.html + body = s.request + } + + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + ret := new(IndicesAnalyzeResponse) + if err = s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + + return ret, nil +} + +func (s *IndicesAnalyzeService) Validate() error { + var invalid []string + if s.bodyJson == nil && s.bodyString == "" { + if len(s.request.Text) == 0 { + invalid = append(invalid, "Text") + } + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// IndicesAnalyzeRequest specifies the parameters of the analyze request. 
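+//
+// The request is normally filled in through the service's setters; a direct,
+// illustrative sketch of the JSON body it produces (text and analyzer are
+// example values):
+//
+//	req := &IndicesAnalyzeRequest{
+//		Text:     []string{"Quick brown fox"},
+//		Analyzer: "standard",
+//	}
+//	// encodes to {"text":["Quick brown fox"],"analyzer":"standard"}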
+type IndicesAnalyzeRequest struct { + Text []string `json:"text,omitempty"` + Analyzer string `json:"analyzer,omitempty"` + Tokenizer string `json:"tokenizer,omitempty"` + Filter []string `json:"filter,omitempty"` + CharFilter []string `json:"char_filter,omitempty"` + Field string `json:"field,omitempty"` + Explain bool `json:"explain,omitempty"` + Attributes []string `json:"attributes,omitempty"` +} + +type IndicesAnalyzeResponse struct { + Tokens []IndicesAnalyzeResponseToken `json:"tokens"` // json part for normal message + Detail IndicesAnalyzeResponseDetail `json:"detail"` // json part for verbose message of explain request +} + +type IndicesAnalyzeResponseToken struct { + Token string `json:"token"` + StartOffset int `json:"start_offset"` + EndOffset int `json:"end_offset"` + Type string `json:"type"` + Position int `json:"position"` +} + +type IndicesAnalyzeResponseDetail struct { + CustomAnalyzer bool `json:"custom_analyzer"` + Charfilters []interface{} `json:"charfilters"` + Analyzer struct { + Name string `json:"name"` + Tokens []struct { + Token string `json:"token"` + StartOffset int `json:"start_offset"` + EndOffset int `json:"end_offset"` + Type string `json:"type"` + Position int `json:"position"` + Bytes string `json:"bytes"` + PositionLength int `json:"positionLength"` + } `json:"tokens"` + } `json:"analyzer"` + Tokenizer struct { + Name string `json:"name"` + Tokens []struct { + Token string `json:"token"` + StartOffset int `json:"start_offset"` + EndOffset int `json:"end_offset"` + Type string `json:"type"` + Position int `json:"position"` + } `json:"tokens"` + } `json:"tokenizer"` + Tokenfilters []struct { + Name string `json:"name"` + Tokens []struct { + Token string `json:"token"` + StartOffset int `json:"start_offset"` + EndOffset int `json:"end_offset"` + Type string `json:"type"` + Position int `json:"position"` + Keyword bool `json:"keyword"` + } `json:"tokens"` + } `json:"tokenfilters"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_close.go b/vendor/gopkg.in/olivere/elastic.v5/indices_close.go new file mode 100644 index 0000000000000000000000000000000000000000..35399418bac0fe5876593273ff074f2771bc4916 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_close.go @@ -0,0 +1,154 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesCloseService closes an index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-open-close.html +// for details. +type IndicesCloseService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesCloseService creates and initializes a new IndicesCloseService. +func NewIndicesCloseService(client *Client) *IndicesCloseService { + return &IndicesCloseService{client: client} +} + +// Index is the name of the index to close. +func (s *IndicesCloseService) Index(index string) *IndicesCloseService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. 
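+//
+// For example, closing an index with an explicit master timeout could look
+// like this (client and ctx are assumed in scope; index name and duration
+// are illustrative):
+//
+//	res, err := NewIndicesCloseService(client).
+//		Index("twitter").
+//		MasterTimeout("30s").
+//		Do(ctx)
+//	if err == nil && res.Acknowledged {
+//		// the index is now closed
+//	}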
+func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). +func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesCloseService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_close", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesCloseService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesCloseResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesCloseResponse is the response of IndicesCloseService.Do. +type IndicesCloseResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_create.go b/vendor/gopkg.in/olivere/elastic.v5/indices_create.go new file mode 100644 index 0000000000000000000000000000000000000000..43fe776efa7e037e2305b7d9a91d7d0fa5a485d4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_create.go @@ -0,0 +1,131 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "errors" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesCreateService creates a new index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-create-index.html +// for details. +type IndicesCreateService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + bodyJson interface{} + bodyString string +} + +// NewIndicesCreateService returns a new IndicesCreateService. +func NewIndicesCreateService(client *Client) *IndicesCreateService { + return &IndicesCreateService{client: client} +} + +// Index is the name of the index to create. +func (b *IndicesCreateService) Index(index string) *IndicesCreateService { + b.index = index + return b +} + +// Timeout the explicit operation timeout, e.g. "5s". +func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService { + s.masterTimeout = masterTimeout + return s +} + +// Body specifies the configuration of the index as a string. +// It is an alias for BodyString. +func (b *IndicesCreateService) Body(body string) *IndicesCreateService { + b.bodyString = body + return b +} + +// BodyString specifies the configuration of the index as a string. +func (b *IndicesCreateService) BodyString(body string) *IndicesCreateService { + b.bodyString = body + return b +} + +// BodyJson specifies the configuration of the index. The interface{} will +// be serializes as a JSON document, so use a map[string]interface{}. +func (b *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService { + b.bodyJson = body + return b +} + +// Pretty indicates that the JSON response be indented and human readable. +func (b *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService { + b.pretty = pretty + return b +} + +// Do executes the operation. +func (b *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) { + if b.index == "" { + return nil, errors.New("missing index name") + } + + // Build url + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": b.index, + }) + if err != nil { + return nil, err + } + + params := make(url.Values) + if b.pretty { + params.Set("pretty", "1") + } + if b.masterTimeout != "" { + params.Set("master_timeout", b.masterTimeout) + } + if b.timeout != "" { + params.Set("timeout", b.timeout) + } + + // Setup HTTP request body + var body interface{} + if b.bodyJson != nil { + body = b.bodyJson + } else { + body = b.bodyString + } + + // Get response + res, err := b.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + ret := new(IndicesCreateResult) + if err := b.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a create index request. + +// IndicesCreateResult is the outcome of creating a new index. 
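+//
+// It is returned by IndicesCreateService.Do. A sketch of a create call with
+// an inline settings body (the body shown is purely illustrative):
+//
+//	body := `{"settings":{"number_of_shards":1}}`
+//	res, err := NewIndicesCreateService(client).
+//		Index("twitter").
+//		BodyString(body).
+//		Do(ctx)
+//	if err == nil && res.Acknowledged {
+//		// the index was created
+//	}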
+type IndicesCreateResult struct { + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go b/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go new file mode 100644 index 0000000000000000000000000000000000000000..c5d7d351c0e655457b98e4f1b27561b618bbf4ae --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_delete.go @@ -0,0 +1,130 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesDeleteService allows to delete existing indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-delete-index.html +// for details. +type IndicesDeleteService struct { + client *Client + pretty bool + index []string + timeout string + masterTimeout string +} + +// NewIndicesDeleteService creates and initializes a new IndicesDeleteService. +func NewIndicesDeleteService(client *Client) *IndicesDeleteService { + return &IndicesDeleteService{ + client: client, + index: make([]string, 0), + } +} + +// Index adds the list of indices to delete. +// Use `_all` or `*` string to delete all indices. +func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesDeleteService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesDeleteService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
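+//
+// A short, illustrative example of deleting a single index (client and ctx
+// are assumed in scope):
+//
+//	res, err := NewIndicesDeleteService(client).
+//		Index([]string{"twitter"}).
+//		Do(ctx)
+//	if err == nil && res.Acknowledged {
+//		// the index was deleted
+//	}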
+func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a delete index request. + +// IndicesDeleteResponse is the response of IndicesDeleteService.Do. +type IndicesDeleteResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go new file mode 100644 index 0000000000000000000000000000000000000000..7e5ceb546cf14a6d5f7a643f6416a2216714414a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_delete_template.go @@ -0,0 +1,123 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesDeleteTemplateService deletes index templates. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html. +type IndicesDeleteTemplateService struct { + client *Client + pretty bool + name string + timeout string + masterTimeout string +} + +// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService. +func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService { + return &IndicesDeleteTemplateService{ + client: client, + } +} + +// Name is the name of the template. +func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *IndicesDeleteTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesDeleteTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do. +type IndicesDeleteTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go new file mode 100644 index 0000000000000000000000000000000000000000..c1cb8e8466f192bb84d2117581d39efa4e4b7027 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists.go @@ -0,0 +1,151 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesExistsService checks if an index or indices exist or not. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-exists.html +// for details. +type IndicesExistsService struct { + client *Client + pretty bool + index []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + local *bool +} + +// NewIndicesExistsService creates and initializes a new IndicesExistsService. +func NewIndicesExistsService(client *Client) *IndicesExistsService { + return &IndicesExistsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of one or more indices to check. +func (s *IndicesExistsService) Index(index []string) *IndicesExistsService { + s.index = index + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or +// when no indices have been specified). +func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService { + s.expandWildcards = expandWildcards + return s +} + +// Local, when set, returns local information and does not retrieve the state +// from master node (default: false). +func (s *IndicesExistsService) Local(local bool) *IndicesExistsService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). 
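+//
+// For example, an existence check that tolerates missing or closed indices
+// could be written as (index name is illustrative):
+//
+//	exists, err := NewIndicesExistsService(client).
+//		Index([]string{"twitter"}).
+//		IgnoreUnavailable(true).
+//		Do(ctx)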
+func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(s.index, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go new file mode 100644 index 0000000000000000000000000000000000000000..97008a31dfd05fb2d0a16c9101cc4a818d022177 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_template.go @@ -0,0 +1,114 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesExistsTemplateService checks if a given template exists. +// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html#indices-templates-exists +// for documentation. +type IndicesExistsTemplateService struct { + client *Client + pretty bool + name string + local *bool +} + +// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService. +func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService { + return &IndicesExistsTemplateService{ + client: client, + } +} + +// Name is the name of the template. 
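+//
+// For example (the template name is illustrative):
+//
+//	found, err := NewIndicesExistsTemplateService(client).
+//		Name("my-template").
+//		Do(ctx)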
+func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService { + s.name = name + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go new file mode 100644 index 0000000000000000000000000000000000000000..e8f8a7bbf67fa45ebb18aaf05dcf51c6a69dde88 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_exists_type.go @@ -0,0 +1,161 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesExistsTypeService checks if one or more types exist in one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-types-exists.html +// for details. +type IndicesExistsTypeService struct { + client *Client + pretty bool + typ []string + index []string + expandWildcards string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool +} + +// NewIndicesExistsTypeService creates a new IndicesExistsTypeService. +func NewIndicesExistsTypeService(client *Client) *IndicesExistsTypeService { + return &IndicesExistsTypeService{ + client: client, + } +} + +// Index is a list of index names; use `_all` to check the types across all indices. 
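A hedged sketch of checking for an index template with the service above; client and ctx are assumed to be set up as in the earlier exists sketch, and the template name is a placeholder.

// templateExists reports whether an index template is registered.
func templateExists(ctx context.Context, client *elastic.Client, name string) (bool, error) {
	// HEAD /_template/{name}; Do returns true on 200 and false on 404.
	return elastic.NewIndicesExistsTemplateService(client).
		Name(name).
		Do(ctx)
}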
+func (s *IndicesExistsTypeService) Index(indices ...string) *IndicesExistsTypeService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types to check. +func (s *IndicesExistsTypeService) Type(types ...string) *IndicesExistsTypeService { + s.typ = append(s.typ, types...) + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesExistsTypeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsTypeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesExistsTypeService) AllowNoIndices(allowNoIndices bool) *IndicesExistsTypeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesExistsTypeService) ExpandWildcards(expandWildcards string) *IndicesExistsTypeService { + s.expandWildcards = expandWildcards + return s +} + +// Local specifies whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesExistsTypeService) Local(local bool) *IndicesExistsTypeService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesExistsTypeService) Pretty(pretty bool) *IndicesExistsTypeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesExistsTypeService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesExistsTypeService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(s.typ) == 0 { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
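The type-existence service above follows the same pattern (its Do method appears just below). A sketch, again with placeholder index and type names and the same assumed imports:

// typeExists checks whether a mapping type exists in the given index.
func typeExists(ctx context.Context, client *elastic.Client) (bool, error) {
	// HEAD /{index}/_mapping/{type}
	return elastic.NewIndicesExistsTypeService(client).
		Index("myindex").
		Type("mytype").
		Do(ctx)
}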
+func (s *IndicesExistsTypeService) Do(ctx context.Context) (bool, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return false, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return false, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "HEAD", path, params, nil, 404) + if err != nil { + return false, err + } + + // Return operation response + switch res.StatusCode { + case http.StatusOK: + return true, nil + case http.StatusNotFound: + return false, nil + default: + return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go b/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go new file mode 100644 index 0000000000000000000000000000000000000000..8afad814a0c9a039464b1e3bc5ca2e628895b625 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_flush.go @@ -0,0 +1,170 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// Flush allows to flush one or more indices. The flush process of an index +// basically frees memory from the index by flushing data to the index +// storage and clearing the internal transaction log. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-flush.html +// for details. +type IndicesFlushService struct { + client *Client + pretty bool + index []string + force *bool + waitIfOngoing *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesFlushService creates a new IndicesFlushService. +func NewIndicesFlushService(client *Client) *IndicesFlushService { + return &IndicesFlushService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string for all indices. +func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService { + s.index = append(s.index, indices...) + return s +} + +// Force indicates whether a flush should be forced even if it is not +// necessarily needed ie. if no changes will be committed to the index. +// This is useful if transaction log IDs should be incremented even if +// no uncommitted changes are present. (This setting can be considered as internal). +func (s *IndicesFlushService) Force(force bool) *IndicesFlushService { + s.force = &force + return s +} + +// WaitIfOngoing, if set to true, indicates that the flush operation will +// block until the flush can be executed if another flush operation is +// already executing. The default is false and will cause an exception +// to be thrown on the shard level if another flush operation is already running.. +func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService { + s.waitIfOngoing = &waitIfOngoing + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. 
(This includes `_all` string or when +// no indices have been specified). +func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards specifies whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesFlushService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_flush", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_flush" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.waitIfOngoing != nil { + params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesFlushService) Validate() error { + return nil +} + +// Do executes the service. +func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesFlushResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a flush request. + +type IndicesFlushResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go new file mode 100644 index 0000000000000000000000000000000000000000..46b180173ad0e058627334b72ddc75748525450b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_forcemerge.go @@ -0,0 +1,190 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesForcemergeService allows to force merging of one or more indices. +// The merge relates to the number of segments a Lucene index holds +// within each shard. The force merge operation allows to reduce the number +// of segments by merging them. 
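A sketch of a flush call using the flush service above; the index name is a placeholder, and WaitIfOngoing is optional, shown here only to illustrate the builder style.

// flushIndex flushes a single index, blocking if another flush is already running.
func flushIndex(ctx context.Context, client *elastic.Client, name string) error {
	// POST /{index}/_flush
	_, err := elastic.NewIndicesFlushService(client).
		Index(name).
		WaitIfOngoing(true).
		Do(ctx)
	return err
}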
+// +// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-forcemerge.html +// for more information. +type IndicesForcemergeService struct { + client *Client + pretty bool + index []string + allowNoIndices *bool + expandWildcards string + flush *bool + ignoreUnavailable *bool + maxNumSegments interface{} + onlyExpungeDeletes *bool + operationThreading interface{} +} + +// NewIndicesForcemergeService creates a new IndicesForcemergeService. +func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService { + return &IndicesForcemergeService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService { + if s.index == nil { + s.index = make([]string, 0) + } + s.index = append(s.index, index...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService { + s.expandWildcards = expandWildcards + return s +} + +// Flush specifies whether the index should be flushed after performing +// the operation (default: true). +func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService { + s.flush = &flush + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// MaxNumSegments specifies the number of segments the index should be +// merged into (default: dynamic). +func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService { + s.maxNumSegments = maxNumSegments + return s +} + +// OnlyExpungeDeletes specifies whether the operation should only expunge +// deleted documents. +func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService { + s.onlyExpungeDeletes = &onlyExpungeDeletes + return s +} + +func (s *IndicesForcemergeService) OperationThreading(operationThreading interface{}) *IndicesForcemergeService { + s.operationThreading = operationThreading + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_forcemerge" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flush != nil { + params.Set("flush", fmt.Sprintf("%v", *s.flush)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.maxNumSegments != nil { + params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments)) + } + if s.onlyExpungeDeletes != nil { + params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) + } + if s.operationThreading != nil { + params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesForcemergeService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesForcemergeResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do. +type IndicesForcemergeResponse struct { + Shards shardsInfo `json:"_shards"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get.go new file mode 100644 index 0000000000000000000000000000000000000000..e576d5577e461333dcf5163260b2fa2596b1f94c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get.go @@ -0,0 +1,203 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesGetService retrieves information about one or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-index.html +// for more details. +type IndicesGetService struct { + client *Client + pretty bool + index []string + feature []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + human *bool +} + +// NewIndicesGetService creates a new IndicesGetService. +func NewIndicesGetService(client *Client) *IndicesGetService { + return &IndicesGetService{ + client: client, + index: make([]string, 0), + feature: make([]string, 0), + } +} + +// Index is a list of index names. 
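As an illustration of the force-merge service above (placeholder index name; merging down to one segment is just an example value):

// forceMergeToOneSegment merges an index into a single segment,
// e.g. after a large bulk load.
func forceMergeToOneSegment(ctx context.Context, client *elastic.Client, name string) error {
	// POST /{index}/_forcemerge?max_num_segments=1
	_, err := elastic.NewIndicesForcemergeService(client).
		Index(name).
		MaxNumSegments(1).
		Do(ctx)
	return err
}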
+func (s *IndicesGetService) Index(indices ...string) *IndicesGetService { + s.index = append(s.index, indices...) + return s +} + +// Feature is a list of features. +func (s *IndicesGetService) Feature(features ...string) *IndicesGetService { + s.feature = append(s.feature, features...) + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesGetService) Local(local bool) *IndicesGetService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false). +func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard expression +// resolves to no concrete indices (default: false). +func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether wildcard expressions should get +// expanded to open or closed indices (default: open). +func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService { + s.expandWildcards = expandWildcards + return s +} + +/* Disabled because serialization would fail in that case. */ +/* +// FlatSettings make the service return settings in flat format (default: false). +func (s *IndicesGetService) FlatSettings(flatSettings bool) *IndicesGetService { + s.flatSettings = &flatSettings + return s +} +*/ + +// Human indicates whether to return version and creation date values +// in human-readable format (default: false). +func (s *IndicesGetService) Human(human bool) *IndicesGetService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetService) buildURL() (string, url.Values, error) { + var err error + var path string + var index []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.feature) > 0 { + // Build URL + path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{ + "index": strings.Join(index, ","), + "feature": strings.Join(s.feature, ","), + }) + } else { + // Build URL + path, err = uritemplates.Expand("/{index}", map[string]string{ + "index": strings.Join(index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *IndicesGetService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetResponse + if err := s.client.decoder.Decode(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetResponse is part of the response of IndicesGetService.Do. +type IndicesGetResponse struct { + Aliases map[string]interface{} `json:"aliases"` + Mappings map[string]interface{} `json:"mappings"` + Settings map[string]interface{} `json:"settings"` + Warmers map[string]interface{} `json:"warmers"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go new file mode 100644 index 0000000000000000000000000000000000000000..2f827f721ce632377a3249c4ab38f4e93621d7b9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_aliases.go @@ -0,0 +1,158 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// AliasesService returns the aliases associated with one or more indices. +// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html. +type AliasesService struct { + client *Client + index []string + pretty bool +} + +// NewAliasesService instantiates a new AliasesService. +func NewAliasesService(client *Client) *AliasesService { + builder := &AliasesService{ + client: client, + } + return builder +} + +// Pretty asks Elasticsearch to indent the returned JSON. +func (s *AliasesService) Pretty(pretty bool) *AliasesService { + s.pretty = pretty + return s +} + +// Index adds one or more indices. +func (s *AliasesService) Index(index ...string) *AliasesService { + s.index = append(s.index, index...) + return s +} + +// buildURL builds the URL for the operation. 
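A sketch of reading index metadata back with the get service above; the index name is a placeholder, only the settings block is printed, and the log import is assumed from the first sketch.

// dumpIndexSettings prints the settings section of each matching index.
func dumpIndexSettings(ctx context.Context, client *elastic.Client, name string) error {
	res, err := elastic.NewIndicesGetService(client).Index(name).Do(ctx)
	if err != nil {
		return err
	}
	// res maps each concrete index name to its aliases/mappings/settings/warmers.
	for indexName, info := range res {
		log.Printf("%s settings: %v", indexName, info.Settings)
	}
	return nil
}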
+func (s *AliasesService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_aliases", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_aliases" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // { + // "indexName" : { + // "aliases" : { + // "alias1" : { }, + // "alias2" : { } + // } + // }, + // "indexName2" : { + // ... + // }, + // } + indexMap := make(map[string]interface{}) + if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil { + return nil, err + } + + // Each (indexName, _) + ret := &AliasesResult{ + Indices: make(map[string]indexResult), + } + for indexName, indexData := range indexMap { + indexOut, found := ret.Indices[indexName] + if !found { + indexOut = indexResult{Aliases: make([]aliasResult, 0)} + } + + // { "aliases" : { ... } } + indexDataMap, ok := indexData.(map[string]interface{}) + if ok { + aliasesData, ok := indexDataMap["aliases"].(map[string]interface{}) + if ok { + for aliasName, _ := range aliasesData { + aliasRes := aliasResult{AliasName: aliasName} + indexOut.Aliases = append(indexOut.Aliases, aliasRes) + } + } + } + + ret.Indices[indexName] = indexOut + } + + return ret, nil +} + +// -- Result of an alias request. + +type AliasesResult struct { + Indices map[string]indexResult +} + +type indexResult struct { + Aliases []aliasResult +} + +type aliasResult struct { + AliasName string +} + +func (ar AliasesResult) IndicesByAlias(aliasName string) []string { + var indices []string + for indexName, indexInfo := range ar.Indices { + for _, aliasInfo := range indexInfo.Aliases { + if aliasInfo.AliasName == aliasName { + indices = append(indices, indexName) + } + } + } + return indices +} + +func (ir indexResult) HasAlias(aliasName string) bool { + for _, alias := range ir.Aliases { + if alias.AliasName == aliasName { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go new file mode 100644 index 0000000000000000000000000000000000000000..d5a1d3d3fcecb570583d7d2dc5f5b121b86a9cac --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_mapping.go @@ -0,0 +1,171 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesGetMappingService retrieves the mapping definitions for an index or +// index/type. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-mapping.html +// for details. 
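A sketch of resolving an alias to its concrete indices with the aliases service above (the alias name is a placeholder):

// indicesBehindAlias returns the concrete indices an alias currently points at.
func indicesBehindAlias(ctx context.Context, client *elastic.Client, alias string) ([]string, error) {
	// GET /_aliases across all indices.
	res, err := elastic.NewAliasesService(client).Do(ctx)
	if err != nil {
		return nil, err
	}
	// AliasesResult.IndicesByAlias walks every index and collects matches.
	return res.IndicesByAlias(alias), nil
}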
+type IndicesGetMappingService struct { + client *Client + pretty bool + index []string + typ []string + local *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewGetMappingService is an alias for NewIndicesGetMappingService. +// Use NewIndicesGetMappingService. +func NewGetMappingService(client *Client) *IndicesGetMappingService { + return NewIndicesGetMappingService(client) +} + +// NewIndicesGetMappingService creates a new IndicesGetMappingService. +func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService { + return &IndicesGetMappingService{ + client: client, + index: make([]string, 0), + typ: make([]string, 0), + } +} + +// Index is a list of index names. +func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types. +func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService { + s.typ = append(s.typ, types...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. +func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService { + s.expandWildcards = expandWildcards + return s +} + +// Local indicates whether to return local information, do not retrieve +// the state from master node (default: false). +func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService { + s.local = &local + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) { + var index, typ []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.typ) > 0 { + typ = s.typ + } else { + typ = []string{"_all"} + } + + // Build URL + path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(index, ","), + "type": strings.Join(typ, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *IndicesGetMappingService) Validate() error { + return nil +} + +// Do executes the operation. It returns mapping definitions for an index +// or index/type. +func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]interface{} + if err := s.client.decoder.Decode(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go new file mode 100644 index 0000000000000000000000000000000000000000..1d1733b4f8142e5a4e22c6f720d7f07ca06af821 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_settings.go @@ -0,0 +1,184 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesGetSettingsService allows to retrieve settings of one +// or more indices. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-get-settings.html +// for more details. +type IndicesGetSettingsService struct { + client *Client + pretty bool + index []string + name []string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + flatSettings *bool + local *bool +} + +// NewIndicesGetSettingsService creates a new IndicesGetSettingsService. +func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService { + return &IndicesGetSettingsService{ + client: client, + index: make([]string, 0), + name: make([]string, 0), + } +} + +// Index is a list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService { + s.index = append(s.index, indices...) + return s +} + +// Name are the names of the settings that should be included. +func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService { + s.name = append(s.name, name...) + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression +// to concrete indices that are open, closed or both. +// Options: open, closed, none, all. Default: open,closed. 
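A sketch of fetching a mapping with the get-mapping service above; the result is the raw JSON decoded into a generic map, keyed by index name, and the index name is a placeholder.

// currentMapping fetches the mapping for one index as a generic map.
func currentMapping(ctx context.Context, client *elastic.Client, index string) (map[string]interface{}, error) {
	// GET /{index}/_mapping/_all
	return elastic.NewIndicesGetMappingService(client).
		Index(index).
		Do(ctx)
}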
+func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService { + s.flatSettings = &flatSettings + return s +} + +// Local indicates whether to return local information, do not retrieve +// the state from master node (default: false). +func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) { + var err error + var path string + var index []string + + if len(s.index) > 0 { + index = s.index + } else { + index = []string{"_all"} + } + + if len(s.name) > 0 { + // Build URL + path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{ + "index": strings.Join(index, ","), + "name": strings.Join(s.name, ","), + }) + } else { + // Build URL + path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ + "index": strings.Join(index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetSettingsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetSettingsResponse + if err := s.client.decoder.Decode(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do. +type IndicesGetSettingsResponse struct { + Settings map[string]interface{} `json:"settings"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go new file mode 100644 index 0000000000000000000000000000000000000000..5da39df0d2c42ce97f329dac4e7a44d57434c3e3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_get_template.go @@ -0,0 +1,129 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesGetTemplateService returns an index template. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html. +type IndicesGetTemplateService struct { + client *Client + pretty bool + name []string + flatSettings *bool + local *bool +} + +// NewIndicesGetTemplateService creates a new IndicesGetTemplateService. +func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService { + return &IndicesGetTemplateService{ + client: client, + name: make([]string, 0), + } +} + +// Name is the name of the index template. +func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService { + s.name = append(s.name, name...) + return s +} + +// FlatSettings is returns settings in flat format (default: false). +func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService { + s.flatSettings = &flatSettings + return s +} + +// Local indicates whether to return local information, i.e. do not retrieve +// the state from master node (default: false). +func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService { + s.local = &local + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.name) > 0 { + path, err = uritemplates.Expand("/_template/{name}", map[string]string{ + "name": strings.Join(s.name, ","), + }) + } else { + path = "/_template" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.local != nil { + params.Set("local", fmt.Sprintf("%v", *s.local)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesGetTemplateService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret map[string]*IndicesGetTemplateResponse + if err := s.client.decoder.Decode(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do. 
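A sketch of reading a template back with the get-template service above; the response struct (order, template pattern, settings, mappings, aliases) is defined just below. The template name is a placeholder, and the fmt import is assumed alongside those in the first sketch.

// templateOrder reads back a registered template and returns its order value.
func templateOrder(ctx context.Context, client *elastic.Client, name string) (int, error) {
	res, err := elastic.NewIndicesGetTemplateService(client).Name(name).Do(ctx)
	if err != nil {
		return 0, err
	}
	tmpl, ok := res[name]
	if !ok {
		return 0, fmt.Errorf("template %q not found", name)
	}
	return tmpl.Order, nil
}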
+type IndicesGetTemplateResponse struct { + Order int `json:"order,omitempty"` + Template string `json:"template,omitempty"` + Settings map[string]interface{} `json:"settings,omitempty"` + Mappings map[string]interface{} `json:"mappings,omitempty"` + Aliases map[string]interface{} `json:"aliases,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_open.go b/vendor/gopkg.in/olivere/elastic.v5/indices_open.go new file mode 100644 index 0000000000000000000000000000000000000000..26fa477f17e71edd38e72165b22e74810158e6b4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_open.go @@ -0,0 +1,158 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesOpenService opens an index. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-open-close.html +// for details. +type IndicesOpenService struct { + client *Client + pretty bool + index string + timeout string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewIndicesOpenService creates and initializes a new IndicesOpenService. +func NewIndicesOpenService(client *Client) *IndicesOpenService { + return &IndicesOpenService{client: client} +} + +// Index is the name of the index to open. +func (s *IndicesOpenService) Index(index string) *IndicesOpenService { + s.index = index + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should +// be ignored when unavailable (missing or closed). +func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// (This includes `_all` string or when no indices have been specified). +func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both.. +func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService { + s.expandWildcards = expandWildcards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesOpenService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{index}/_open", map[string]string{ + "index": s.index, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesOpenService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesOpenResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesOpenResponse is the response of IndicesOpenService.Do. +type IndicesOpenResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go new file mode 100644 index 0000000000000000000000000000000000000000..4b3047c6fbdd1f6c8b23eddc4cfeae9704a360e3 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_alias.go @@ -0,0 +1,296 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// -- Actions -- + +// AliasAction is an action to apply to an alias, e.g. "add" or "remove". +type AliasAction interface { + Source() (interface{}, error) +} + +// AliasAddAction is an action to add to an alias. +type AliasAddAction struct { + index []string // index name(s) + alias string // alias name + filter Query + routing string + searchRouting string + indexRouting string +} + +// NewAliasAddAction returns an action to add an alias. +func NewAliasAddAction(alias string) *AliasAddAction { + return &AliasAddAction{ + alias: alias, + } +} + +// Index associates one or more indices to the alias. +func (a *AliasAddAction) Index(index ...string) *AliasAddAction { + a.index = append(a.index, index...) + return a +} + +func (a *AliasAddAction) removeBlankIndexNames() { + var indices []string + for _, index := range a.index { + if len(index) > 0 { + indices = append(indices, index) + } + } + a.index = indices +} + +// Filter associates a filter to the alias. 
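A sketch of reopening a closed index with the open service above (placeholder index name):

// openIndex reopens a closed index and reports whether the cluster acknowledged it.
func openIndex(ctx context.Context, client *elastic.Client, name string) (bool, error) {
	// POST /{index}/_open
	res, err := elastic.NewIndicesOpenService(client).Index(name).Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Acknowledged, nil
}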
+func (a *AliasAddAction) Filter(filter Query) *AliasAddAction { + a.filter = filter + return a +} + +// Routing associates a routing value to the alias. +// This basically sets index and search routing to the same value. +func (a *AliasAddAction) Routing(routing string) *AliasAddAction { + a.routing = routing + return a +} + +// IndexRouting associates an index routing value to the alias. +func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction { + a.indexRouting = routing + return a +} + +// SearchRouting associates a search routing value to the alias. +func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction { + a.searchRouting = strings.Join(routing, ",") + return a +} + +// Validate checks if the operation is valid. +func (a *AliasAddAction) Validate() error { + var invalid []string + if len(a.alias) == 0 { + invalid = append(invalid, "Alias") + } + if len(a.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Source returns the JSON-serializable data. +func (a *AliasAddAction) Source() (interface{}, error) { + a.removeBlankIndexNames() + if err := a.Validate(); err != nil { + return nil, err + } + src := make(map[string]interface{}) + act := make(map[string]interface{}) + src["add"] = act + act["alias"] = a.alias + switch len(a.index) { + case 1: + act["index"] = a.index[0] + default: + act["indices"] = a.index + } + if a.filter != nil { + f, err := a.filter.Source() + if err != nil { + return nil, err + } + act["filter"] = f + } + if len(a.routing) > 0 { + act["routing"] = a.routing + } + if len(a.indexRouting) > 0 { + act["index_routing"] = a.indexRouting + } + if len(a.searchRouting) > 0 { + act["search_routing"] = a.searchRouting + } + return src, nil +} + +// AliasRemoveAction is an action to remove an alias. +type AliasRemoveAction struct { + index []string // index name(s) + alias string // alias name +} + +// NewAliasRemoveAction returns an action to remove an alias. +func NewAliasRemoveAction(alias string) *AliasRemoveAction { + return &AliasRemoveAction{ + alias: alias, + } +} + +// Index associates one or more indices to the alias. +func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction { + a.index = append(a.index, index...) + return a +} + +func (a *AliasRemoveAction) removeBlankIndexNames() { + var indices []string + for _, index := range a.index { + if len(index) > 0 { + indices = append(indices, index) + } + } + a.index = indices +} + +// Validate checks if the operation is valid. +func (a *AliasRemoveAction) Validate() error { + var invalid []string + if len(a.alias) == 0 { + invalid = append(invalid, "Alias") + } + if len(a.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Source returns the JSON-serializable data. +func (a *AliasRemoveAction) Source() (interface{}, error) { + a.removeBlankIndexNames() + if err := a.Validate(); err != nil { + return nil, err + } + src := make(map[string]interface{}) + act := make(map[string]interface{}) + src["remove"] = act + act["alias"] = a.alias + switch len(a.index) { + case 1: + act["index"] = a.index[0] + default: + act["indices"] = a.index + } + return src, nil +} + +// -- Service -- + +// AliasService enables users to add or remove an alias. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-aliases.html +// for details. 
+type AliasService struct { + client *Client + actions []AliasAction + pretty bool +} + +// NewAliasService implements a service to manage aliases. +func NewAliasService(client *Client) *AliasService { + builder := &AliasService{ + client: client, + } + return builder +} + +// Pretty asks Elasticsearch to indent the HTTP response. +func (s *AliasService) Pretty(pretty bool) *AliasService { + s.pretty = pretty + return s +} + +// Add adds an alias to an index. +func (s *AliasService) Add(indexName string, aliasName string) *AliasService { + action := NewAliasAddAction(aliasName).Index(indexName) + s.actions = append(s.actions, action) + return s +} + +// Add adds an alias to an index and associates a filter to the alias. +func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService { + action := NewAliasAddAction(aliasName).Index(indexName).Filter(filter) + s.actions = append(s.actions, action) + return s +} + +// Remove removes an alias. +func (s *AliasService) Remove(indexName string, aliasName string) *AliasService { + action := NewAliasRemoveAction(aliasName).Index(indexName) + s.actions = append(s.actions, action) + return s +} + +// Action accepts one or more AliasAction instances which can be +// of type AliasAddAction or AliasRemoveAction. +func (s *AliasService) Action(action ...AliasAction) *AliasService { + s.actions = append(s.actions, action...) + return s +} + +// buildURL builds the URL for the operation. +func (s *AliasService) buildURL() (string, url.Values, error) { + path := "/_aliases" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +// Do executes the command. +func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Body with actions + body := make(map[string]interface{}) + var actions []interface{} + for _, action := range s.actions { + src, err := action.Source() + if err != nil { + return nil, err + } + actions = append(actions, src) + } + body["actions"] = actions + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return results + ret := new(AliasResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of an alias request. + +// AliasResult is the outcome of calling Do on AliasService. +type AliasResult struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go new file mode 100644 index 0000000000000000000000000000000000000000..0437c443b90e588e58659b949be2c51f6bbc5f6b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_mapping.go @@ -0,0 +1,222 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesPutMappingService allows to register specific mapping definition +// for a specific type. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-put-mapping.html +// for details. 
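The add/remove actions above compose into a single _aliases request, which is what makes an atomic alias swap possible. A sketch, with placeholder alias and index names:

// swapAlias atomically repoints an alias from oldIndex to newIndex.
func swapAlias(ctx context.Context, client *elastic.Client, alias, oldIndex, newIndex string) error {
	// Both actions travel in one POST /_aliases body, so readers of the alias
	// never observe a state where it points at neither index.
	_, err := elastic.NewAliasService(client).
		Action(
			elastic.NewAliasRemoveAction(alias).Index(oldIndex),
			elastic.NewAliasAddAction(alias).Index(newIndex),
		).
		Do(ctx)
	return err
}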
+type IndicesPutMappingService struct { + client *Client + pretty bool + typ string + index []string + masterTimeout string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + ignoreConflicts *bool + timeout string + bodyJson map[string]interface{} + bodyString string +} + +// NewPutMappingService is an alias for NewIndicesPutMappingService. +// Use NewIndicesPutMappingService. +func NewPutMappingService(client *Client) *IndicesPutMappingService { + return NewIndicesPutMappingService(client) +} + +// NewIndicesPutMappingService creates a new IndicesPutMappingService. +func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService { + return &IndicesPutMappingService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService { + s.index = append(s.index, indices...) + return s +} + +// Type is the name of the document type. +func (s *IndicesPutMappingService) Type(typ string) *IndicesPutMappingService { + s.typ = typ + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService { + s.masterTimeout = masterTimeout + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. +// This includes `_all` string or when no indices have been specified. +func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService { + s.expandWildcards = expandWildcards + return s +} + +// IgnoreConflicts specifies whether to ignore conflicts while updating +// the mapping (default: false). +func (s *IndicesPutMappingService) IgnoreConflicts(ignoreConflicts bool) *IndicesPutMappingService { + s.ignoreConflicts = &ignoreConflicts + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService { + s.pretty = pretty + return s +} + +// BodyJson contains the mapping definition. +func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService { + s.bodyJson = mapping + return s +} + +// BodyString is the mapping definition serialized as a string. +func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService { + s.bodyString = mapping + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL: Typ MUST be specified and is verified in Validate. + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{ + "index": strings.Join(s.index, ","), + "type": s.typ, + }) + } else { + path, err = uritemplates.Expand("/_mapping/{type}", map[string]string{ + "type": s.typ, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreConflicts != nil { + params.Set("ignore_conflicts", fmt.Sprintf("%v", *s.ignoreConflicts)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutMappingService) Validate() error { + var invalid []string + if s.typ == "" { + invalid = append(invalid, "Type") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(PutMappingResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// PutMappingResponse is the response of IndicesPutMappingService.Do. +type PutMappingResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go new file mode 100644 index 0000000000000000000000000000000000000000..4f246761ca2a5def854651b0d0330588ae1b4184 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_settings.go @@ -0,0 +1,185 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesPutSettingsService changes specific index level settings in +// real time. +// +// See the documentation at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-update-settings.html. 
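+//
+// A minimal usage sketch (client, ctx and the setting shown are illustrative
+// assumptions, not defined in this file):
+//
+//	resp, err := NewIndicesPutSettingsService(client).
+//		Index("my-index").
+//		BodyJson(map[string]interface{}{
+//			"index": map[string]interface{}{"number_of_replicas": 1},
+//		}).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.Acknowledged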
+type IndicesPutSettingsService struct { + client *Client + pretty bool + index []string + allowNoIndices *bool + expandWildcards string + flatSettings *bool + ignoreUnavailable *bool + masterTimeout string + bodyJson interface{} + bodyString string +} + +// NewIndicesPutSettingsService creates a new IndicesPutSettingsService. +func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService { + return &IndicesPutSettingsService{ + client: client, + index: make([]string, 0), + } +} + +// Index is a list of index names the mapping should be added to +// (supports wildcards); use `_all` or omit to add the mapping on all indices. +func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService { + s.index = append(s.index, indices...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` +// string or when no indices have been specified). +func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards specifies whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService { + s.expandWildcards = expandWildcards + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService { + s.flatSettings = &flatSettings + return s +} + +// IgnoreUnavailable specifies whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// MasterTimeout is the timeout for connection to master. +func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: The index settings to be updated. +func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService { + s.bodyJson = body + return s +} + +// BodyString is documented as: The index settings to be updated. +func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_settings" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutSettingsService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesPutSettingsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do. +type IndicesPutSettingsResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go b/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go new file mode 100644 index 0000000000000000000000000000000000000000..53d0027df891184d9b912c70092e5d3ec3fa103c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_put_template.go @@ -0,0 +1,180 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesPutTemplateService creates or updates index mappings. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-templates.html. +type IndicesPutTemplateService struct { + client *Client + pretty bool + name string + order interface{} + create *bool + timeout string + masterTimeout string + flatSettings *bool + bodyJson interface{} + bodyString string +} + +// NewIndicesPutTemplateService creates a new IndicesPutTemplateService. +func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService { + return &IndicesPutTemplateService{ + client: client, + } +} + +// Name is the name of the index template. +func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService { + s.name = name + return s +} + +// Timeout is an explicit operation timeout. 
+func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService { + s.timeout = timeout + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService { + s.masterTimeout = masterTimeout + return s +} + +// FlatSettings indicates whether to return settings in flat format (default: false). +func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService { + s.flatSettings = &flatSettings + return s +} + +// Order is the order for this template when merging multiple matching ones +// (higher numbers are merged later, overriding the lower numbers). +func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService { + s.order = order + return s +} + +// Create indicates whether the index template should only be added if +// new or can also replace an existing one. +func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService { + s.create = &create + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService { + s.pretty = pretty + return s +} + +// BodyJson is documented as: The template definition. +func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService { + s.bodyJson = body + return s +} + +// BodyString is documented as: The template definition. +func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_template/{name}", map[string]string{ + "name": s.name, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.order != nil { + params.Set("order", fmt.Sprintf("%v", s.order)) + } + if s.create != nil { + params.Set("create", fmt.Sprintf("%v", *s.create)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesPutTemplateService) Validate() error { + var invalid []string + if s.name == "" { + invalid = append(invalid, "Name") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
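+//
+// A minimal usage sketch (client, ctx and the template body are illustrative
+// assumptions, not defined in this file):
+//
+//	resp, err := NewIndicesPutTemplateService(client).
+//		Name("my-template").
+//		BodyString(`{"template": "logs-*", "settings": {"number_of_shards": 1}}`).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = resp.Acknowledged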
+func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesPutTemplateResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do. +type IndicesPutTemplateResponse struct { + Acknowledged bool `json:"acknowledged,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go new file mode 100644 index 0000000000000000000000000000000000000000..53cee2a634b3b5c3a353a1bfbfdc0f209a0acf2f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_refresh.go @@ -0,0 +1,105 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// RefreshService explicitly refreshes one or more indices. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-refresh.html. +type RefreshService struct { + client *Client + index []string + force *bool + pretty bool +} + +// NewRefreshService creates a new instance of RefreshService. +func NewRefreshService(client *Client) *RefreshService { + builder := &RefreshService{ + client: client, + } + return builder +} + +// Index specifies the indices to refresh. +func (s *RefreshService) Index(index ...string) *RefreshService { + s.index = append(s.index, index...) + return s +} + +// Force forces a refresh. +func (s *RefreshService) Force(force bool) *RefreshService { + s.force = &force + return s +} + +// Pretty asks Elasticsearch to return indented JSON. +func (s *RefreshService) Pretty(pretty bool) *RefreshService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *RefreshService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_refresh" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.force != nil { + params.Set("force", fmt.Sprintf("%v", *s.force)) + } + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + return path, params, nil +} + +// Do executes the request. 
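+//
+// A minimal usage sketch (client, ctx and the index name are illustrative
+// assumptions, not defined in this file):
+//
+//	res, err := NewRefreshService(client).Index("my-index").Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Shards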
+func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return result + ret := new(RefreshResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Result of a refresh request. + +// RefreshResult is the outcome of RefreshService.Do. +type RefreshResult struct { + Shards shardsInfo `json:"_shards,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go new file mode 100644 index 0000000000000000000000000000000000000000..b6af635619308b79cd66783ae8c4b477e4079cf7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_rollover.go @@ -0,0 +1,268 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesRolloverService rolls an alias over to a new index when the +// existing index is considered to be too large or too old. +// +// It is documented at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-rollover-index.html. +type IndicesRolloverService struct { + client *Client + pretty bool + dryRun bool + newIndex string + alias string + masterTimeout string + timeout string + waitForActiveShards string + conditions map[string]interface{} + settings map[string]interface{} + mappings map[string]interface{} + bodyJson interface{} + bodyString string +} + +// NewIndicesRolloverService creates a new IndicesRolloverService. +func NewIndicesRolloverService(client *Client) *IndicesRolloverService { + return &IndicesRolloverService{ + client: client, + conditions: make(map[string]interface{}), + settings: make(map[string]interface{}), + mappings: make(map[string]interface{}), + } +} + +// Alias is the name of the alias to rollover. +func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService { + s.alias = alias + return s +} + +// NewIndex is the name of the rollover index. +func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService { + s.newIndex = newIndex + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout sets an explicit operation timeout. +func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService { + s.timeout = timeout + return s +} + +// WaitForActiveShards sets the number of active shards to wait for on the +// newly created rollover index before the operation returns. +func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService { + s.pretty = pretty + return s +} + +// DryRun, when set, specifies that only conditions are checked without +// performing the actual rollover. 
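+//
+// For example, a dry-run rollover sketch (client, ctx, the alias name and the
+// conditions are illustrative assumptions, not defined in this file):
+//
+//	res, err := NewIndicesRolloverService(client).
+//		Alias("logs-write").
+//		AddMaxIndexAgeCondition("7d").
+//		AddMaxIndexDocsCondition(1000000).
+//		DryRun(true).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.RolledOver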
+func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService { + s.dryRun = dryRun + return s +} + +// Conditions allows to specify all conditions as a dictionary. +func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService { + s.conditions = conditions + return s +} + +// AddCondition adds a condition to the rollover decision. +func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService { + s.conditions[name] = value + return s +} + +// AddMaxIndexAgeCondition adds a condition to set the max index age. +func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService { + s.conditions["max_age"] = time + return s +} + +// AddMaxIndexDocsCondition adds a condition to set the max documents in the index. +func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService { + s.conditions["max_docs"] = docs + return s +} + +// Settings adds the index settings. +func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService { + s.settings = settings + return s +} + +// AddSetting adds an index setting. +func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService { + s.settings[name] = value + return s +} + +// Mappings adds the index mappings. +func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService { + s.mappings = mappings + return s +} + +// AddMapping adds a mapping for the given type. +func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService { + s.mappings[typ] = mapping + return s +} + +// BodyJson sets the conditions that needs to be met for executing rollover, +// specified as a serializable JSON instance which is sent as the body of +// the request. +func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService { + s.bodyJson = body + return s +} + +// BodyString sets the conditions that needs to be met for executing rollover, +// specified as a string which is sent as the body of the request. +func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService { + s.bodyString = body + return s +} + +// getBody returns the body of the request, if not explicitly set via +// BodyJson or BodyString. +func (s *IndicesRolloverService) getBody() interface{} { + body := make(map[string]interface{}) + if len(s.conditions) > 0 { + body["conditions"] = s.conditions + } + if len(s.settings) > 0 { + body["settings"] = s.settings + } + if len(s.mappings) > 0 { + body["mappings"] = s.mappings + } + return body +} + +// buildURL builds the URL for the operation. 
+func (s *IndicesRolloverService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if s.newIndex != "" { + path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{ + "alias": s.alias, + "new_index": s.newIndex, + }) + } else { + path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{ + "alias": s.alias, + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.dryRun { + params.Set("dry_run", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesRolloverService) Validate() error { + var invalid []string + if s.alias == "" { + invalid = append(invalid, "Alias") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } else { + body = s.getBody() + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesRolloverResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesRolloverResponse is the response of IndicesRolloverService.Do. +type IndicesRolloverResponse struct { + OldIndex string `json:"old_index"` + NewIndex string `json:"new_index"` + RolledOver bool `json:"rolled_over"` + DryRun bool `json:"dry_run"` + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` + Conditions map[string]bool `json:"conditions"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go new file mode 100644 index 0000000000000000000000000000000000000000..bfde8d96fb0bfa1ab519ad2861221e416d41f2ed --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_shrink.go @@ -0,0 +1,174 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesShrinkService allows you to shrink an existing index into a +// new index with fewer primary shards. +// +// For further details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-shrink-index.html. 
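+//
+// A minimal usage sketch (client, ctx and the index names are illustrative
+// assumptions, not defined in this file):
+//
+//	res, err := NewIndicesShrinkService(client).
+//		Source("my-large-index").
+//		Target("my-small-index").
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Acknowledged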
+type IndicesShrinkService struct { + client *Client + pretty bool + source string + target string + masterTimeout string + timeout string + waitForActiveShards string + bodyJson interface{} + bodyString string +} + +// NewIndicesShrinkService creates a new IndicesShrinkService. +func NewIndicesShrinkService(client *Client) *IndicesShrinkService { + return &IndicesShrinkService{ + client: client, + } +} + +// Source is the name of the source index to shrink. +func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService { + s.source = source + return s +} + +// Target is the name of the target index to shrink into. +func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService { + s.target = target + return s +} + +// MasterTimeout specifies the timeout for connection to master. +func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout is an explicit operation timeout. +func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService { + s.timeout = timeout + return s +} + +// WaitForActiveShards sets the number of active shards to wait for on +// the shrunken index before the operation returns. +func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService { + s.pretty = pretty + return s +} + +// BodyJson is the configuration for the target index (`settings` and `aliases`) +// defined as a JSON-serializable instance to be sent as the request body. +func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService { + s.bodyJson = body + return s +} + +// BodyString is the configuration for the target index (`settings` and `aliases`) +// defined as a string to send as the request body. +func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesShrinkService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{ + "source": s.source, + "target": s.target, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesShrinkService) Validate() error { + var invalid []string + if s.source == "" { + invalid = append(invalid, "Source") + } + if s.target == "" { + invalid = append(invalid, "Target") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
+func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesShrinkResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesShrinkResponse is the response of IndicesShrinkService.Do. +type IndicesShrinkResponse struct { + Acknowledged bool `json:"acknowledged"` + ShardsAcknowledged bool `json:"shards_acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go b/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..b4f7bfd2091771a273fe41c01fed54d95ad318b6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/indices_stats.go @@ -0,0 +1,386 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IndicesStatsService provides stats on various metrics of one or more +// indices. See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/indices-stats.html. +type IndicesStatsService struct { + client *Client + pretty bool + metric []string + index []string + level string + types []string + completionFields []string + fielddataFields []string + fields []string + groups []string + human *bool +} + +// NewIndicesStatsService creates a new IndicesStatsService. +func NewIndicesStatsService(client *Client) *IndicesStatsService { + return &IndicesStatsService{ + client: client, + index: make([]string, 0), + metric: make([]string, 0), + completionFields: make([]string, 0), + fielddataFields: make([]string, 0), + fields: make([]string, 0), + groups: make([]string, 0), + types: make([]string, 0), + } +} + +// Metric limits the information returned the specific metrics. Options are: +// docs, store, indexing, get, search, completion, fielddata, flush, merge, +// query_cache, refresh, suggest, and warmer. +func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService { + s.metric = append(s.metric, metric...) + return s +} + +// Index is the list of index names; use `_all` or empty string to perform +// the operation on all indices. +func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService { + s.index = append(s.index, indices...) + return s +} + +// Type is a list of document types for the `indexing` index metric. +func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService { + s.types = append(s.types, types...) + return s +} + +// Level returns stats aggregated at cluster, index or shard level. +func (s *IndicesStatsService) Level(level string) *IndicesStatsService { + s.level = level + return s +} + +// CompletionFields is a list of fields for `fielddata` and `suggest` +// index metric (supports wildcards). 
+func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService { + s.completionFields = append(s.completionFields, completionFields...) + return s +} + +// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards). +func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService { + s.fielddataFields = append(s.fielddataFields, fielddataFields...) + return s +} + +// Fields is a list of fields for `fielddata` and `completion` index metric +// (supports wildcards). +func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService { + s.fields = append(s.fields, fields...) + return s +} + +// Groups is a list of search groups for `search` index metric. +func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService { + s.groups = append(s.groups, groups...) + return s +} + +// Human indicates whether to return time and byte values in human-readable format.. +func (s *IndicesStatsService) Human(human bool) *IndicesStatsService { + s.human = &human + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IndicesStatsService) buildURL() (string, url.Values, error) { + var err error + var path string + if len(s.index) > 0 && len(s.metric) > 0 { + path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{ + "index": strings.Join(s.index, ","), + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_stats", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.metric) > 0 { + path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{ + "metric": strings.Join(s.metric, ","), + }) + } else { + path = "/_stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.groups) > 0 { + params.Set("groups", strings.Join(s.groups, ",")) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.level != "" { + params.Set("level", s.level) + } + if len(s.types) > 0 { + params.Set("types", strings.Join(s.types, ",")) + } + if len(s.completionFields) > 0 { + params.Set("completion_fields", strings.Join(s.completionFields, ",")) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IndicesStatsService) Validate() error { + return nil +} + +// Do executes the operation. 
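+//
+// A minimal usage sketch (client, ctx and the index name are illustrative
+// assumptions, not defined in this file):
+//
+//	res, err := NewIndicesStatsService(client).
+//		Index("my-index").
+//		Metric("docs", "store").
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	if stats, ok := res.Indices["my-index"]; ok && stats.Total != nil && stats.Total.Docs != nil {
+//		_ = stats.Total.Docs.Count
+//	}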
+func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IndicesStatsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IndicesStatsResponse is the response of IndicesStatsService.Do. +type IndicesStatsResponse struct { + // Shards provides information returned from shards. + Shards shardsInfo `json:"_shards"` + + // All provides summary stats about all indices. + All *IndexStats `json:"_all,omitempty"` + + // Indices provides a map into the stats of an index. The key of the + // map is the index name. + Indices map[string]*IndexStats `json:"indices,omitempty"` +} + +// IndexStats is index stats for a specific index. +type IndexStats struct { + Primaries *IndexStatsDetails `json:"primaries,omitempty"` + Total *IndexStatsDetails `json:"total,omitempty"` +} + +type IndexStatsDetails struct { + Docs *IndexStatsDocs `json:"docs,omitempty"` + Store *IndexStatsStore `json:"store,omitempty"` + Indexing *IndexStatsIndexing `json:"indexing,omitempty"` + Get *IndexStatsGet `json:"get,omitempty"` + Search *IndexStatsSearch `json:"search,omitempty"` + Merges *IndexStatsMerges `json:"merges,omitempty"` + Refresh *IndexStatsRefresh `json:"refresh,omitempty"` + Flush *IndexStatsFlush `json:"flush,omitempty"` + Warmer *IndexStatsWarmer `json:"warmer,omitempty"` + FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"` + IdCache *IndexStatsIdCache `json:"id_cache,omitempty"` + Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"` + Percolate *IndexStatsPercolate `json:"percolate,omitempty"` + Completion *IndexStatsCompletion `json:"completion,omitempty"` + Segments *IndexStatsSegments `json:"segments,omitempty"` + Translog *IndexStatsTranslog `json:"translog,omitempty"` + Suggest *IndexStatsSuggest `json:"suggest,omitempty"` + QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"` +} + +type IndexStatsDocs struct { + Count int64 `json:"count,omitempty"` + Deleted int64 `json:"deleted,omitempty"` +} + +type IndexStatsStore struct { + Size string `json:"size,omitempty"` // human size, e.g. 119.3mb + SizeInBytes int64 `json:"size_in_bytes,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` // human time, e.g. 
0s + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsIndexing struct { + IndexTotal int64 `json:"index_total,omitempty"` + IndexTime string `json:"index_time,omitempty"` + IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"` + IndexCurrent int64 `json:"index_current,omitempty"` + DeleteTotal int64 `json:"delete_total,omitempty"` + DeleteTime string `json:"delete_time,omitempty"` + DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"` + DeleteCurrent int64 `json:"delete_current,omitempty"` + NoopUpdateTotal int64 `json:"noop_update_total,omitempty"` + IsThrottled bool `json:"is_throttled,omitempty"` + ThrottleTime string `json:"throttle_time,omitempty"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` +} + +type IndexStatsGet struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + ExistsTotal int64 `json:"exists_total,omitempty"` + ExistsTime string `json:"exists_time,omitempty"` + ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"` + MissingTotal int64 `json:"missing_total,omitempty"` + MissingTime string `json:"missing_time,omitempty"` + MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsSearch struct { + OpenContexts int64 `json:"open_contexts,omitempty"` + QueryTotal int64 `json:"query_total,omitempty"` + QueryTime string `json:"query_time,omitempty"` + QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"` + QueryCurrent int64 `json:"query_current,omitempty"` + FetchTotal int64 `json:"fetch_total,omitempty"` + FetchTime string `json:"fetch_time,omitempty"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"` + FetchCurrent int64 `json:"fetch_current,omitempty"` +} + +type IndexStatsMerges struct { + Current int64 `json:"current,omitempty"` + CurrentDocs int64 `json:"current_docs,omitempty"` + CurrentSize string `json:"current_size,omitempty"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` + TotalDocs int64 `json:"total_docs,omitempty"` + TotalSize string `json:"total_size,omitempty"` + TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"` +} + +type IndexStatsRefresh struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFlush struct { + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsWarmer struct { + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + TotalTime string `json:"total_time,omitempty"` + TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` +} + +type IndexStatsFilterCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsIdCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` +} + +type IndexStatsFielddata struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 
`json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` +} + +type IndexStatsPercolate struct { + Total int64 `json:"total,omitempty"` + GetTime string `json:"get_time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Queries int64 `json:"queries,omitempty"` +} + +type IndexStatsCompletion struct { + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSegments struct { + Count int64 `json:"count,omitempty"` + Memory string `json:"memory,omitempty"` + MemoryInBytes int64 `json:"memory_in_bytes,omitempty"` + IndexWriterMemory string `json:"index_writer_memory,omitempty"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes,omitempty"` + IndexWriterMaxMemory string `json:"index_writer_max_memory,omitempty"` + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes,omitempty"` + VersionMapMemory string `json:"version_map_memory,omitempty"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes,omitempty"` + FixedBitSetMemory string `json:"fixed_bit_set,omitempty"` + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes,omitempty"` +} + +type IndexStatsTranslog struct { + Operations int64 `json:"operations,omitempty"` + Size string `json:"size,omitempty"` + SizeInBytes int64 `json:"size_in_bytes,omitempty"` +} + +type IndexStatsSuggest struct { + Total int64 `json:"total,omitempty"` + Time string `json:"time,omitempty"` + TimeInMillis int64 `json:"time_in_millis,omitempty"` + Current int64 `json:"current,omitempty"` +} + +type IndexStatsQueryCache struct { + MemorySize string `json:"memory_size,omitempty"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` + Evictions int64 `json:"evictions,omitempty"` + HitCount int64 `json:"hit_count,omitempty"` + MissCount int64 `json:"miss_count,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go new file mode 100644 index 0000000000000000000000000000000000000000..9fe97798a0ca4fff55b626900ef8cef0688ed008 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_delete_pipeline.go @@ -0,0 +1,124 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestDeletePipelineService deletes pipelines by ID. +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/delete-pipeline-api.html. +type IngestDeletePipelineService struct { + client *Client + pretty bool + id string + masterTimeout string + timeout string +} + +// NewIngestDeletePipelineService creates a new IngestDeletePipelineService. +func NewIngestDeletePipelineService(client *Client) *IngestDeletePipelineService { + return &IngestDeletePipelineService{ + client: client, + } +} + +// Id is documented as: Pipeline ID. +func (s *IngestDeletePipelineService) Id(id string) *IngestDeletePipelineService { + s.id = id + return s +} + +// MasterTimeout is documented as: Explicit operation timeout for connection to master node. 
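+//
+// A minimal usage sketch of the service (client, ctx, the pipeline ID and the
+// timeout value are illustrative assumptions, not defined in this file):
+//
+//	res, err := NewIngestDeletePipelineService(client).
+//		Id("my-pipeline").
+//		MasterTimeout("30s").
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Acknowledged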
+func (s *IngestDeletePipelineService) MasterTimeout(masterTimeout string) *IngestDeletePipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout is documented as: Explicit operation timeout. +func (s *IngestDeletePipelineService) Timeout(timeout string) *IngestDeletePipelineService { + s.timeout = timeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestDeletePipelineService) Pretty(pretty bool) *IngestDeletePipelineService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestDeletePipelineService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "DELETE", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestDeletePipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do. +type IngestDeletePipelineResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go new file mode 100644 index 0000000000000000000000000000000000000000..61199477cd6375d3ae9253bb1b12dcb4ed913a37 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_get_pipeline.go @@ -0,0 +1,118 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestGetPipelineService returns pipelines based on ID. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/get-pipeline-api.html +// for documentation. +type IngestGetPipelineService struct { + client *Client + pretty bool + id []string + masterTimeout string +} + +// NewIngestGetPipelineService creates a new IngestGetPipelineService. +func NewIngestGetPipelineService(client *Client) *IngestGetPipelineService { + return &IngestGetPipelineService{ + client: client, + } +} + +// Id is a list of pipeline ids. Wildcards supported. 
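+//
+// A minimal usage sketch (client, ctx and the pipeline IDs are illustrative
+// assumptions, not defined in this file):
+//
+//	res, err := NewIngestGetPipelineService(client).
+//		Id("my-pipeline", "other-*").
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	for id, pipeline := range res {
+//		_ = id
+//		_ = pipeline.Config
+//	}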
+func (s *IngestGetPipelineService) Id(id ...string) *IngestGetPipelineService { + s.id = append(s.id, id...) + return s +} + +// MasterTimeout is an explicit operation timeout for connection to master node. +func (s *IngestGetPipelineService) MasterTimeout(masterTimeout string) *IngestGetPipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestGetPipelineService) Pretty(pretty bool) *IngestGetPipelineService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if len(s.id) > 0 { + path, err = uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": strings.Join(s.id, ","), + }) + } else { + path = "/_ingest/pipeline" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestGetPipelineService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + var ret IngestGetPipelineResponse + if err := json.Unmarshal(res.Body, &ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestGetPipelineResponse is the response of IngestGetPipelineService.Do. +type IngestGetPipelineResponse map[string]*IngestGetPipeline + +type IngestGetPipeline struct { + ID string `json:"id"` + Config map[string]interface{} `json:"config"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go new file mode 100644 index 0000000000000000000000000000000000000000..1ab423d5cd138bd912f7bd15fa457daa0207351f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_put_pipeline.go @@ -0,0 +1,152 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestPutPipelineService adds pipelines and updates existing pipelines in +// the cluster. +// +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/put-pipeline-api.html. +type IngestPutPipelineService struct { + client *Client + pretty bool + id string + masterTimeout string + timeout string + bodyJson interface{} + bodyString string +} + +// NewIngestPutPipelineService creates a new IngestPutPipelineService. +func NewIngestPutPipelineService(client *Client) *IngestPutPipelineService { + return &IngestPutPipelineService{ + client: client, + } +} + +// Id is the pipeline ID. 
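+//
+// A minimal usage sketch of the service (client, ctx, the pipeline ID and the
+// pipeline body are illustrative assumptions, not defined in this file):
+//
+//	res, err := NewIngestPutPipelineService(client).
+//		Id("my-pipeline").
+//		BodyString(`{"description": "rename field", "processors": [{"rename": {"field": "src", "target_field": "dst"}}]}`).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	_ = res.Acknowledged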
+func (s *IngestPutPipelineService) Id(id string) *IngestPutPipelineService { + s.id = id + return s +} + +// MasterTimeout is an explicit operation timeout for connection to master node. +func (s *IngestPutPipelineService) MasterTimeout(masterTimeout string) *IngestPutPipelineService { + s.masterTimeout = masterTimeout + return s +} + +// Timeout specifies an explicit operation timeout. +func (s *IngestPutPipelineService) Timeout(timeout string) *IngestPutPipelineService { + s.timeout = timeout + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestPutPipelineService) Pretty(pretty bool) *IngestPutPipelineService { + s.pretty = pretty + return s +} + +// BodyJson is the ingest definition, defined as a JSON-serializable document. +// Use e.g. a map[string]interface{} here. +func (s *IngestPutPipelineService) BodyJson(body interface{}) *IngestPutPipelineService { + s.bodyJson = body + return s +} + +// BodyString is the ingest definition, specified as a string. +func (s *IngestPutPipelineService) BodyString(body string) *IngestPutPipelineService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.masterTimeout != "" { + params.Set("master_timeout", s.masterTimeout) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestPutPipelineService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestPutPipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do. +type IngestPutPipelineResponse struct { + Acknowledged bool `json:"acknowledged"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go new file mode 100644 index 0000000000000000000000000000000000000000..7d41e353bf15549fcc1e8b107619bec8788838e0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ingest_simulate_pipeline.go @@ -0,0 +1,157 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// IngestSimulatePipelineService executes a specific pipeline against the set of +// documents provided in the body of the request. +// +// The API is documented at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/simulate-pipeline-api.html. +type IngestSimulatePipelineService struct { + client *Client + pretty bool + id string + verbose *bool + bodyJson interface{} + bodyString string +} + +// NewIngestSimulatePipelineService creates a new IngestSimulatePipeline. +func NewIngestSimulatePipelineService(client *Client) *IngestSimulatePipelineService { + return &IngestSimulatePipelineService{ + client: client, + } +} + +// Id specifies the pipeline ID. +func (s *IngestSimulatePipelineService) Id(id string) *IngestSimulatePipelineService { + s.id = id + return s +} + +// Verbose mode. Display data output for each processor in executed pipeline. +func (s *IngestSimulatePipelineService) Verbose(verbose bool) *IngestSimulatePipelineService { + s.verbose = &verbose + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *IngestSimulatePipelineService) Pretty(pretty bool) *IngestSimulatePipelineService { + s.pretty = pretty + return s +} + +// BodyJson is the ingest definition, defined as a JSON-serializable simulate +// definition. Use e.g. a map[string]interface{} here. +func (s *IngestSimulatePipelineService) BodyJson(body interface{}) *IngestSimulatePipelineService { + s.bodyJson = body + return s +} + +// BodyString is the simulate definition, defined as a string. +func (s *IngestSimulatePipelineService) BodyString(body string) *IngestSimulatePipelineService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) { + var err error + var path string + + // Build URL + if s.id != "" { + path, err = uritemplates.Expand("/_ingest/pipeline/{id}/_simulate", map[string]string{ + "id": s.id, + }) + } else { + path = "/_ingest/pipeline/_simulate" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.verbose != nil { + params.Set("verbose", fmt.Sprintf("%v", *s.verbose)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *IngestSimulatePipelineService) Validate() error { + var invalid []string + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. 
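+//
+// A minimal usage sketch (client, ctx, the pipeline ID and the documents are
+// illustrative assumptions, not defined in this file):
+//
+//	res, err := NewIngestSimulatePipelineService(client).
+//		Id("my-pipeline").
+//		Verbose(true).
+//		BodyString(`{"docs": [{"_source": {"src": "value"}}]}`).
+//		Do(ctx)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, doc := range res.Docs {
+//		_ = doc.Doc
+//	}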
+func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulatePipelineResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(IngestSimulatePipelineResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// IngestSimulatePipelineResponse is the response of IngestSimulatePipeline.Do. +type IngestSimulatePipelineResponse struct { + Docs []*IngestSimulateDocumentResult `json:"docs"` +} + +type IngestSimulateDocumentResult struct { + Doc map[string]interface{} `json:"doc"` + ProcessorResults []*IngestSimulateProcessorResult `json:"processor_results"` +} + +type IngestSimulateProcessorResult struct { + ProcessorTag string `json:"tag"` + Doc map[string]interface{} `json:"doc"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/inner_hit.go b/vendor/gopkg.in/olivere/elastic.v5/inner_hit.go new file mode 100644 index 0000000000000000000000000000000000000000..c371fbf791cedb7295082ead4f39159606435ac4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/inner_hit.go @@ -0,0 +1,160 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// InnerHit implements a simple join for parent/child, nested, and even +// top-level documents in Elasticsearch. +// It is an experimental feature for Elasticsearch versions 1.5 (or greater). +// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-inner-hits.html +// for documentation. +// +// See the tests for SearchSource, HasChildFilter, HasChildQuery, +// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery +// for usage examples. +type InnerHit struct { + source *SearchSource + path string + typ string + + name string +} + +// NewInnerHit creates a new InnerHit. +func NewInnerHit() *InnerHit { + return &InnerHit{source: NewSearchSource()} +} + +func (hit *InnerHit) Path(path string) *InnerHit { + hit.path = path + return hit +} + +func (hit *InnerHit) Type(typ string) *InnerHit { + hit.typ = typ + return hit +} + +func (hit *InnerHit) Query(query Query) *InnerHit { + hit.source.Query(query) + return hit +} + +func (hit *InnerHit) From(from int) *InnerHit { + hit.source.From(from) + return hit +} + +func (hit *InnerHit) Size(size int) *InnerHit { + hit.source.Size(size) + return hit +} + +func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit { + hit.source.TrackScores(trackScores) + return hit +} + +func (hit *InnerHit) Explain(explain bool) *InnerHit { + hit.source.Explain(explain) + return hit +} + +func (hit *InnerHit) Version(version bool) *InnerHit { + hit.source.Version(version) + return hit +} + +func (hit *InnerHit) StoredField(storedFieldName string) *InnerHit { + hit.source.StoredField(storedFieldName) + return hit +} + +func (hit *InnerHit) StoredFields(storedFieldNames ...string) *InnerHit { + hit.source.StoredFields(storedFieldNames...) 
+ return hit +} + +func (hit *InnerHit) NoStoredFields() *InnerHit { + hit.source.NoStoredFields() + return hit +} + +func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit { + hit.source.FetchSource(fetchSource) + return hit +} + +func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit { + hit.source.FetchSourceContext(fetchSourceContext) + return hit +} + +func (hit *InnerHit) DocvalueFields(docvalueFields ...string) *InnerHit { + hit.source.DocvalueFields(docvalueFields...) + return hit +} + +func (hit *InnerHit) DocvalueField(docvalueField string) *InnerHit { + hit.source.DocvalueField(docvalueField) + return hit +} + +func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit { + hit.source.ScriptFields(scriptFields...) + return hit +} + +func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit { + hit.source.ScriptField(scriptField) + return hit +} + +func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit { + hit.source.Sort(field, ascending) + return hit +} + +func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit { + hit.source.SortWithInfo(info) + return hit +} + +func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit { + hit.source.SortBy(sorter...) + return hit +} + +func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit { + hit.source.Highlight(highlight) + return hit +} + +func (hit *InnerHit) Highlighter() *Highlight { + return hit.source.Highlighter() +} + +func (hit *InnerHit) Name(name string) *InnerHit { + hit.name = name + return hit +} + +func (hit *InnerHit) Source() (interface{}, error) { + src, err := hit.source.Source() + if err != nil { + return nil, err + } + source, ok := src.(map[string]interface{}) + if !ok { + return nil, nil + } + + // Notice that hit.typ and hit.path are not exported here. + // They are only used with SearchSource and serialized there. + + if hit.name != "" { + source["name"] = hit.name + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/logger.go b/vendor/gopkg.in/olivere/elastic.v5/logger.go new file mode 100644 index 0000000000000000000000000000000000000000..095eb4cd4e0c693ecf540e6db4e79db6523477d2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/logger.go @@ -0,0 +1,10 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Logger specifies the interface for all log operations. +type Logger interface { + Printf(format string, v ...interface{}) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/mget.go b/vendor/gopkg.in/olivere/elastic.v5/mget.go new file mode 100644 index 0000000000000000000000000000000000000000..9e87abf3e1d35c8f3d28a184720277c06b9a9a35 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/mget.go @@ -0,0 +1,253 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// MgetService allows to get multiple documents based on an index, +// type (optional) and id (possibly routing). The response includes +// a docs array with all the fetched documents, each element similar +// in structure to a document provided by the Get API. 
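For the InnerHit builder defined in inner_hit.go above, a short sketch of how it is typically attached to a nested query so that each top-level hit carries its best-matching nested documents. The NestedQuery builder and its InnerHit option are assumed from the query support elsewhere in this package, as the InnerHit comment suggests; the field names are invented.

package example

import (
	elastic "gopkg.in/olivere/elastic.v5"
)

// nestedWithInnerHits builds a nested query on a "comments" field and asks
// for the three newest matching comments per parent hit.
func nestedWithInnerHits() elastic.Query {
	inner := elastic.NewInnerHit().
		Name("recent_comments").
		Size(3).
		Sort("comments.created_at", false) // newest first (descending)
	return elastic.NewNestedQuery("comments",
		elastic.NewTermQuery("comments.author", "alice")).
		InnerHit(inner) // assumed option on NestedQuery
}
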
+// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-multi-get.html +// for details. +type MgetService struct { + client *Client + pretty bool + preference string + realtime *bool + refresh string + routing string + storedFields []string + items []*MultiGetItem +} + +// NewMgetService initializes a new Multi GET API request call. +func NewMgetService(client *Client) *MgetService { + builder := &MgetService{ + client: client, + } + return builder +} + +// Preference specifies the node or shard the operation should be performed +// on (default: random). +func (s *MgetService) Preference(preference string) *MgetService { + s.preference = preference + return s +} + +// Refresh the shard containing the document before performing the operation. +func (s *MgetService) Refresh(refresh string) *MgetService { + s.refresh = refresh + return s +} + +// Realtime specifies whether to perform the operation in realtime or search mode. +func (s *MgetService) Realtime(realtime bool) *MgetService { + s.realtime = &realtime + return s +} + +// Routing is the specific routing value. +func (s *MgetService) Routing(routing string) *MgetService { + s.routing = routing + return s +} + +// StoredFields is a list of fields to return in the response. +func (s *MgetService) StoredFields(storedFields ...string) *MgetService { + s.storedFields = append(s.storedFields, storedFields...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MgetService) Pretty(pretty bool) *MgetService { + s.pretty = pretty + return s +} + +// Add an item to the request. +func (s *MgetService) Add(items ...*MultiGetItem) *MgetService { + s.items = append(s.items, items...) + return s +} + +// Source returns the request body, which will be serialized into JSON. +func (s *MgetService) Source() (interface{}, error) { + source := make(map[string]interface{}) + items := make([]interface{}, len(s.items)) + for i, item := range s.items { + src, err := item.Source() + if err != nil { + return nil, err + } + items[i] = src + } + source["docs"] = items + return source, nil +} + +// Do executes the request. +func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) { + // Build url + path := "/_mget" + + params := make(url.Values) + if s.realtime != nil { + params.Add("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.preference != "" { + params.Add("preference", s.preference) + } + if s.refresh != "" { + params.Add("refresh", s.refresh) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + + // Set body + body, err := s.Source() + if err != nil { + return nil, err + } + + // Get response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MgetResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Multi Get Item -- + +// MultiGetItem is a single document to retrieve via the MgetService. +type MultiGetItem struct { + index string + typ string + id string + routing string + storedFields []string + version *int64 // see org.elasticsearch.common.lucene.uid.Versions + versionType string // see org.elasticsearch.index.VersionType + fsc *FetchSourceContext +} + +// NewMultiGetItem initializes a new, single item for a Multi GET request. 
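A usage sketch for MgetService together with MultiGetItem, whose constructor and builder methods follow immediately below: two documents from the same index are fetched in one round trip. The index, type and ids are illustrative, and GetResult (with its Id and Found fields) is assumed from the package's Get API support.

package example

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

// mgetSketch retrieves two documents in a single Multi GET request.
func mgetSketch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewMgetService(client).
		Add(
			elastic.NewMultiGetItem().Index("twitter").Type("tweet").Id("1"),
			elastic.NewMultiGetItem().Index("twitter").Type("tweet").Id("2"),
		).
		Do(ctx)
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		fmt.Printf("id=%s found=%v\n", doc.Id, doc.Found)
	}
	return nil
}
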
+func NewMultiGetItem() *MultiGetItem { + return &MultiGetItem{} +} + +// Index specifies the index name. +func (item *MultiGetItem) Index(index string) *MultiGetItem { + item.index = index + return item +} + +// Type specifies the type name. +func (item *MultiGetItem) Type(typ string) *MultiGetItem { + item.typ = typ + return item +} + +// Id specifies the identifier of the document. +func (item *MultiGetItem) Id(id string) *MultiGetItem { + item.id = id + return item +} + +// Routing is the specific routing value. +func (item *MultiGetItem) Routing(routing string) *MultiGetItem { + item.routing = routing + return item +} + +// StoredFields is a list of fields to return in the response. +func (item *MultiGetItem) StoredFields(storedFields ...string) *MultiGetItem { + item.storedFields = append(item.storedFields, storedFields...) + return item +} + +// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1), +// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions. +// The default in Elasticsearch is MatchAny (-3). +func (item *MultiGetItem) Version(version int64) *MultiGetItem { + item.version = &version + return item +} + +// VersionType can be "internal", "external", "external_gt", "external_gte", +// or "force". See org.elasticsearch.index.VersionType in Elasticsearch source. +// It is "internal" by default. +func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem { + item.versionType = versionType + return item +} + +// FetchSource allows to specify source filtering. +func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem { + item.fsc = fetchSourceContext + return item +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiGet search. +func (item *MultiGetItem) Source() (interface{}, error) { + source := make(map[string]interface{}) + + source["_id"] = item.id + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.routing != "" { + source["_routing"] = item.routing + } + if len(item.storedFields) > 0 { + source["stored_fields"] = strings.Join(item.storedFields, ",") + } + if item.version != nil { + source["version"] = fmt.Sprintf("%d", *item.version) + } + if item.versionType != "" { + source["version_type"] = item.versionType + } + + return source, nil +} + +// -- Result of a Multi Get request. + +// MgetResponse is the outcome of a Multi GET API request. +type MgetResponse struct { + Docs []*GetResult `json:"docs,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/msearch.go b/vendor/gopkg.in/olivere/elastic.v5/msearch.go new file mode 100644 index 0000000000000000000000000000000000000000..6866954d4f5db59a7d754fd102f200429864b9eb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/msearch.go @@ -0,0 +1,97 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +// MultiSearch executes one or more searches in one roundtrip. 
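A sketch of the MultiSearchService defined just below: two independent searches are sent in one _msearch round trip and their hit counts read back from MultiSearchResult. NewSearchRequest, its Source method, NewSearchSource and the query constructors are assumed from elsewhere in the package, as is SearchResult.Hits.TotalHits; index and field names are illustrative.

package example

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

// msearchSketch issues two searches in a single request.
func msearchSketch(ctx context.Context, client *elastic.Client) error {
	req1 := elastic.NewSearchRequest().Index("twitter").
		Source(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()))
	req2 := elastic.NewSearchRequest().Index("twitter").
		Source(elastic.NewSearchSource().Query(elastic.NewTermQuery("user", "olivere")))

	res, err := elastic.NewMultiSearchService(client).
		Add(req1, req2).
		Do(ctx)
	if err != nil {
		return err
	}
	for i, r := range res.Responses {
		fmt.Printf("search %d: %d hits\n", i, r.Hits.TotalHits)
	}
	return nil
}
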
+type MultiSearchService struct { + client *Client + requests []*SearchRequest + indices []string + pretty bool + routing string + preference string +} + +func NewMultiSearchService(client *Client) *MultiSearchService { + builder := &MultiSearchService{ + client: client, + requests: make([]*SearchRequest, 0), + indices: make([]string, 0), + } + return builder +} + +func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService { + s.requests = append(s.requests, requests...) + return s +} + +func (s *MultiSearchService) Index(indices ...string) *MultiSearchService { + s.indices = append(s.indices, indices...) + return s +} + +func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { + s.pretty = pretty + return s +} + +func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) { + // Build url + path := "/_msearch" + + // Parameters + params := make(url.Values) + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + + // Set body + var lines []string + for _, sr := range s.requests { + // Set default indices if not specified in the request + if !sr.HasIndices() && len(s.indices) > 0 { + sr = sr.Index(s.indices...) + } + + header, err := json.Marshal(sr.header()) + if err != nil { + return nil, err + } + body, err := json.Marshal(sr.body()) + if err != nil { + return nil, err + } + lines = append(lines, string(header)) + lines = append(lines, string(body)) + } + body := strings.Join(lines, "\n") + "\n" // Don't forget trailing \n + + // Get response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(MultiSearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +type MultiSearchResult struct { + Responses []*SearchResult `json:"responses,omitempty"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go new file mode 100644 index 0000000000000000000000000000000000000000..4017b9f326bc66025825ac902086eeb760c02fff --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/mtermvectors.go @@ -0,0 +1,471 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// MultiTermvectorService returns information and statistics on terms in the +// fields of a particular document. The document could be stored in the +// index or artificially provided by the user. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-multi-termvectors.html +// for documentation. +type MultiTermvectorService struct { + client *Client + pretty bool + index string + typ string + fieldStatistics *bool + fields []string + ids []string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool + version interface{} + versionType string + bodyJson interface{} + bodyString string + docs []*MultiTermvectorItem +} + +// NewMultiTermvectorService creates a new MultiTermvectorService. 
+func NewMultiTermvectorService(client *Client) *MultiTermvectorService { + return &MultiTermvectorService{ + client: client, + } +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *MultiTermvectorService) Pretty(pretty bool) *MultiTermvectorService { + s.pretty = pretty + return s +} + +// Add adds documents to MultiTermvectors service. +func (s *MultiTermvectorService) Add(docs ...*MultiTermvectorItem) *MultiTermvectorService { + s.docs = append(s.docs, docs...) + return s +} + +// Index in which the document resides. +func (s *MultiTermvectorService) Index(index string) *MultiTermvectorService { + s.index = index + return s +} + +// Type of the document. +func (s *MultiTermvectorService) Type(typ string) *MultiTermvectorService { + s.typ = typ + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) FieldStatistics(fieldStatistics bool) *MultiTermvectorService { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields is a comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Fields(fields []string) *MultiTermvectorService { + s.fields = fields + return s +} + +// Ids is a comma-separated list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body. +func (s *MultiTermvectorService) Ids(ids []string) *MultiTermvectorService { + s.ids = ids + return s +} + +// Offsets specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Offsets(offsets bool) *MultiTermvectorService { + s.offsets = &offsets + return s +} + +// Parent id of documents. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Parent(parent string) *MultiTermvectorService { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Payloads(payloads bool) *MultiTermvectorService { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Positions(positions bool) *MultiTermvectorService { + s.positions = &positions + return s +} + +// Preference specifies the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) Preference(preference string) *MultiTermvectorService { + s.preference = preference + return s +} + +// Realtime specifies if requests are real-time as opposed to near-real-time (default: true). +func (s *MultiTermvectorService) Realtime(realtime bool) *MultiTermvectorService { + s.realtime = &realtime + return s +} + +// Routing specific routing value. Applies to all returned documents unless otherwise specified in body "params" or "docs". 
+func (s *MultiTermvectorService) Routing(routing string) *MultiTermvectorService { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". +func (s *MultiTermvectorService) TermStatistics(termStatistics bool) *MultiTermvectorService { + s.termStatistics = &termStatistics + return s +} + +// Version is explicit version number for concurrency control. +func (s *MultiTermvectorService) Version(version interface{}) *MultiTermvectorService { + s.version = version + return s +} + +// VersionType is specific version type. +func (s *MultiTermvectorService) VersionType(versionType string) *MultiTermvectorService { + s.versionType = versionType + return s +} + +// BodyJson is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. +func (s *MultiTermvectorService) BodyJson(body interface{}) *MultiTermvectorService { + s.bodyJson = body + return s +} + +// BodyString is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. +func (s *MultiTermvectorService) BodyString(body string) *MultiTermvectorService { + s.bodyString = body + return s +} + +func (s *MultiTermvectorService) Source() interface{} { + source := make(map[string]interface{}) + docs := make([]interface{}, len(s.docs)) + for i, doc := range s.docs { + docs[i] = doc.Source() + } + source["docs"] = docs + return source +} + +// buildURL builds the URL for the operation. +func (s *MultiTermvectorService) buildURL() (string, url.Values, error) { + var path string + var err error + + if s.index != "" && s.typ != "" { + path, err = uritemplates.Expand("/{index}/{type}/_mtermvectors", map[string]string{ + "index": s.index, + "type": s.typ, + }) + } else if s.index != "" && s.typ == "" { + path, err = uritemplates.Expand("/{index}/_mtermvectors", map[string]string{ + "index": s.index, + }) + } else { + path = "/_mtermvectors" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.fieldStatistics != nil { + params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics)) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if len(s.ids) > 0 { + params.Set("ids", strings.Join(s.ids, ",")) + } + if s.offsets != nil { + params.Set("offsets", fmt.Sprintf("%v", *s.offsets)) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.payloads != nil { + params.Set("payloads", fmt.Sprintf("%v", *s.payloads)) + } + if s.positions != nil { + params.Set("positions", fmt.Sprintf("%v", *s.positions)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.termStatistics != nil { + params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. 
+func (s *MultiTermvectorService) Validate() error { + var invalid []string + if s.index == "" && s.typ != "" { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if len(s.bodyString) > 0 { + body = s.bodyString + } else { + body = s.Source() + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(MultiTermvectorResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// MultiTermvectorResponse is the response of MultiTermvectorService.Do. +type MultiTermvectorResponse struct { + Docs []*TermvectorsResponse `json:"docs"` +} + +// -- MultiTermvectorItem -- + +// MultiTermvectorItem is a single document to retrieve via MultiTermvectorService. +type MultiTermvectorItem struct { + index string + typ string + id string + doc interface{} + fieldStatistics *bool + fields []string + perFieldAnalyzer map[string]string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool +} + +func NewMultiTermvectorItem() *MultiTermvectorItem { + return &MultiTermvectorItem{} +} + +func (s *MultiTermvectorItem) Index(index string) *MultiTermvectorItem { + s.index = index + return s +} + +func (s *MultiTermvectorItem) Type(typ string) *MultiTermvectorItem { + s.typ = typ + return s +} + +func (s *MultiTermvectorItem) Id(id string) *MultiTermvectorItem { + s.id = id + return s +} + +// Doc is the document to analyze. +func (s *MultiTermvectorItem) Doc(doc interface{}) *MultiTermvectorItem { + s.doc = doc + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies +// and sum of total term frequencies should be returned. +func (s *MultiTermvectorItem) FieldStatistics(fieldStatistics bool) *MultiTermvectorItem { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields a list of fields to return. +func (s *MultiTermvectorItem) Fields(fields ...string) *MultiTermvectorItem { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +// PerFieldAnalyzer allows to specify a different analyzer than the one +// at the field. +func (s *MultiTermvectorItem) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *MultiTermvectorItem { + s.perFieldAnalyzer = perFieldAnalyzer + return s +} + +// Offsets specifies if term offsets should be returned. +func (s *MultiTermvectorItem) Offsets(offsets bool) *MultiTermvectorItem { + s.offsets = &offsets + return s +} + +// Parent id of documents. +func (s *MultiTermvectorItem) Parent(parent string) *MultiTermvectorItem { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. +func (s *MultiTermvectorItem) Payloads(payloads bool) *MultiTermvectorItem { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. 
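A usage sketch for MultiTermvectorService and MultiTermvectorItem as defined above: term vectors for one field of two stored documents are fetched in a single request. Index, type, document ids and the field name are illustrative.

package example

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

// mtermvectorsSketch asks for term vectors of one field across two documents.
func mtermvectorsSketch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewMultiTermvectorService(client).
		Index("twitter").
		Type("tweet").
		Add(
			elastic.NewMultiTermvectorItem().Id("1").Fields("message"),
			elastic.NewMultiTermvectorItem().Id("2").Fields("message"),
		).
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("received term vectors for %d documents\n", len(res.Docs))
	return nil
}
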
+func (s *MultiTermvectorItem) Positions(positions bool) *MultiTermvectorItem { + s.positions = &positions + return s +} + +// Preference specify the node or shard the operation +// should be performed on (default: random). +func (s *MultiTermvectorItem) Preference(preference string) *MultiTermvectorItem { + s.preference = preference + return s +} + +// Realtime specifies if request is real-time as opposed to +// near-real-time (default: true). +func (s *MultiTermvectorItem) Realtime(realtime bool) *MultiTermvectorItem { + s.realtime = &realtime + return s +} + +// Routing is a specific routing value. +func (s *MultiTermvectorItem) Routing(routing string) *MultiTermvectorItem { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency +// should be returned. +func (s *MultiTermvectorItem) TermStatistics(termStatistics bool) *MultiTermvectorItem { + s.termStatistics = &termStatistics + return s +} + +// Source returns the serialized JSON to be sent to Elasticsearch as +// part of a MultiTermvector. +func (s *MultiTermvectorItem) Source() interface{} { + source := make(map[string]interface{}) + + source["_id"] = s.id + + if s.index != "" { + source["_index"] = s.index + } + if s.typ != "" { + source["_type"] = s.typ + } + if s.fields != nil { + source["fields"] = s.fields + } + if s.fieldStatistics != nil { + source["field_statistics"] = fmt.Sprintf("%v", *s.fieldStatistics) + } + if s.offsets != nil { + source["offsets"] = s.offsets + } + if s.parent != "" { + source["parent"] = s.parent + } + if s.payloads != nil { + source["payloads"] = fmt.Sprintf("%v", *s.payloads) + } + if s.positions != nil { + source["positions"] = fmt.Sprintf("%v", *s.positions) + } + if s.preference != "" { + source["preference"] = s.preference + } + if s.realtime != nil { + source["realtime"] = fmt.Sprintf("%v", *s.realtime) + } + if s.routing != "" { + source["routing"] = s.routing + } + if s.termStatistics != nil { + source["term_statistics"] = fmt.Sprintf("%v", *s.termStatistics) + } + if s.doc != nil { + source["doc"] = s.doc + } + if s.perFieldAnalyzer != nil && len(s.perFieldAnalyzer) > 0 { + source["per_field_analyzer"] = s.perFieldAnalyzer + } + + return source +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go new file mode 100644 index 0000000000000000000000000000000000000000..cb0f23e048c29bc2987271a907bd282611ba765a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_info.go @@ -0,0 +1,310 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + "time" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// NodesInfoService allows to retrieve one or more or all of the +// cluster nodes information. +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-info.html. +type NodesInfoService struct { + client *Client + pretty bool + nodeId []string + metric []string + flatSettings *bool + human *bool +} + +// NewNodesInfoService creates a new NodesInfoService. +func NewNodesInfoService(client *Client) *NodesInfoService { + return &NodesInfoService{ + client: client, + nodeId: []string{"_all"}, + metric: []string{"_all"}, + } +} + +// NodeId is a list of node IDs or names to limit the returned information. 
+// Use "_local" to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// Metric is a list of metrics you wish returned. Leave empty to return all. +// Valid metrics are: settings, os, process, jvm, thread_pool, network, +// transport, http, and plugins. +func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService { + s.metric = append(s.metric, metric...) + return s +} + +// FlatSettings returns settings in flat format (default: false). +func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService { + s.flatSettings = &flatSettings + return s +} + +// Human indicates whether to return time and byte values in human-readable format. +func (s *NodesInfoService) Human(human bool) *NodesInfoService { + s.human = &human + return s +} + +// Pretty indicates whether to indent the returned JSON. +func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *NodesInfoService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + "metric": strings.Join(s.metric, ","), + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.flatSettings != nil { + params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.pretty { + params.Set("pretty", "1") + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *NodesInfoService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(NodesInfoResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// NodesInfoResponse is the response of NodesInfoService.Do. +type NodesInfoResponse struct { + ClusterName string `json:"cluster_name"` + Nodes map[string]*NodesInfoNode `json:"nodes"` +} + +type NodesInfoNode struct { + // Name of the node, e.g. "Mister Fear" + Name string `json:"name"` + // TransportAddress, e.g. "127.0.0.1:9300" + TransportAddress string `json:"transport_address"` + // Host is the host name, e.g. "macbookair" + Host string `json:"host"` + // IP is the IP address, e.g. "192.168.1.2" + IP string `json:"ip"` + // Version is the Elasticsearch version running on the node, e.g. "1.4.3" + Version string `json:"version"` + // Build is the Elasticsearch build, e.g. "36a29a7" + Build string `json:"build"` + // HTTPAddress, e.g. "127.0.0.1:9200" + HTTPAddress string `json:"http_address"` + // HTTPSAddress, e.g. "127.0.0.1:9200" + HTTPSAddress string `json:"https_address"` + + // Attributes of the node. + Attributes map[string]interface{} `json:"attributes"` + + // Settings of the node, e.g. paths and pidfile. 
+ Settings map[string]interface{} `json:"settings"` + + // OS information, e.g. CPU and memory. + OS *NodesInfoNodeOS `json:"os"` + + // Process information, e.g. max file descriptors. + Process *NodesInfoNodeProcess `json:"process"` + + // JVM information, e.g. VM version. + JVM *NodesInfoNodeJVM `json:"jvm"` + + // ThreadPool information. + ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"` + + // Network information. + Network *NodesInfoNodeNetwork `json:"network"` + + // Network information. + Transport *NodesInfoNodeTransport `json:"transport"` + + // HTTP information. + HTTP *NodesInfoNodeHTTP `json:"http"` + + // Plugins information. + Plugins []*NodesInfoNodePlugin `json:"plugins"` +} + +type NodesInfoNodeOS struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + AvailableProcessors int `json:"available_processors"` // e.g. 4 + + // CPU information + CPU struct { + Vendor string `json:"vendor"` // e.g. Intel + Model string `json:"model"` // e.g. iMac15,1 + MHz int `json:"mhz"` // e.g. 3500 + TotalCores int `json:"total_cores"` // e.g. 4 + TotalSockets int `json:"total_sockets"` // e.g. 4 + CoresPerSocket int `json:"cores_per_socket"` // e.g. 16 + CacheSizeInBytes int `json:"cache_size_in_bytes"` // e.g. 256 + } `json:"cpu"` + + // Mem information + Mem struct { + Total string `json:"total"` // e.g. 16gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 17179869184 + } `json:"mem"` + + // Swap information + Swap struct { + Total string `json:"total"` // e.g. 1gb + TotalInBytes int `json:"total_in_bytes"` // e.g. 1073741824 + } `json:"swap"` +} + +type NodesInfoNodeProcess struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + ID int `json:"id"` // process id, e.g. 87079 + MaxFileDescriptors int `json:"max_file_descriptors"` // e.g. 32768 + Mlockall bool `json:"mlockall"` // e.g. false +} + +type NodesInfoNodeJVM struct { + PID int `json:"pid"` // process id, e.g. 87079 + Version string `json:"version"` // e.g. "1.8.0_25" + VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" + VMVersion string `json:"vm_version"` // e.g. "25.25-b02" + VMVendor string `json:"vm_vendor"` // e.g. "Oracle Corporation" + StartTime time.Time `json:"start_time"` // e.g. "2015-01-03T15:18:30.982Z" + StartTimeInMillis int64 `json:"start_time_in_millis"` + + // Mem information + Mem struct { + HeapInit string `json:"heap_init"` // e.g. 1gb + HeapInitInBytes int `json:"heap_init_in_bytes"` + HeapMax string `json:"heap_max"` // e.g. 4gb + HeapMaxInBytes int `json:"heap_max_in_bytes"` + NonHeapInit string `json:"non_heap_init"` // e.g. 2.4mb + NonHeapInitInBytes int `json:"non_heap_init_in_bytes"` + NonHeapMax string `json:"non_heap_max"` // e.g. 0b + NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"` + DirectMax string `json:"direct_max"` // e.g. 4gb + DirectMaxInBytes int `json:"direct_max_in_bytes"` + } `json:"mem"` + + GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew"] + MemoryPools []string `json:"memory_pools"` // e.g. 
["Code Cache", "Metaspace"] +} + +type NodesInfoNodeThreadPool struct { + Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"` + Bench *NodesInfoNodeThreadPoolSection `json:"bench"` + Listener *NodesInfoNodeThreadPoolSection `json:"listener"` + Index *NodesInfoNodeThreadPoolSection `json:"index"` + Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"` + Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"` + Generic *NodesInfoNodeThreadPoolSection `json:"generic"` + Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"` + Search *NodesInfoNodeThreadPoolSection `json:"search"` + Flush *NodesInfoNodeThreadPoolSection `json:"flush"` + Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"` + Management *NodesInfoNodeThreadPoolSection `json:"management"` + Get *NodesInfoNodeThreadPoolSection `json:"get"` + Merge *NodesInfoNodeThreadPoolSection `json:"merge"` + Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"` + Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"` +} + +type NodesInfoNodeThreadPoolSection struct { + Type string `json:"type"` // e.g. fixed + Min int `json:"min"` // e.g. 4 + Max int `json:"max"` // e.g. 4 + KeepAlive string `json:"keep_alive"` // e.g. "5m" + QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1 +} + +type NodesInfoNodeNetwork struct { + RefreshInterval string `json:"refresh_interval"` // e.g. 1s + RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 + PrimaryInterface struct { + Address string `json:"address"` // e.g. 192.168.1.2 + Name string `json:"name"` // e.g. en0 + MACAddress string `json:"mac_address"` // e.g. 11:22:33:44:55:66 + } `json:"primary_interface"` +} + +type NodesInfoNodeTransport struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` + Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"` +} + +type NodesInfoNodeTransportProfile struct { + BoundAddress []string `json:"bound_address"` + PublishAddress string `json:"publish_address"` +} + +type NodesInfoNodeHTTP struct { + BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"] + PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9300" + MaxContentLength string `json:"max_content_length"` // e.g. "100mb" + MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"` +} + +type NodesInfoNodePlugin struct { + Name string `json:"name"` + Description string `json:"description"` + Site bool `json:"site"` + JVM bool `json:"jvm"` + URL string `json:"url"` // e.g. /_plugin/dummy/ +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..e93b39c4004c9d87453fd447f143ebc7ed091671 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/nodes_stats.go @@ -0,0 +1,707 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// NodesStatsService returns node statistics. +// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/cluster-nodes-stats.html +// for details. 
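For the NodesInfoService in nodes_info.go above, a minimal sketch: since the constructor already defaults both the node list and the metric list to "_all", calling Do without further options returns information for every node. Only fields declared above (ClusterName, Name, Version) are read; everything else is illustrative.

package example

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

// nodesInfoSketch lists every node in the cluster with its Elasticsearch version.
func nodesInfoSketch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewNodesInfoService(client).Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("cluster %q has %d node(s)\n", res.ClusterName, len(res.Nodes))
	for id, node := range res.Nodes {
		fmt.Printf("  %s: %s (Elasticsearch %s)\n", id, node.Name, node.Version)
	}
	return nil
}
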
+type NodesStatsService struct { + client *Client + pretty bool + metric []string + indexMetric []string + nodeId []string + completionFields []string + fielddataFields []string + fields []string + groups *bool + human *bool + level string + timeout string + types []string +} + +// NewNodesStatsService creates a new NodesStatsService. +func NewNodesStatsService(client *Client) *NodesStatsService { + return &NodesStatsService{ + client: client, + } +} + +// Metric limits the information returned to the specified metrics. +func (s *NodesStatsService) Metric(metric ...string) *NodesStatsService { + s.metric = append(s.metric, metric...) + return s +} + +// IndexMetric limits the information returned for `indices` metric +// to the specific index metrics. Isn't used if `indices` (or `all`) +// metric isn't specified.. +func (s *NodesStatsService) IndexMetric(indexMetric ...string) *NodesStatsService { + s.indexMetric = append(s.indexMetric, indexMetric...) + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *NodesStatsService) NodeId(nodeId ...string) *NodesStatsService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// CompletionFields is a list of fields for `fielddata` and `suggest` +// index metric (supports wildcards). +func (s *NodesStatsService) CompletionFields(completionFields ...string) *NodesStatsService { + s.completionFields = append(s.completionFields, completionFields...) + return s +} + +// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards). +func (s *NodesStatsService) FielddataFields(fielddataFields ...string) *NodesStatsService { + s.fielddataFields = append(s.fielddataFields, fielddataFields...) + return s +} + +// Fields is a list of fields for `fielddata` and `completion` index metric (supports wildcards). +func (s *NodesStatsService) Fields(fields ...string) *NodesStatsService { + s.fields = append(s.fields, fields...) + return s +} + +// Groups is a list of search groups for `search` index metric. +func (s *NodesStatsService) Groups(groups bool) *NodesStatsService { + s.groups = &groups + return s +} + +// Human indicates whether to return time and byte values in human-readable format. +func (s *NodesStatsService) Human(human bool) *NodesStatsService { + s.human = &human + return s +} + +// Level specifies whether to return indices stats aggregated at node, index or shard level. +func (s *NodesStatsService) Level(level string) *NodesStatsService { + s.level = level + return s +} + +// Timeout specifies an explicit operation timeout. +func (s *NodesStatsService) Timeout(timeout string) *NodesStatsService { + s.timeout = timeout + return s +} + +// Types a list of document types for the `indexing` index metric. +func (s *NodesStatsService) Types(types ...string) *NodesStatsService { + s.types = append(s.types, types...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *NodesStatsService) Pretty(pretty bool) *NodesStatsService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
+func (s *NodesStatsService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 { + path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}/{index_metric}", map[string]string{ + "index_metric": strings.Join(s.indexMetric, ","), + "node_id": strings.Join(s.nodeId, ","), + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 { + path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 { + path, err = uritemplates.Expand("/_nodes/{node_id}/stats/_all/{index_metric}", map[string]string{ + "index_metric": strings.Join(s.indexMetric, ","), + "node_id": strings.Join(s.nodeId, ","), + }) + } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 { + path, err = uritemplates.Expand("/_nodes/{node_id}/stats", map[string]string{ + "node_id": strings.Join(s.nodeId, ","), + }) + } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 { + path, err = uritemplates.Expand("/_nodes/stats/{metric}/{index_metric}", map[string]string{ + "index_metric": strings.Join(s.indexMetric, ","), + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 { + path, err = uritemplates.Expand("/_nodes/stats/{metric}", map[string]string{ + "metric": strings.Join(s.metric, ","), + }) + } else if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 { + path, err = uritemplates.Expand("/_nodes/stats/_all/{index_metric}", map[string]string{ + "index_metric": strings.Join(s.indexMetric, ","), + }) + } else { // if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 { + path = "/_nodes/stats" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.completionFields) > 0 { + params.Set("completion_fields", strings.Join(s.completionFields, ",")) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.groups != nil { + params.Set("groups", fmt.Sprintf("%v", *s.groups)) + } + if s.human != nil { + params.Set("human", fmt.Sprintf("%v", *s.human)) + } + if s.level != "" { + params.Set("level", s.level) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if len(s.types) > 0 { + params.Set("types", strings.Join(s.types, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *NodesStatsService) Validate() error { + return nil +} + +// Do executes the operation. 
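A sketch of how the URL building above plays out in practice: restricting the request to the jvm and indices metrics and to the docs and store index metrics, with no node ids, takes the "/_nodes/stats/{metric}/{index_metric}" branch and yields /_nodes/stats/jvm,indices/docs,store. The Do method and the response fields used here (Nodes, Name, JVM.Mem.HeapUsed) are defined below; the chosen metrics are illustrative.

package example

import (
	"fmt"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

// nodesStatsSketch fetches a subset of node statistics and prints heap usage.
func nodesStatsSketch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewNodesStatsService(client).
		Metric("jvm", "indices").
		IndexMetric("docs", "store").
		Human(true).
		Do(ctx)
	if err != nil {
		return err
	}
	for id, node := range res.Nodes {
		fmt.Printf("%s (%s): heap used %s\n", id, node.Name, node.JVM.Mem.HeapUsed)
	}
	return nil
}
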
+func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(NodesStatsResponse) + if err := json.Unmarshal(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// NodesStatsResponse is the response of NodesStatsService.Do. +type NodesStatsResponse struct { + ClusterName string `json:"cluster_name"` + Nodes map[string]*NodesStatsNode `json:"nodes"` +} + +type NodesStatsNode struct { + // Timestamp when these stats we're gathered. + Timestamp int64 `json:"timestamp"` + // Name of the node, e.g. "Mister Fear" + Name string `json:"name"` + // TransportAddress, e.g. "127.0.0.1:9300" + TransportAddress string `json:"transport_address"` + // Host is the host name, e.g. "macbookair" + Host string `json:"host"` + // IP is an IP address, e.g. "192.168.1.2" + IP string `json:"ip"` + // Roles is a list of the roles of the node, e.g. master, data, ingest. + Roles []string `json:"roles"` + + // Attributes of the node. + Attributes map[string]interface{} `json:"attributes"` + + // Indices returns index information. + Indices *NodesStatsIndex `json:"indices"` + + // OS information, e.g. CPU and memory. + OS *NodesStatsNodeOS `json:"os"` + + // Process information, e.g. max file descriptors. + Process *NodesStatsNodeProcess `json:"process"` + + // JVM information, e.g. VM version. + JVM *NodesStatsNodeJVM `json:"jvm"` + + // ThreadPool information. + ThreadPool map[string]*NodesStatsNodeThreadPool `json:"thread_pool"` + + // FS returns information about the filesystem. + FS *NodesStatsNodeFS `json:"fs"` + + // Network information. + Transport *NodesStatsNodeTransport `json:"transport"` + + // HTTP information. + HTTP *NodesStatsNodeHTTP `json:"http"` + + // Breaker contains information about circuit breakers. + Breaker map[string]*NodesStatsBreaker `json:"breaker"` + + // ScriptStats information. + ScriptStats *NodesStatsScriptStats `json:"script"` + + // Discovery information. 
+ Discovery *NodesStatsDiscovery `json:"discovery"` + + // Ingest information + Ingest *NodesStatsIngest `json:"ingest"` +} + +type NodesStatsIndex struct { + Docs *NodesStatsDocsStats `json:"docs"` + Store *NodesStatsStoreStats `json:"store"` + Indexing *NodesStatsIndexingStats `json:"indexing"` + Get *NodesStatsGetStats `json:"get"` + Search *NodesStatsSearchStats `json:"search"` + Merges *NodesStatsMergeStats `json:"merges"` + Refresh *NodesStatsRefreshStats `json:"refresh"` + Flush *NodesStatsFlushStats `json:"flush"` + Warmer *NodesStatsWarmerStats `json:"warmer"` + QueryCache *NodesStatsQueryCacheStats `json:"query_cache"` + Fielddata *NodesStatsFielddataStats `json:"fielddata"` + Percolate *NodesStatsPercolateStats `json:"percolate"` + Completion *NodesStatsCompletionStats `json:"completion"` + Segments *NodesStatsSegmentsStats `json:"segments"` + Translog *NodesStatsTranslogStats `json:"translog"` + Suggest *NodesStatsSuggestStats `json:"suggest"` + RequestCache *NodesStatsRequestCacheStats `json:"request_cache"` + Recovery NodesStatsRecoveryStats `json:"recovery"` + + Indices map[string]*NodesStatsIndex `json:"indices"` // for level=indices + Shards map[string]*NodesStatsIndex `json:"shards"` // for level=shards +} + +type NodesStatsDocsStats struct { + Count int64 `json:"count"` + Deleted int64 `json:"deleted"` +} + +type NodesStatsStoreStats struct { + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` + ThrottleTime string `json:"throttle_time"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type NodesStatsIndexingStats struct { + IndexTotal int64 `json:"index_total"` + IndexTime string `json:"index_time"` + IndexTimeInMillis int64 `json:"index_time_in_millis"` + IndexCurrent int64 `json:"index_current"` + IndexFailed int64 `json:"index_failed"` + DeleteTotal int64 `json:"delete_total"` + DeleteTime string `json:"delete_time"` + DeleteTimeInMillis int64 `json:"delete_time_in_millis"` + DeleteCurrent int64 `json:"delete_current"` + NoopUpdateTotal int64 `json:"noop_update_total"` + IsThrottled bool `json:"is_throttled"` + ThrottleTime string `json:"throttle_time"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` + + Types map[string]*NodesStatsIndexingStats `json:"types"` // stats for individual types +} + +type NodesStatsGetStats struct { + Total int64 `json:"total"` + Time string `json:"get_time"` + TimeInMillis int64 `json:"time_in_millis"` + Exists int64 `json:"exists"` + ExistsTime string `json:"exists_time"` + ExistsTimeInMillis int64 `json:"exists_in_millis"` + Missing int64 `json:"missing"` + MissingTime string `json:"missing_time"` + MissingTimeInMillis int64 `json:"missing_in_millis"` + Current int64 `json:"current"` +} + +type NodesStatsSearchStats struct { + OpenContexts int64 `json:"open_contexts"` + QueryTotal int64 `json:"query_total"` + QueryTime string `json:"query_time"` + QueryTimeInMillis int64 `json:"query_time_in_millis"` + QueryCurrent int64 `json:"query_current"` + FetchTotal int64 `json:"fetch_total"` + FetchTime string `json:"fetch_time"` + FetchTimeInMillis int64 `json:"fetch_time_in_millis"` + FetchCurrent int64 `json:"fetch_current"` + ScrollTotal int64 `json:"scroll_total"` + ScrollTime string `json:"scroll_time"` + ScrollTimeInMillis int64 `json:"scroll_time_in_millis"` + ScrollCurrent int64 `json:"scroll_current"` + + Groups map[string]*NodesStatsSearchStats `json:"groups"` // stats for individual groups +} + +type NodesStatsMergeStats struct { + Current int64 `json:"current"` + CurrentDocs int64 
`json:"current_docs"` + CurrentSize string `json:"current_size"` + CurrentSizeInBytes int64 `json:"current_size_in_bytes"` + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` + TotalDocs int64 `json:"total_docs"` + TotalSize string `json:"total_size"` + TotalSizeInBytes int64 `json:"total_size_in_bytes"` + TotalStoppedTime string `json:"total_stopped_time"` + TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"` + TotalThrottledTime string `json:"total_throttled_time"` + TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"` + TotalThrottleBytes string `json:"total_auto_throttle"` + TotalThrottleBytesInBytes int64 `json:"total_auto_throttle_in_bytes"` +} + +type NodesStatsRefreshStats struct { + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +type NodesStatsFlushStats struct { + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +type NodesStatsWarmerStats struct { + Current int64 `json:"current"` + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` +} + +type NodesStatsQueryCacheStats struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + TotalCount int64 `json:"total_count"` + HitCount int64 `json:"hit_count"` + MissCount int64 `json:"miss_count"` + CacheSize int64 `json:"cache_size"` + CacheCount int64 `json:"cache_count"` + Evictions int64 `json:"evictions"` +} + +type NodesStatsFielddataStats struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + Fields map[string]struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + } `json:"fields"` +} + +type NodesStatsPercolateStats struct { + Total int64 `json:"total"` + Time string `json:"time"` + TimeInMillis int64 `json:"time_in_millis"` + Current int64 `json:"current"` + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Queries int64 `json:"queries"` +} + +type NodesStatsCompletionStats struct { + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` + Fields map[string]struct { + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` + } `json:"fields"` +} + +type NodesStatsSegmentsStats struct { + Count int64 `json:"count"` + Memory string `json:"memory"` + MemoryInBytes int64 `json:"memory_in_bytes"` + TermsMemory string `json:"terms_memory"` + TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"` + StoredFieldsMemory string `json:"stored_fields_memory"` + StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"` + TermVectorsMemory string `json:"term_vectors_memory"` + TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"` + NormsMemory string `json:"norms_memory"` + NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"` + DocValuesMemory string `json:"doc_values_memory"` + DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"` + IndexWriterMemory string `json:"index_writer_memory"` + IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` + IndexWriterMaxMemory string `json:"index_writer_max_memory"` + IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` + 
VersionMapMemory string `json:"version_map_memory"` + VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` + FixedBitSetMemory string `json:"fixed_bit_set"` // not a typo + FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` +} + +type NodesStatsTranslogStats struct { + Operations int64 `json:"operations"` + Size string `json:"size"` + SizeInBytes int64 `json:"size_in_bytes"` +} + +type NodesStatsSuggestStats struct { + Total int64 `json:"total"` + TotalTime string `json:"total_time"` + TotalTimeInMillis int64 `json:"total_time_in_millis"` + Current int64 `json:"current"` +} + +type NodesStatsRequestCacheStats struct { + MemorySize string `json:"memory_size"` + MemorySizeInBytes int64 `json:"memory_size_in_bytes"` + Evictions int64 `json:"evictions"` + HitCount int64 `json:"hit_count"` + MissCount int64 `json:"miss_count"` +} + +type NodesStatsRecoveryStats struct { + CurrentAsSource int `json:"current_as_source"` + CurrentAsTarget int `json:"current_as_target"` + ThrottleTime string `json:"throttle_time"` + ThrottleTimeInMillis int64 `json:"throttle_time_in_millis"` +} + +type NodesStatsNodeOS struct { + Timestamp int64 `json:"timestamp"` + CPU *NodesStatsNodeOSCPU `json:"cpu"` + Mem *NodesStatsNodeOSMem `json:"mem"` + Swap *NodesStatsNodeOSSwap `json:"swap"` +} + +type NodesStatsNodeOSCPU struct { + Percent int `json:"percent"` + LoadAverage map[string]float64 `json:"load_average"` // keys are: 1m, 5m, and 15m +} + +type NodesStatsNodeOSMem struct { + Total string `json:"total"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` + FreeInBytes int64 `json:"free_in_bytes"` + Used string `json:"used"` + UsedInBytes int64 `json:"used_in_bytes"` + FreePercent int `json:"free_percent"` + UsedPercent int `json:"used_percent"` +} + +type NodesStatsNodeOSSwap struct { + Total string `json:"total"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` + FreeInBytes int64 `json:"free_in_bytes"` + Used string `json:"used"` + UsedInBytes int64 `json:"used_in_bytes"` +} + +type NodesStatsNodeProcess struct { + Timestamp int64 `json:"timestamp"` + OpenFileDescriptors int64 `json:"open_file_descriptors"` + MaxFileDescriptors int64 `json:"max_file_descriptors"` + CPU struct { + Percent int `json:"percent"` + Total string `json:"total"` + TotalInMillis int64 `json:"total_in_millis"` + } `json:"cpu"` + Mem struct { + TotalVirtual string `json:"total_virtual"` + TotalVirtualInBytes int64 `json:"total_virtual_in_bytes"` + } `json:"mem"` +} + +type NodesStatsNodeJVM struct { + Timestamp int64 `json:"timestamp"` + Uptime string `json:"uptime"` + UptimeInMillis int64 `json:"uptime_in_millis"` + Mem *NodesStatsNodeJVMMem `json:"mem"` + Threads *NodesStatsNodeJVMThreads `json:"threads"` + GC *NodesStatsNodeJVMGC `json:"gc"` + BufferPools map[string]*NodesStatsNodeJVMBufferPool `json:"buffer_pools"` + Classes *NodesStatsNodeJVMClasses `json:"classes"` +} + +type NodesStatsNodeJVMMem struct { + HeapUsed string `json:"heap_used"` + HeapUsedInBytes int64 `json:"heap_used_in_bytes"` + HeapUsedPercent int `json:"heap_used_percent"` + HeapCommitted string `json:"heap_committed"` + HeapCommittedInBytes int64 `json:"heap_committed_in_bytes"` + HeapMax string `json:"heap_max"` + HeapMaxInBytes int64 `json:"heap_max_in_bytes"` + NonHeapUsed string `json:"non_heap_used"` + NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"` + NonHeapCommitted string `json:"non_heap_committed"` + NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"` + 
Pools map[string]struct { + Used string `json:"used"` + UsedInBytes int64 `json:"used_in_bytes"` + Max string `json:"max"` + MaxInBytes int64 `json:"max_in_bytes"` + PeakUsed string `json:"peak_used"` + PeakUsedInBytes int64 `json:"peak_used_in_bytes"` + PeakMax string `json:"peak_max"` + PeakMaxInBytes int64 `json:"peak_max_in_bytes"` + } `json:"pools"` +} + +type NodesStatsNodeJVMThreads struct { + Count int64 `json:"count"` + PeakCount int64 `json:"peak_count"` +} + +type NodesStatsNodeJVMGC struct { + Collectors map[string]*NodesStatsNodeJVMGCCollector `json:"collectors"` +} + +type NodesStatsNodeJVMGCCollector struct { + CollectionCount int64 `json:"collection_count"` + CollectionTime string `json:"collection_time"` + CollectionTimeInMillis int64 `json:"collection_time_in_millis"` +} + +type NodesStatsNodeJVMBufferPool struct { + Count int64 `json:"count"` + TotalCapacity string `json:"total_capacity"` + TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"` +} + +type NodesStatsNodeJVMClasses struct { + CurrentLoadedCount int64 `json:"current_loaded_count"` + TotalLoadedCount int64 `json:"total_loaded_count"` + TotalUnloadedCount int64 `json:"total_unloaded_count"` +} + +type NodesStatsNodeThreadPool struct { + Threads int `json:"threads"` + Queue int `json:"queue"` + Active int `json:"active"` + Rejected int64 `json:"rejected"` + Largest int `json:"largest"` + Completed int64 `json:"completed"` +} + +type NodesStatsNodeFS struct { + Timestamp int64 `json:"timestamp"` + Total *NodesStatsNodeFSEntry `json:"total"` + Data []*NodesStatsNodeFSEntry `json:"data"` + IOStats *NodesStatsNodeFSIOStats `json:"io_stats"` +} + +type NodesStatsNodeFSEntry struct { + Path string `json:"path"` + Mount string `json:"mount"` + Type string `json:"type"` + Total string `json:"total"` + TotalInBytes int64 `json:"total_in_bytes"` + Free string `json:"free"` + FreeInBytes int64 `json:"free_in_bytes"` + Available string `json:"available"` + AvailableInBytes int64 `json:"available_in_bytes"` + Spins string `json:"spins"` +} + +type NodesStatsNodeFSIOStats struct { + Devices []*NodesStatsNodeFSIOStatsEntry `json:"devices"` + Total *NodesStatsNodeFSIOStatsEntry `json:"total"` +} + +type NodesStatsNodeFSIOStatsEntry struct { + DeviceName string `json:"device_name"` + Operations int64 `json:"operations"` + ReadOperations int64 `json:"read_operations"` + WriteOperations int64 `json:"write_operations"` + ReadKilobytes int64 `json:"read_kilobytes"` + WriteKilobytes int64 `json:"write_kilobytes"` +} + +type NodesStatsNodeTransport struct { + ServerOpen int `json:"server_open"` + RxCount int64 `json:"rx_count"` + RxSize string `json:"rx_size"` + RxSizeInBytes int64 `json:"rx_size_in_bytes"` + TxCount int64 `json:"tx_count"` + TxSize string `json:"tx_size"` + TxSizeInBytes int64 `json:"tx_size_in_bytes"` +} + +type NodesStatsNodeHTTP struct { + CurrentOpen int `json:"current_open"` + TotalOpened int `json:"total_opened"` +} + +type NodesStatsBreaker struct { + LimitSize string `json:"limit_size"` + LimitSizeInBytes int64 `json:"limit_size_in_bytes"` + EstimatedSize string `json:"estimated_size"` + EstimatedSizeInBytes int64 `json:"estimated_size_in_bytes"` + Overhead float64 `json:"overhead"` + Tripped int64 `json:"tripped"` +} + +type NodesStatsScriptStats struct { + Compilations int64 `json:"compilations"` + CacheEvictions int64 `json:"cache_evictions"` +} + +type NodesStatsDiscovery struct { + ClusterStateQueue *NodesStatsDiscoveryStats `json:"cluster_state_queue"` +} + +type NodesStatsDiscoveryStats struct { 
+ Total int64 `json:"total"` + Pending int64 `json:"pending"` + Committed int64 `json:"committed"` +} + +type NodesStatsIngest struct { + Total *NodesStatsIngestStats `json:"total"` + Pipelines interface{} `json:"pipelines"` +} + +type NodesStatsIngestStats struct { + Count int64 `json:"count"` + Time string `json:"time"` + TimeInMillis int64 `json:"time_in_millis"` + Current int64 `json:"current"` + Failed int64 `json:"failed"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/ping.go b/vendor/gopkg.in/olivere/elastic.v5/ping.go new file mode 100644 index 0000000000000000000000000000000000000000..5ec7f0f9a34baf837e01013d9f31c6a2d1662281 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/ping.go @@ -0,0 +1,129 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "net/http" + "net/url" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// PingService checks if an Elasticsearch server on a given URL is alive. +// When asked for, it can also return various information about the +// Elasticsearch server, e.g. the Elasticsearch version number. +// +// Ping simply starts a HTTP GET request to the URL of the server. +// If the server responds with HTTP Status code 200 OK, the server is alive. +type PingService struct { + client *Client + url string + timeout string + httpHeadOnly bool + pretty bool +} + +// PingResult is the result returned from querying the Elasticsearch server. +type PingResult struct { + Name string `json:"name"` + ClusterName string `json:"cluster_name"` + Version struct { + Number string `json:"number"` + BuildHash string `json:"build_hash"` + BuildTimestamp string `json:"build_timestamp"` + BuildSnapshot bool `json:"build_snapshot"` + LuceneVersion string `json:"lucene_version"` + } `json:"version"` + TagLine string `json:"tagline"` +} + +func NewPingService(client *Client) *PingService { + return &PingService{ + client: client, + url: DefaultURL, + httpHeadOnly: false, + pretty: false, + } +} + +func (s *PingService) URL(url string) *PingService { + s.url = url + return s +} + +func (s *PingService) Timeout(timeout string) *PingService { + s.timeout = timeout + return s +} + +// HeadOnly makes the service to only return the status code in Do; +// the PingResult will be nil. +func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService { + s.httpHeadOnly = httpHeadOnly + return s +} + +func (s *PingService) Pretty(pretty bool) *PingService { + s.pretty = pretty + return s +} + +// Do returns the PingResult, the HTTP status code of the Elasticsearch +// server, and an error. +func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) { + s.client.mu.RLock() + basicAuth := s.client.basicAuth + basicAuthUsername := s.client.basicAuthUsername + basicAuthPassword := s.client.basicAuthPassword + s.client.mu.RUnlock() + + url_ := s.url + "/" + + params := make(url.Values) + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.pretty { + params.Set("pretty", "1") + } + if len(params) > 0 { + url_ += "?" + params.Encode() + } + + var method string + if s.httpHeadOnly { + method = "HEAD" + } else { + method = "GET" + } + + // Notice: This service must NOT use PerformRequest! 
+ req, err := NewRequest(method, url_) + if err != nil { + return nil, 0, err + } + + if basicAuth { + req.SetBasicAuth(basicAuthUsername, basicAuthPassword) + } + + res, err := ctxhttp.Do(ctx, s.client.c, (*http.Request)(req)) + if err != nil { + return nil, 0, err + } + defer res.Body.Close() + + var ret *PingResult + if !s.httpHeadOnly { + ret = new(PingResult) + if err := json.NewDecoder(res.Body).Decode(ret); err != nil { + return nil, res.StatusCode, err + } + } + + return ret, res.StatusCode, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/plugins.go b/vendor/gopkg.in/olivere/elastic.v5/plugins.go new file mode 100644 index 0000000000000000000000000000000000000000..a46ac3748831d2d147833f0a7079d028ffb64b4f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/plugins.go @@ -0,0 +1,40 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "golang.org/x/net/context" + +// HasPlugin indicates whether the cluster has the named plugin. +func (c *Client) HasPlugin(name string) (bool, error) { + plugins, err := c.Plugins() + if err != nil { + return false, nil + } + for _, plugin := range plugins { + if plugin == name { + return true, nil + } + } + return false, nil +} + +// Plugins returns the list of all registered plugins. +func (c *Client) Plugins() ([]string, error) { + stats, err := c.ClusterStats().Do(context.Background()) + if err != nil { + return nil, err + } + if stats == nil { + return nil, err + } + if stats.Nodes == nil { + return nil, err + } + var plugins []string + for _, plugin := range stats.Nodes.Plugins { + plugins = append(plugins, plugin.Name) + } + return plugins, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/put_template.go b/vendor/gopkg.in/olivere/elastic.v5/put_template.go new file mode 100644 index 0000000000000000000000000000000000000000..23e2bbb10be71e0d966ad1aa810c75e549e650fd --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/put_template.go @@ -0,0 +1,146 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// PutTemplateService creates or updates a search template. +// The documentation can be found at +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-template.html. +type PutTemplateService struct { + client *Client + pretty bool + id string + opType string + version *int + versionType string + bodyJson interface{} + bodyString string +} + +// NewPutTemplateService creates a new PutTemplateService. +func NewPutTemplateService(client *Client) *PutTemplateService { + return &PutTemplateService{ + client: client, + } +} + +// Id is the template ID. +func (s *PutTemplateService) Id(id string) *PutTemplateService { + s.id = id + return s +} + +// OpType is an explicit operation type. +func (s *PutTemplateService) OpType(opType string) *PutTemplateService { + s.opType = opType + return s +} + +// Version is an explicit version number for concurrency control. +func (s *PutTemplateService) Version(version int) *PutTemplateService { + s.version = &version + return s +} + +// VersionType is a specific version type. 
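A minimal usage sketch for the PingService and plugin helpers added above (illustrative only, not part of the vendored sources). It assumes elastic.NewClient and elastic.SetURL from client.go elsewhere in this vendor drop, and the node URL is a placeholder.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// Assumed helpers from client.go (not shown in this hunk).
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}
	// PingService issues a plain GET against the node; HTTP 200 means alive.
	res, code, err := elastic.NewPingService(client).
		URL("http://127.0.0.1:9200").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("status=%d name=%s version=%s\n", code, res.Name, res.Version.Number)
}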
+func (s *PutTemplateService) VersionType(versionType string) *PutTemplateService { + s.versionType = versionType + return s +} + +// BodyJson is the document as a JSON serializable object. +func (s *PutTemplateService) BodyJson(body interface{}) *PutTemplateService { + s.bodyJson = body + return s +} + +// BodyString is the document as a string. +func (s *PutTemplateService) BodyString(body string) *PutTemplateService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. +func (s *PutTemplateService) buildURL() (string, url.Values, error) { + // Build URL + path, err := uritemplates.Expand("/_search/template/{id}", map[string]string{ + "id": s.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.version != nil { + params.Set("version", fmt.Sprintf("%d", *s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + if s.opType != "" { + params.Set("op_type", s.opType) + } + + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *PutTemplateService) Validate() error { + var invalid []string + if s.id == "" { + invalid = append(invalid, "Id") + } + if s.bodyString == "" && s.bodyJson == nil { + invalid = append(invalid, "BodyJson") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *PutTemplateService) Do(ctx context.Context) (*AcknowledgedResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else { + body = s.bodyString + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "PUT", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(AcknowledgedResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/query.go b/vendor/gopkg.in/olivere/elastic.v5/query.go new file mode 100644 index 0000000000000000000000000000000000000000..ad01354a08ed7bc3c3a6b4c99559ef014b695a28 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/query.go @@ -0,0 +1,13 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Query represents the generic query interface. A query's sole purpose +// is to return the source of the query as a JSON-serializable object. +// Returning map[string]interface{} is the norm for queries. +type Query interface { + // Source returns the JSON-serializable query request. + Source() (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/reindex.go b/vendor/gopkg.in/olivere/elastic.v5/reindex.go new file mode 100644 index 0000000000000000000000000000000000000000..5cab1695f300fdef59341dae2e8bc101366d2d01 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/reindex.go @@ -0,0 +1,553 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
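A minimal sketch of driving the PutTemplateService defined above (illustrative only, not part of the vendored sources). It assumes an AcknowledgedResponse with an Acknowledged field, defined elsewhere in this package, and the template body and id are placeholders.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Store a search template under an id; Validate() requires Id and a body.
	body := `{"template":{"query":{"match":{"title":"{{query_string}}"}}}}`
	res, err := elastic.NewPutTemplateService(client).
		Id("my-search-template").
		BodyString(body).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("acknowledged:", res.Acknowledged) // Acknowledged is an assumed field
}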
+ +package elastic + +import ( + "fmt" + "net/url" + + "golang.org/x/net/context" +) + +// ReindexService is a method to copy documents from one index to another. +// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/5.0/docs-reindex.html. +type ReindexService struct { + client *Client + pretty bool + refresh string + timeout string + waitForActiveShards string + waitForCompletion *bool + requestsPerSecond *int + body interface{} + source *ReindexSource + destination *ReindexDestination + conflicts string + size *int + script *Script +} + +// NewReindexService creates a new ReindexService. +func NewReindexService(client *Client) *ReindexService { + return &ReindexService{ + client: client, + } +} + +// WaitForActiveShards sets the number of shard copies that must be active before +// proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than or +// equal to the total number of copies for the shard (number of replicas + 1). +func (s *ReindexService) WaitForActiveShards(waitForActiveShards string) *ReindexService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// RequestsPerSecond specifies the throttle to set on this request in sub-requests per second. +// -1 means set no throttle as does "unlimited" which is the only non-float this accepts. +func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexService { + s.requestsPerSecond = &requestsPerSecond + return s +} + +// Refresh indicates whether Elasticsearch should refresh the effected indexes +// immediately. +func (s *ReindexService) Refresh(refresh string) *ReindexService { + s.refresh = refresh + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. +func (s *ReindexService) Timeout(timeout string) *ReindexService { + s.timeout = timeout + return s +} + +// WaitForCompletion indicates whether Elasticsearch should block until the +// reindex is complete. +func (s *ReindexService) WaitForCompletion(waitForCompletion bool) *ReindexService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *ReindexService) Pretty(pretty bool) *ReindexService { + s.pretty = pretty + return s +} + +// Source specifies the source of the reindexing process. +func (s *ReindexService) Source(source *ReindexSource) *ReindexService { + s.source = source + return s +} + +// SourceIndex specifies the source index of the reindexing process. +func (s *ReindexService) SourceIndex(index string) *ReindexService { + if s.source == nil { + s.source = NewReindexSource() + } + s.source = s.source.Index(index) + return s +} + +// Destination specifies the destination of the reindexing process. +func (s *ReindexService) Destination(destination *ReindexDestination) *ReindexService { + s.destination = destination + return s +} + +// DestinationIndex specifies the destination index of the reindexing process. +func (s *ReindexService) DestinationIndex(index string) *ReindexService { + if s.destination == nil { + s.destination = NewReindexDestination() + } + s.destination = s.destination.Index(index) + return s +} + +// DestinationIndexAndType specifies both the destination index and type +// of the reindexing process. 
+func (s *ReindexService) DestinationIndexAndType(index, typ string) *ReindexService { + if s.destination == nil { + s.destination = NewReindexDestination() + } + s.destination = s.destination.Index(index) + s.destination = s.destination.Type(typ) + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *ReindexService) Conflicts(conflicts string) *ReindexService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). +func (s *ReindexService) AbortOnVersionConflict() *ReindexService { + s.conflicts = "abort" + return s +} + +// ProceedOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("proceed"). +func (s *ReindexService) ProceedOnVersionConflict() *ReindexService { + s.conflicts = "proceed" + return s +} + +// Size sets an upper limit for the number of processed documents. +func (s *ReindexService) Size(size int) *ReindexService { + s.size = &size + return s +} + +// Script allows for modification of the documents as they are reindexed +// from source to destination. +func (s *ReindexService) Script(script *Script) *ReindexService { + s.script = script + return s +} + +// Body specifies the body of the request to send to Elasticsearch. +// It overrides settings specified with other setters, e.g. Query. +func (s *ReindexService) Body(body interface{}) *ReindexService { + s.body = body + return s +} + +// buildURL builds the URL for the operation. +func (s *ReindexService) buildURL() (string, url.Values, error) { + // Build URL path + path := "/_reindex" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *ReindexService) Validate() error { + var invalid []string + if s.body != nil { + return nil + } + if s.source == nil { + invalid = append(invalid, "Source") + } else { + if len(s.source.indices) == 0 { + invalid = append(invalid, "Source.Index") + } + } + if s.destination == nil { + invalid = append(invalid, "Destination") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// getBody returns the body part of the document request. +func (s *ReindexService) getBody() (interface{}, error) { + if s.body != nil { + return s.body, nil + } + + body := make(map[string]interface{}) + + if s.conflicts != "" { + body["conflicts"] = s.conflicts + } + if s.size != nil { + body["size"] = *s.size + } + if s.script != nil { + out, err := s.script.Source() + if err != nil { + return nil, err + } + body["script"] = out + } + + src, err := s.source.Source() + if err != nil { + return nil, err + } + body["source"] = src + + dst, err := s.destination.Source() + if err != nil { + return nil, err + } + body["dest"] = dst + + return body, nil +} + +// Do executes the operation. 
+func (s *ReindexService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body, err := s.getBody() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(BulkIndexByScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Source of Reindex -- + +// ReindexSource specifies the source of a Reindex process. +type ReindexSource struct { + searchType string // default in ES is "query_then_fetch" + indices []string + types []string + routing *string + preference *string + requestCache *bool + scroll string + query Query + sorts []SortInfo + sorters []Sorter + searchSource *SearchSource +} + +// NewReindexSource creates a new ReindexSource. +func NewReindexSource() *ReindexSource { + return &ReindexSource{} +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch". +func (r *ReindexSource) SearchType(searchType string) *ReindexSource { + r.searchType = searchType + return r +} + +func (r *ReindexSource) SearchTypeDfsQueryThenFetch() *ReindexSource { + return r.SearchType("dfs_query_then_fetch") +} + +func (r *ReindexSource) SearchTypeQueryThenFetch() *ReindexSource { + return r.SearchType("query_then_fetch") +} + +func (r *ReindexSource) Index(indices ...string) *ReindexSource { + r.indices = append(r.indices, indices...) + return r +} + +func (r *ReindexSource) Type(types ...string) *ReindexSource { + r.types = append(r.types, types...) + return r +} + +func (r *ReindexSource) Preference(preference string) *ReindexSource { + r.preference = &preference + return r +} + +func (r *ReindexSource) RequestCache(requestCache bool) *ReindexSource { + r.requestCache = &requestCache + return r +} + +func (r *ReindexSource) Scroll(scroll string) *ReindexSource { + r.scroll = scroll + return r +} + +func (r *ReindexSource) Query(query Query) *ReindexSource { + r.query = query + return r +} + +// Sort adds a sort order. +func (s *ReindexSource) Sort(field string, ascending bool) *ReindexSource { + s.sorts = append(s.sorts, SortInfo{Field: field, Ascending: ascending}) + return s +} + +// SortWithInfo adds a sort order. +func (s *ReindexSource) SortWithInfo(info SortInfo) *ReindexSource { + s.sorts = append(s.sorts, info) + return s +} + +// SortBy adds a sort order. +func (s *ReindexSource) SortBy(sorter ...Sorter) *ReindexSource { + s.sorters = append(s.sorters, sorter...) + return s +} + +// Source returns a serializable JSON request for the request. 
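A minimal sketch of a reindex call using the builders above (illustrative only, not part of the vendored sources). The index names are placeholders, and the Total field is assumed from BulkIndexByScrollResponse, which is defined elsewhere in this package.

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	client, err := elastic.NewClient()
	if err != nil {
		log.Fatal(err)
	}
	// Copy every document from "twitter" into "new_twitter", skipping
	// version conflicts, and block until the reindex has finished.
	res, err := elastic.NewReindexService(client).
		SourceIndex("twitter").
		DestinationIndex("new_twitter").
		ProceedOnVersionConflict().
		WaitForCompletion(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("reindexed %d documents\n", res.Total) // Total is an assumed field
}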
+func (r *ReindexSource) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+
+	if r.query != nil {
+		src, err := r.query.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["query"] = src
+	} else if r.searchSource != nil {
+		src, err := r.searchSource.Source()
+		if err != nil {
+			return nil, err
+		}
+		source["source"] = src
+	}
+
+	if r.searchType != "" {
+		source["search_type"] = r.searchType
+	}
+
+	switch len(r.indices) {
+	case 0:
+	case 1:
+		source["index"] = r.indices[0]
+	default:
+		source["index"] = r.indices
+	}
+
+	switch len(r.types) {
+	case 0:
+	case 1:
+		source["type"] = r.types[0]
+	default:
+		source["type"] = r.types
+	}
+
+	if r.preference != nil && *r.preference != "" {
+		source["preference"] = *r.preference
+	}
+
+	if r.requestCache != nil {
+		source["request_cache"] = fmt.Sprintf("%v", *r.requestCache)
+	}
+
+	if r.scroll != "" {
+		source["scroll"] = r.scroll
+	}
+
+	if len(r.sorters) > 0 {
+		var sortarr []interface{}
+		for _, sorter := range r.sorters {
+			src, err := sorter.Source()
+			if err != nil {
+				return nil, err
+			}
+			sortarr = append(sortarr, src)
+		}
+		source["sort"] = sortarr
+	} else if len(r.sorts) > 0 {
+		var sortarr []interface{}
+		for _, sort := range r.sorts {
+			src, err := sort.Source()
+			if err != nil {
+				return nil, err
+			}
+			sortarr = append(sortarr, src)
+		}
+		source["sort"] = sortarr
+	}
+
+	return source, nil
+}
+
+// -- Destination of Reindex --
+
+// ReindexDestination is the destination of a Reindex API call.
+// It is basically the meta data of a BulkIndexRequest.
+//
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-reindex.html
+// for details.
+type ReindexDestination struct {
+	index       string
+	typ         string
+	routing     string
+	parent      string
+	opType      string
+	version     int64  // default is MATCH_ANY
+	versionType string // default is "internal"
+}
+
+// NewReindexDestination returns a new ReindexDestination.
+func NewReindexDestination() *ReindexDestination {
+	return &ReindexDestination{}
+}
+
+// Index specifies name of the Elasticsearch index to use as the destination
+// of a reindexing process.
+func (r *ReindexDestination) Index(index string) *ReindexDestination {
+	r.index = index
+	return r
+}
+
+// Type specifies the Elasticsearch type to use for reindexing.
+func (r *ReindexDestination) Type(typ string) *ReindexDestination {
+	r.typ = typ
+	return r
+}
+
+// Routing specifies a routing value for the reindexing request.
+// It can be "keep", "discard", or start with "=". The latter specifies
+// the routing on the bulk request.
+func (r *ReindexDestination) Routing(routing string) *ReindexDestination {
+	r.routing = routing
+	return r
+}
+
+// Keep sets the routing on the bulk request sent for each match to the routing
+// of the match (the default).
+func (r *ReindexDestination) Keep() *ReindexDestination {
+	r.routing = "keep"
+	return r
+}
+
+// Discard sets the routing on the bulk request sent for each match to null.
+func (r *ReindexDestination) Discard() *ReindexDestination {
+	r.routing = "discard"
+	return r
+}
+
+// Parent specifies the identifier of the parent document (if available).
+func (r *ReindexDestination) Parent(parent string) *ReindexDestination {
+	r.parent = parent
+	return r
+}
+
+// OpType specifies if this request should follow create-only or upsert
+// behavior. This follows the OpType of the standard document index API.
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-index_.html#operation-type
+// for details.
+func (r *ReindexDestination) OpType(opType string) *ReindexDestination { + r.opType = opType + return r +} + +// Version indicates the version of the document as part of an optimistic +// concurrency model. +func (r *ReindexDestination) Version(version int64) *ReindexDestination { + r.version = version + return r +} + +// VersionType specifies how versions are created. +func (r *ReindexDestination) VersionType(versionType string) *ReindexDestination { + r.versionType = versionType + return r +} + +// Source returns a serializable JSON request for the request. +func (r *ReindexDestination) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.index != "" { + source["index"] = r.index + } + if r.typ != "" { + source["type"] = r.typ + } + if r.routing != "" { + source["routing"] = r.routing + } + if r.opType != "" { + source["op_type"] = r.opType + } + if r.parent != "" { + source["parent"] = r.parent + } + if r.version > 0 { + source["version"] = r.version + } + if r.versionType != "" { + source["version_type"] = r.versionType + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/request.go b/vendor/gopkg.in/olivere/elastic.v5/request.go new file mode 100644 index 0000000000000000000000000000000000000000..e0e71b5e333ea54b4b9cb1257e1437e18301c4a4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/request.go @@ -0,0 +1,123 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "runtime" + "strings" +) + +// Elasticsearch-specific HTTP request +type Request http.Request + +// NewRequest is a http.Request and adds features such as encoding the body. +func NewRequest(method, url string) (*Request, error) { + req, err := http.NewRequest(method, url, nil) + if err != nil { + return nil, err + } + req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")") + req.Header.Add("Accept", "application/json") + return (*Request)(req), nil +} + +// SetBasicAuth wraps http.Request's SetBasicAuth. +func (r *Request) SetBasicAuth(username, password string) { + ((*http.Request)(r)).SetBasicAuth(username, password) +} + +// SetBody encodes the body in the request. Optionally, it performs GZIP compression. +func (r *Request) SetBody(body interface{}, gzipCompress bool) error { + switch b := body.(type) { + case string: + if gzipCompress { + return r.setBodyGzip(b) + } else { + return r.setBodyString(b) + } + default: + if gzipCompress { + return r.setBodyGzip(body) + } else { + return r.setBodyJson(body) + } + } +} + +// setBodyJson encodes the body as a struct to be marshaled via json.Marshal. +func (r *Request) setBodyJson(data interface{}) error { + body, err := json.Marshal(data) + if err != nil { + return err + } + r.Header.Set("Content-Type", "application/json") + r.setBodyReader(bytes.NewReader(body)) + return nil +} + +// setBodyString encodes the body as a string. +func (r *Request) setBodyString(body string) error { + return r.setBodyReader(strings.NewReader(body)) +} + +// setBodyGzip gzip's the body. It accepts both strings and structs as body. +// The latter will be encoded via json.Marshal. 
+func (r *Request) setBodyGzip(body interface{}) error { + switch b := body.(type) { + case string: + buf := new(bytes.Buffer) + w := gzip.NewWriter(buf) + if _, err := w.Write([]byte(b)); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + r.Header.Add("Content-Encoding", "gzip") + r.Header.Add("Vary", "Accept-Encoding") + return r.setBodyReader(bytes.NewReader(buf.Bytes())) + default: + data, err := json.Marshal(b) + if err != nil { + return err + } + buf := new(bytes.Buffer) + w := gzip.NewWriter(buf) + if _, err := w.Write(data); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + r.Header.Add("Content-Encoding", "gzip") + r.Header.Add("Vary", "Accept-Encoding") + r.Header.Set("Content-Type", "application/json") + return r.setBodyReader(bytes.NewReader(buf.Bytes())) + } +} + +// setBodyReader writes the body from an io.Reader. +func (r *Request) setBodyReader(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + r.Body = rc + if body != nil { + switch v := body.(type) { + case *strings.Reader: + r.ContentLength = int64(v.Len()) + case *bytes.Buffer: + r.ContentLength = int64(v.Len()) + } + } + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/rescore.go b/vendor/gopkg.in/olivere/elastic.v5/rescore.go new file mode 100644 index 0000000000000000000000000000000000000000..9b7eaee1d849008231d95c57f5049f13d27d4306 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/rescore.go @@ -0,0 +1,44 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +type Rescore struct { + rescorer Rescorer + windowSize *int + defaultRescoreWindowSize *int +} + +func NewRescore() *Rescore { + return &Rescore{} +} + +func (r *Rescore) WindowSize(windowSize int) *Rescore { + r.windowSize = &windowSize + return r +} + +func (r *Rescore) IsEmpty() bool { + return r.rescorer == nil +} + +func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore { + r.rescorer = rescorer + return r +} + +func (r *Rescore) Source() (interface{}, error) { + source := make(map[string]interface{}) + if r.windowSize != nil { + source["window_size"] = *r.windowSize + } else if r.defaultRescoreWindowSize != nil { + source["window_size"] = *r.defaultRescoreWindowSize + } + rescorerSrc, err := r.rescorer.Source() + if err != nil { + return nil, err + } + source[r.rescorer.Name()] = rescorerSrc + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/rescorer.go b/vendor/gopkg.in/olivere/elastic.v5/rescorer.go new file mode 100644 index 0000000000000000000000000000000000000000..ccd4bb854b85752e4de820a49272a08a2442f4e6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/rescorer.go @@ -0,0 +1,64 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +type Rescorer interface { + Name() string + Source() (interface{}, error) +} + +// -- Query Rescorer -- + +type QueryRescorer struct { + query Query + rescoreQueryWeight *float64 + queryWeight *float64 + scoreMode string +} + +func NewQueryRescorer(query Query) *QueryRescorer { + return &QueryRescorer{ + query: query, + } +} + +func (r *QueryRescorer) Name() string { + return "query" +} + +func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer { + r.rescoreQueryWeight = &rescoreQueryWeight + return r +} + +func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer { + r.queryWeight = &queryWeight + return r +} + +func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer { + r.scoreMode = scoreMode + return r +} + +func (r *QueryRescorer) Source() (interface{}, error) { + rescoreQuery, err := r.query.Source() + if err != nil { + return nil, err + } + + source := make(map[string]interface{}) + source["rescore_query"] = rescoreQuery + if r.queryWeight != nil { + source["query_weight"] = *r.queryWeight + } + if r.rescoreQueryWeight != nil { + source["rescore_query_weight"] = *r.rescoreQueryWeight + } + if r.scoreMode != "" { + source["score_mode"] = r.scoreMode + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/response.go b/vendor/gopkg.in/olivere/elastic.v5/response.go new file mode 100644 index 0000000000000000000000000000000000000000..e7380d98a6665ba6a98667de75620a5017a508d2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/response.go @@ -0,0 +1,43 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "io/ioutil" + "net/http" +) + +// Response represents a response from Elasticsearch. +type Response struct { + // StatusCode is the HTTP status code, e.g. 200. + StatusCode int + // Header is the HTTP header from the HTTP response. + // Keys in the map are canonicalized (see http.CanonicalHeaderKey). + Header http.Header + // Body is the deserialized response body. + Body json.RawMessage +} + +// newResponse creates a new response from the HTTP response. +func (c *Client) newResponse(res *http.Response) (*Response, error) { + r := &Response{ + StatusCode: res.StatusCode, + Header: res.Header, + } + if res.Body != nil { + slurp, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + // HEAD requests return a body but no content + if len(slurp) > 0 { + if err := c.decoder.Decode(slurp, &r.Body); err != nil { + return nil, err + } + } + } + return r, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/retrier.go b/vendor/gopkg.in/olivere/elastic.v5/retrier.go new file mode 100644 index 0000000000000000000000000000000000000000..b0d4e9ca4d7fdfa11b0512c10111b5339d5ce666 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/retrier.go @@ -0,0 +1,62 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "net/http" + "time" + + "golang.org/x/net/context" +) + +// RetrierFunc specifies the signature of a Retry function. +type RetrierFunc func(context.Context, int, *http.Request, *http.Response, error) (time.Duration, bool, error) + +// Retrier decides whether to retry a failed HTTP request with Elasticsearch. 
+type Retrier interface {
+	// Retry is called when a request has failed. It decides whether to retry
+	// the call, how long to wait for the next call, or whether to return an
+	// error (which will be returned to the service that started the HTTP
+	// request in the first place).
+	//
+	// Callers may also use this to inspect the HTTP request/response and
+	// the error that happened. Additional data can be passed through via
+	// the context.
+	Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error)
+}
+
+// -- StopRetrier --
+
+// StopRetrier is an implementation that does no retries.
+type StopRetrier struct {
+}
+
+// NewStopRetrier returns a retrier that does no retries.
+func NewStopRetrier() *StopRetrier {
+	return &StopRetrier{}
+}
+
+// Retry does not retry.
+func (r *StopRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
+	return 0, false, nil
+}
+
+// -- BackoffRetrier --
+
+// BackoffRetrier is an implementation that uses the given Backoff strategy
+// to decide whether to retry and how long to wait before the next attempt.
type BackoffRetrier struct {
+	backoff Backoff
+}
+
+// NewBackoffRetrier returns a retrier that uses the given backoff strategy.
+func NewBackoffRetrier(backoff Backoff) *BackoffRetrier {
+	return &BackoffRetrier{backoff: backoff}
+}
+
+// Retry calls into the backoff strategy to determine the wait interval and
+// whether to retry at all.
+func (r *BackoffRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) {
+	wait, goahead := r.backoff.Next(retry)
+	return wait, goahead, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/retry.go b/vendor/gopkg.in/olivere/elastic.v5/retry.go
new file mode 100644
index 0000000000000000000000000000000000000000..3571a3b7a6b0707febd43fcfc830ea4d84714d24
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/retry.go
@@ -0,0 +1,56 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+// This file is based on code (c) 2014 Cenk Altı and governed by the MIT license.
+// See https://github.com/cenkalti/backoff for original source.
+
+package elastic
+
+import "time"
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+// Notify is a notify-on-error function. It receives the error returned
+// from an operation.
+//
+// Notice that if the backoff policy says to stop retrying,
+// the notify function isn't called.
+type Notify func(error)
+
+// Retry runs the operation o until it does not return an error or until the
+// backoff strategy b stops. o is guaranteed to be run at least once.
+// It is the caller's responsibility to reset b after Retry returns.
+//
+// Retry sleeps the goroutine for the duration returned by the backoff
+// strategy after a failed operation returns.
+func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) }
+
+// RetryNotify calls the notify function with the error and wait duration
+// for each failed attempt before sleeping.
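A minimal sketch of the package-level Retry helper with a hand-rolled backoff (illustrative only, not part of the vendored sources). The Backoff method set used here, Next(retry int) (time.Duration, bool), is inferred from its use in BackoffRetrier.Retry above; the package also ships its own strategies in backoff.go, which can be used instead.

package main

import (
	"errors"
	"fmt"
	"time"

	elastic "gopkg.in/olivere/elastic.v5"
)

// fixedBackoff waits a constant interval and gives up after maxRetries.
// It is assumed to satisfy the package's Backoff interface (Next only).
type fixedBackoff struct {
	interval   time.Duration
	maxRetries int
}

func (b fixedBackoff) Next(retry int) (time.Duration, bool) {
	if retry > b.maxRetries {
		return 0, false // stop retrying
	}
	return b.interval, true
}

func main() {
	attempts := 0
	// A flaky operation that succeeds on the third attempt.
	op := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}
	err := elastic.Retry(op, fixedBackoff{interval: 100 * time.Millisecond, maxRetries: 5})
	fmt.Println("attempts:", attempts, "err:", err)
}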
+func RetryNotify(operation Operation, b Backoff, notify Notify) error { + var err error + var wait time.Duration + var retry bool + var n int + + for { + if err = operation(); err == nil { + return nil + } + + n++ + wait, retry = b.Next(n) + if !retry { + return err + } + + if notify != nil { + notify(err) + } + + time.Sleep(wait) + } +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-beta1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-beta1.sh new file mode 100755 index 0000000000000000000000000000000000000000..08c67ea17cf55c9a4623dbf273b425774b9927c1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-beta1.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0-beta1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-rc1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-rc1.sh new file mode 100755 index 0000000000000000000000000000000000000000..d4586acca2aa98dd62dc1f41a436ee10da2890d5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0-rc1.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0-rc1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh new file mode 100755 index 0000000000000000000000000000000000000000..e7a98c8fcf7d4c73d32cae20276870c924c06280 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.0.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.0 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh new file mode 100755 index 0000000000000000000000000000000000000000..5286702116fd9cd49fb5f2997728010ba1c23825 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.0.1.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.0.1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.1.sh new file mode 100755 index 0000000000000000000000000000000000000000..b15b5711306091fabdb6b9459786381490dfaeed --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.1.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.1.1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.2.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.2.sh new file mode 100755 index 0000000000000000000000000000000000000000..66c85e58af57ec62ab9bb2aea11937c15633b93a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.1.2.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.1.2 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.0.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.0.sh new file mode 100755 index 0000000000000000000000000000000000000000..417d283a746d6dc85876aa630a2e8d1cbbd5735c --- /dev/null +++ 
b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.0.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.2.0 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh new file mode 100755 index 0000000000000000000000000000000000000000..6d0d5dd70fbcfc096316e5a0ecec9de9cfebe316 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/run-es-5.2.1.sh @@ -0,0 +1 @@ +docker run --rm --privileged=true -p 9200:9200 -p 9300:9300 -v "$PWD/config:/usr/share/elasticsearch/config" -e ES_JAVA_OPTS='-Xms1g -Xmx1g' elasticsearch:5.2.1 elasticsearch diff --git a/vendor/gopkg.in/olivere/elastic.v5/script.go b/vendor/gopkg.in/olivere/elastic.v5/script.go new file mode 100644 index 0000000000000000000000000000000000000000..b771c0547f8bf182f6ac117abc3b85b99dca83a6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/script.go @@ -0,0 +1,131 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// Script holds all the paramaters necessary to compile or find in cache +// and then execute a script. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html +// for details of scripting. +type Script struct { + script string + typ string + lang string + params map[string]interface{} +} + +// NewScript creates and initializes a new Script. +func NewScript(script string) *Script { + return &Script{ + script: script, + typ: "", // default type is "inline" + params: make(map[string]interface{}), + } +} + +// NewScriptInline creates and initializes a new Script of type "inline". +func NewScriptInline(script string) *Script { + return NewScript(script).Type("inline") +} + +// NewScriptId creates and initializes a new Script of type "id". +func NewScriptId(script string) *Script { + return NewScript(script).Type("id") +} + +// NewScriptFile creates and initializes a new Script of type "file". +func NewScriptFile(script string) *Script { + return NewScript(script).Type("file") +} + +// Script is either the cache key of the script to be compiled/executed +// or the actual script source code for inline scripts. For indexed +// scripts this is the id used in the request. For file scripts this is +// the file name. +func (s *Script) Script(script string) *Script { + s.script = script + return s +} + +// Type sets the type of script: "inline", "id", or "file". +func (s *Script) Type(typ string) *Script { + s.typ = typ + return s +} + +// Lang sets the language of the script. Permitted values are "groovy", +// "expression", "mustache", "mvel" (default), "javascript", "python". +// To use certain languages, you need to configure your server and/or +// add plugins. See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html +// for details. +func (s *Script) Lang(lang string) *Script { + s.lang = lang + return s +} + +// Param adds a key/value pair to the parameters that this script will be executed with. +func (s *Script) Param(name string, value interface{}) *Script { + if s.params == nil { + s.params = make(map[string]interface{}) + } + s.params[name] = value + return s +} + +// Params sets the map of parameters this script will be executed with. 
+func (s *Script) Params(params map[string]interface{}) *Script { + s.params = params + return s +} + +// Source returns the JSON serializable data for this Script. +func (s *Script) Source() (interface{}, error) { + if s.typ == "" && s.lang == "" && len(s.params) == 0 { + return s.script, nil + } + source := make(map[string]interface{}) + if s.typ == "" { + source["inline"] = s.script + } else { + source[s.typ] = s.script + } + if s.lang != "" { + source["lang"] = s.lang + } + if len(s.params) > 0 { + source["params"] = s.params + } + return source, nil +} + +// -- Script Field -- + +// ScriptField is a single script field. +type ScriptField struct { + FieldName string // name of the field + + script *Script +} + +// NewScriptField creates and initializes a new ScriptField. +func NewScriptField(fieldName string, script *Script) *ScriptField { + return &ScriptField{FieldName: fieldName, script: script} +} + +// Source returns the serializable JSON for the ScriptField. +func (f *ScriptField) Source() (interface{}, error) { + if f.script == nil { + return nil, errors.New("ScriptField expects script") + } + source := make(map[string]interface{}) + src, err := f.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/scroll.go b/vendor/gopkg.in/olivere/elastic.v5/scroll.go new file mode 100644 index 0000000000000000000000000000000000000000..7268026ff7c0ac1cd7ac1c18fc6f0ffb98e7668f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/scroll.go @@ -0,0 +1,445 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "io" + "net/url" + "strings" + "sync" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +const ( + // DefaultScrollKeepAlive is the default time a scroll cursor will be kept alive. + DefaultScrollKeepAlive = "5m" +) + +// ScrollService iterates over pages of search results from Elasticsearch. +type ScrollService struct { + client *Client + indices []string + types []string + keepAlive string + body interface{} + ss *SearchSource + size *int + pretty bool + routing string + preference string + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + + mu sync.RWMutex + scrollId string +} + +// NewScrollService initializes and returns a new ScrollService. +func NewScrollService(client *Client) *ScrollService { + builder := &ScrollService{ + client: client, + ss: NewSearchSource(), + keepAlive: DefaultScrollKeepAlive, + } + return builder +} + +// Index sets the name of one or more indices to iterate over. +func (s *ScrollService) Index(indices ...string) *ScrollService { + if s.indices == nil { + s.indices = make([]string, 0) + } + s.indices = append(s.indices, indices...) + return s +} + +// Type sets the name of one or more types to iterate over. +func (s *ScrollService) Type(types ...string) *ScrollService { + if s.types == nil { + s.types = make([]string, 0) + } + s.types = append(s.types, types...) + return s +} + +// Scroll is an alias for KeepAlive, the time to keep +// the cursor alive (e.g. "5m" for 5 minutes). +func (s *ScrollService) Scroll(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +// KeepAlive sets the maximum time after which the cursor will expire. +// It is "2m" by default. 
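A minimal sketch of assembling a Script and inspecting what Source() above serializes to (illustrative only, not part of the vendored sources); everything used here is defined in script.go above, and the painless snippet is a placeholder.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	// An inline script with a language and one parameter.
	script := elastic.NewScriptInline("ctx._source.likes += params.num").
		Lang("painless").
		Param("num", 1)

	src, err := script.Source()
	if err != nil {
		log.Fatal(err)
	}
	out, err := json.Marshal(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
	// {"inline":"ctx._source.likes += params.num","lang":"painless","params":{"num":1}}
}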
+func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService { + s.keepAlive = keepAlive + return s +} + +// Size specifies the number of documents Elasticsearch should return +// from each shard, per page. +func (s *ScrollService) Size(size int) *ScrollService { + s.size = &size + return s +} + +// Body sets the raw body to send to Elasticsearch. This can be e.g. a string, +// a map[string]interface{} or anything that can be serialized into JSON. +// Notice that setting the body disables the use of SearchSource and many +// other properties of the ScanService. +func (s *ScrollService) Body(body interface{}) *ScrollService { + s.body = body + return s +} + +// SearchSource sets the search source builder to use with this iterator. +// Notice that only a certain number of properties can be used when scrolling, +// e.g. query and sorting. +func (s *ScrollService) SearchSource(searchSource *SearchSource) *ScrollService { + s.ss = searchSource + if s.ss == nil { + s.ss = NewSearchSource() + } + return s +} + +// Query sets the query to perform, e.g. a MatchAllQuery. +func (s *ScrollService) Query(query Query) *ScrollService { + s.ss = s.ss.Query(query) + return s +} + +// PostFilter is executed as the last filter. It only affects the +// search hits but not facets. See +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-post-filter.html +// for details. +func (s *ScrollService) PostFilter(postFilter Query) *ScrollService { + s.ss = s.ss.PostFilter(postFilter) + return s +} + +// Slice allows slicing the scroll request into several batches. +// This is supported in Elasticsearch 5.0 or later. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#sliced-scroll +// for details. +func (s *ScrollService) Slice(sliceQuery Query) *ScrollService { + s.ss = s.ss.Slice(sliceQuery) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *ScrollService) FetchSource(fetchSource bool) *ScrollService { + s.ss = s.ss.FetchSource(fetchSource) + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *ScrollService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScrollService { + s.ss = s.ss.FetchSourceContext(fetchSourceContext) + return s +} + +// Version can be set to true to return a version for each search hit. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-version.html. +func (s *ScrollService) Version(version bool) *ScrollService { + s.ss = s.ss.Version(version) + return s +} + +// Sort adds a sort order. This can have negative effects on the performance +// of the scroll operation as Elasticsearch needs to sort first. +func (s *ScrollService) Sort(field string, ascending bool) *ScrollService { + s.ss = s.ss.Sort(field, ascending) + return s +} + +// SortWithInfo specifies a sort order. Notice that sorting can have a +// negative impact on scroll performance. +func (s *ScrollService) SortWithInfo(info SortInfo) *ScrollService { + s.ss = s.ss.SortWithInfo(info) + return s +} + +// SortBy specifies a sort order. Notice that sorting can have a +// negative impact on scroll performance. +func (s *ScrollService) SortBy(sorter ...Sorter) *ScrollService { + s.ss = s.ss.SortBy(sorter...) + return s +} + +// Pretty asks Elasticsearch to pretty-print the returned JSON. 
+func (s *ScrollService) Pretty(pretty bool) *ScrollService { + s.pretty = pretty + return s +} + +// Routing is a list of specific routing values to control the shards +// the search will be executed on. +func (s *ScrollService) Routing(routings ...string) *ScrollService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference sets the preference to execute the search. Defaults to +// randomize across shards ("random"). Can be set to "_local" to prefer +// local shards, "_primary" to execute on primary shards only, +// or a custom value which guarantees that the same order will be used +// across different requests. +func (s *ScrollService) Preference(preference string) *ScrollService { + s.preference = preference + return s +} + +// IgnoreUnavailable indicates whether the specified concrete indices +// should be ignored when unavailable (missing or closed). +func (s *ScrollService) IgnoreUnavailable(ignoreUnavailable bool) *ScrollService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string +// or when no indices have been specified). +func (s *ScrollService) AllowNoIndices(allowNoIndices bool) *ScrollService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *ScrollService) ExpandWildcards(expandWildcards string) *ScrollService { + s.expandWildcards = expandWildcards + return s +} + +// ScrollId specifies the identifier of a scroll in action. +func (s *ScrollService) ScrollId(scrollId string) *ScrollService { + s.mu.Lock() + s.scrollId = scrollId + s.mu.Unlock() + return s +} + +// Do returns the next search result. It will return io.EOF as error if there +// are no more search results. +func (s *ScrollService) Do(ctx context.Context) (*SearchResult, error) { + s.mu.RLock() + nextScrollId := s.scrollId + s.mu.RUnlock() + if len(nextScrollId) == 0 { + return s.first(ctx) + } + return s.next(ctx) +} + +// Clear cancels the current scroll operation. If you don't do this manually, +// the scroll will be expired automatically by Elasticsearch. You can control +// how long a scroll cursor is kept alive with the KeepAlive func. +func (s *ScrollService) Clear(ctx context.Context) error { + s.mu.RLock() + scrollId := s.scrollId + s.mu.RUnlock() + if len(scrollId) == 0 { + return nil + } + + path := "/_search/scroll" + params := url.Values{} + body := struct { + ScrollId []string `json:"scroll_id,omitempty"` + }{ + ScrollId: []string{scrollId}, + } + + _, err := s.client.PerformRequest(ctx, "DELETE", path, params, body) + if err != nil { + return err + } + + return nil +} + +// -- First -- + +// first takes the first page of search results. 
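+
+// NOTE (editorial example, not part of upstream elastic.v5): a minimal consumption
+// loop for the Do/Clear contract described above. Do returns io.EOF once the cursor
+// is exhausted; Clear releases the server-side scroll context when stopping early.
+// The index name "blobs" is an assumption for illustration.
+func exampleDrainScroll(ctx context.Context, client *Client) error {
+	scroll := NewScrollService(client).Index("blobs").Size(100)
+	defer scroll.Clear(ctx) // best effort; errors from cleanup are ignored here
+	for {
+		page, err := scroll.Do(ctx)
+		if err == io.EOF {
+			return nil // all pages consumed
+		}
+		if err != nil {
+			return err
+		}
+		for _, hit := range page.Hits.Hits {
+			_ = hit // inspect hit.Id, hit.Source, ... here
+		}
+	}
+}
+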
+func (s *ScrollService) first(ctx context.Context) (*SearchResult, error) { + // Get URL and parameters for request + path, params, err := s.buildFirstURL() + if err != nil { + return nil, err + } + + // Get HTTP request body + body, err := s.bodyFirst() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + s.mu.Lock() + s.scrollId = ret.ScrollId + s.mu.Unlock() + if ret.Hits == nil || len(ret.Hits.Hits) == 0 { + return nil, io.EOF + } + return ret, nil +} + +// buildFirstURL builds the URL for retrieving the first page. +func (s *ScrollService) buildFirstURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.indices) == 0 && len(s.types) == 0 { + path = "/_search" + } else if len(s.indices) > 0 && len(s.types) == 0 { + path, err = uritemplates.Expand("/{index}/_search", map[string]string{ + "index": strings.Join(s.indices, ","), + }) + } else if len(s.indices) == 0 && len(s.types) > 0 { + path, err = uritemplates.Expand("/_all/{typ}/_search", map[string]string{ + "typ": strings.Join(s.types, ","), + }) + } else { + path, err = uritemplates.Expand("/{index}/{typ}/_search", map[string]string{ + "index": strings.Join(s.indices, ","), + "typ": strings.Join(s.types, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.size != nil && *s.size > 0 { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if len(s.keepAlive) > 0 { + params.Set("scroll", s.keepAlive) + } + if len(s.routing) > 0 { + params.Set("routing", s.routing) + } + if len(s.preference) > 0 { + params.Set("preference", s.preference) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if len(s.expandWildcards) > 0 { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + + return path, params, nil +} + +// bodyFirst returns the request to fetch the first batch of results. 
+func (s *ScrollService) bodyFirst() (interface{}, error) { + var err error + var body interface{} + + if s.body != nil { + body = s.body + } else { + // Use _doc sort by default if none is specified + if !s.ss.hasSort() { + // Use efficient sorting when no user-defined query/body is specified + s.ss = s.ss.SortBy(SortByDoc{}) + } + + // Body from search source + body, err = s.ss.Source() + if err != nil { + return nil, err + } + } + + return body, nil +} + +// -- Next -- + +func (s *ScrollService) next(ctx context.Context) (*SearchResult, error) { + // Get URL for request + path, params, err := s.buildNextURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body, err := s.bodyNext() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + s.mu.Lock() + s.scrollId = ret.ScrollId + s.mu.Unlock() + if ret.Hits == nil || len(ret.Hits.Hits) == 0 { + return nil, io.EOF + } + return ret, nil +} + +// buildNextURL builds the URL for the operation. +func (s *ScrollService) buildNextURL() (string, url.Values, error) { + path := "/_search/scroll" + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + + return path, params, nil +} + +// body returns the request to fetch the next batch of results. +func (s *ScrollService) bodyNext() (interface{}, error) { + s.mu.RLock() + body := struct { + Scroll string `json:"scroll"` + ScrollId string `json:"scroll_id,omitempty"` + }{ + Scroll: s.keepAlive, + ScrollId: s.scrollId, + } + s.mu.RUnlock() + return body, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search.go b/vendor/gopkg.in/olivere/elastic.v5/search.go new file mode 100644 index 0000000000000000000000000000000000000000..e27ada228fbd7f52fe6e61e03f81b89820232c71 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search.go @@ -0,0 +1,556 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// Search for documents in Elasticsearch. +type SearchService struct { + client *Client + searchSource *SearchSource + source interface{} + pretty bool + filterPath []string + searchType string + index []string + typ []string + routing string + preference string + requestCache *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string +} + +// NewSearchService creates a new service for searching in Elasticsearch. +func NewSearchService(client *Client) *SearchService { + builder := &SearchService{ + client: client, + searchSource: NewSearchSource(), + } + return builder +} + +// SearchSource sets the search source builder to use with this service. +func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService { + s.searchSource = searchSource + if s.searchSource == nil { + s.searchSource = NewSearchSource() + } + return s +} + +// Source allows the user to set the request body manually without using +// any of the structs and interfaces in Elastic. 
+func (s *SearchService) Source(source interface{}) *SearchService { + s.source = source + return s +} + +// FilterPath allows reducing the response, a mechanism known as +// response filtering and described here: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/common-options.html#common-options-response-filtering. +func (s *SearchService) FilterPath(filterPath ...string) *SearchService { + s.filterPath = append(s.filterPath, filterPath...) + return s +} + +// Index sets the names of the indices to use for search. +func (s *SearchService) Index(index ...string) *SearchService { + s.index = append(s.index, index...) + return s +} + +// Types adds search restrictions for a list of types. +func (s *SearchService) Type(typ ...string) *SearchService { + s.typ = append(s.typ, typ...) + return s +} + +// Pretty enables the caller to indent the JSON output. +func (s *SearchService) Pretty(pretty bool) *SearchService { + s.pretty = pretty + return s +} + +// Timeout sets the timeout to use, e.g. "1s" or "1000ms". +func (s *SearchService) Timeout(timeout string) *SearchService { + s.searchSource = s.searchSource.Timeout(timeout) + return s +} + +// Profile sets the Profile API flag on the search source. +// When enabled, a search executed by this service will return query +// profiling data. +func (s *SearchService) Profile(profile bool) *SearchService { + s.searchSource = s.searchSource.Profile(profile) + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService { + s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis) + return s +} + +// SearchType sets the search operation type. Valid values are: +// "query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", +// "dfs_query_and_fetch", "count", "scan". +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-search-type.html +// for details. +func (s *SearchService) SearchType(searchType string) *SearchService { + s.searchType = searchType + return s +} + +// Routing is a list of specific routing values to control the shards +// the search will be executed on. +func (s *SearchService) Routing(routings ...string) *SearchService { + s.routing = strings.Join(routings, ",") + return s +} + +// Preference sets the preference to execute the search. Defaults to +// randomize across shards ("random"). Can be set to "_local" to prefer +// local shards, "_primary" to execute on primary shards only, +// or a custom value which guarantees that the same order will be used +// across different requests. +func (s *SearchService) Preference(preference string) *SearchService { + s.preference = preference + return s +} + +// RequestCache indicates whether the cache should be used for this +// request or not, defaults to index level setting. +func (s *SearchService) RequestCache(requestCache bool) *SearchService { + s.requestCache = &requestCache + return s +} + +// Query sets the query to perform, e.g. MatchAllQuery. +func (s *SearchService) Query(query Query) *SearchService { + s.searchSource = s.searchSource.Query(query) + return s +} + +// PostFilter will be executed after the query has been executed and +// only affects the search hits, not the aggregations. +// This filter is always executed as the last filtering mechanism. 
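+
+// NOTE (editorial example, not part of upstream elastic.v5): the service above can be
+// driven either through its fluent setters or by replacing the generated body with
+// Source. Index/type names and the NewTermQuery builder defined elsewhere in this
+// package are assumptions for illustration.
+func exampleConfigureSearch(client *Client) (fluent, raw *SearchService) {
+	// Fluent style: the service assembles the request body from the setters.
+	fluent = NewSearchService(client).
+		Index("projects").
+		Type("doc").
+		Query(NewTermQuery("visibility", "public")).
+		Timeout("5s").
+		RequestCache(true)
+
+	// Raw style: Source replaces the generated body entirely.
+	raw = NewSearchService(client).
+		Index("projects").
+		Source(map[string]interface{}{
+			"query": map[string]interface{}{
+				"term": map[string]interface{}{"visibility": "public"},
+			},
+		})
+	return fluent, raw
+}
+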
+func (s *SearchService) PostFilter(postFilter Query) *SearchService {
+	s.searchSource = s.searchSource.PostFilter(postFilter)
+	return s
+}
+
+// FetchSource indicates whether the response should contain the stored
+// _source for every hit.
+func (s *SearchService) FetchSource(fetchSource bool) *SearchService {
+	s.searchSource = s.searchSource.FetchSource(fetchSource)
+	return s
+}
+
+// FetchSourceContext indicates how the _source should be fetched.
+func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService {
+	s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
+	return s
+}
+
+// Highlight adds highlighting to the search.
+func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
+	s.searchSource = s.searchSource.Highlight(highlight)
+	return s
+}
+
+// GlobalSuggestText defines the global text to use with all suggesters.
+// This avoids repetition.
+func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
+	s.searchSource = s.searchSource.GlobalSuggestText(globalText)
+	return s
+}
+
+// Suggester adds a suggester to the search.
+func (s *SearchService) Suggester(suggester Suggester) *SearchService {
+	s.searchSource = s.searchSource.Suggester(suggester)
+	return s
+}
+
+// Aggregation adds an aggregation to perform as part of the search.
+func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
+	s.searchSource = s.searchSource.Aggregation(name, aggregation)
+	return s
+}
+
+// MinScore sets the minimum score below which docs will be filtered out.
+func (s *SearchService) MinScore(minScore float64) *SearchService {
+	s.searchSource = s.searchSource.MinScore(minScore)
+	return s
+}
+
+// From index to start the search from. Defaults to 0.
+func (s *SearchService) From(from int) *SearchService {
+	s.searchSource = s.searchSource.From(from)
+	return s
+}
+
+// Size is the number of search hits to return. Defaults to 10.
+func (s *SearchService) Size(size int) *SearchService {
+	s.searchSource = s.searchSource.Size(size)
+	return s
+}
+
+// Explain indicates whether each search hit should be returned with
+// an explanation of the hit (ranking).
+func (s *SearchService) Explain(explain bool) *SearchService {
+	s.searchSource = s.searchSource.Explain(explain)
+	return s
+}
+
+// Version indicates whether each search hit should be returned with
+// a version associated to it.
+func (s *SearchService) Version(version bool) *SearchService {
+	s.searchSource = s.searchSource.Version(version)
+	return s
+}
+
+// Sort adds a sort order.
+func (s *SearchService) Sort(field string, ascending bool) *SearchService {
+	s.searchSource = s.searchSource.Sort(field, ascending)
+	return s
+}
+
+// SortWithInfo adds a sort order.
+func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
+	s.searchSource = s.searchSource.SortWithInfo(info)
+	return s
+}
+
+// SortBy adds a sort order.
+func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
+	s.searchSource = s.searchSource.SortBy(sorter...)
+	return s
+}
+
+// NoStoredFields indicates that no stored fields should be loaded, resulting in
+// only id and type being returned for each hit.
+func (s *SearchService) NoStoredFields() *SearchService {
+	s.searchSource = s.searchSource.NoStoredFields()
+	return s
+}
+
+// StoredField adds a single field to load and return (note, must be stored) as
+// part of the search request. If none are specified, the source of the
+// document will be returned.
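+
+// NOTE (editorial example, not part of upstream elastic.v5): a paging sketch for the
+// From/Size/Sort setters above. The index name, sort field and page size are
+// assumptions for illustration.
+func examplePagedSearch(client *Client, page, perPage int) *SearchService {
+	return NewSearchService(client).
+		Index("issues").
+		Sort("updated_at", false). // newest first
+		From(page * perPage).      // zero-based page offset
+		Size(perPage).
+		FetchSource(true) // include the stored _source of every hit
+}
+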
+func (s *SearchService) StoredField(fieldName string) *SearchService { + s.searchSource = s.searchSource.StoredField(fieldName) + return s +} + +// StoredFields sets the fields to load and return as part of the search request. +// If none are specified, the source of the document will be returned. +func (s *SearchService) StoredFields(fields ...string) *SearchService { + s.searchSource = s.searchSource.StoredFields(fields...) + return s +} + +// SearchAfter allows a different form of pagination by using a live cursor, +// using the results of the previous page to help the retrieval of the next. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-search-after.html +func (s *SearchService) SearchAfter(sortValues ...interface{}) *SearchService { + s.searchSource = s.searchSource.SearchAfter(sortValues...) + return s +} + +// IgnoreUnavailable indicates whether the specified concrete indices +// should be ignored when unavailable (missing or closed). +func (s *SearchService) IgnoreUnavailable(ignoreUnavailable bool) *SearchService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string +// or when no indices have been specified). +func (s *SearchService) AllowNoIndices(allowNoIndices bool) *SearchService { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *SearchService) ExpandWildcards(expandWildcards string) *SearchService { + s.expandWildcards = expandWildcards + return s +} + +// buildURL builds the URL for the operation. +func (s *SearchService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 && len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_search", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_search", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else if len(s.typ) > 0 { + path, err = uritemplates.Expand("/_all/{type}/_search", map[string]string{ + "type": strings.Join(s.typ, ","), + }) + } else { + path = "/_search" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if len(s.filterPath) > 0 { + params.Set("filter_path", strings.Join(s.filterPath, ",")) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *SearchService) Validate() error { + return nil +} + +// Do executes the search and returns a SearchResult. 
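+
+// NOTE (editorial example, not part of upstream elastic.v5): a hedged sketch of
+// cursor-style paging with SearchAfter as described above: every request uses the
+// same sort order and is seeded with the sort values of the previous page's last
+// hit. Index and field names are assumptions for illustration.
+func exampleSearchAfter(ctx context.Context, client *Client) error {
+	var after []interface{} // sort values of the last hit seen so far
+	for {
+		svc := NewSearchService(client).
+			Index("commits").
+			Sort("committed_date", true).
+			Sort("_uid", true). // tie-breaker so the cursor is totally ordered
+			Size(500)
+		if after != nil {
+			svc = svc.SearchAfter(after...)
+		}
+		res, err := svc.Do(ctx)
+		if err != nil {
+			return err
+		}
+		if res.Hits == nil || len(res.Hits.Hits) == 0 {
+			return nil // exhausted
+		}
+		hits := res.Hits.Hits
+		after = hits[len(hits)-1].Sort // seed for the next page
+		// process hits here
+	}
+}
+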
+func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Perform request + var body interface{} + if s.source != nil { + body = s.source + } else { + src, err := s.searchSource.Source() + if err != nil { + return nil, err + } + body = src + } + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return search results + ret := new(SearchResult) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// SearchResult is the result of a search in Elasticsearch. +type SearchResult struct { + TookInMillis int64 `json:"took"` // search time in milliseconds + ScrollId string `json:"_scroll_id"` // only used with Scroll and Scan operations + Hits *SearchHits `json:"hits"` // the actual search hits + Suggest SearchSuggest `json:"suggest"` // results from suggesters + Aggregations Aggregations `json:"aggregations"` // results from aggregations + TimedOut bool `json:"timed_out"` // true if the search timed out + Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet + Profile *SearchProfile `json:"profile,omitempty"` // profiling results, if optional Profile API was active for this search + Shards *shardsInfo `json:"_shards,omitempty"` // shard information +} + +// TotalHits is a convenience function to return the number of hits for +// a search result. +func (r *SearchResult) TotalHits() int64 { + if r.Hits != nil { + return r.Hits.TotalHits + } + return 0 +} + +// Each is a utility function to iterate over all hits. It saves you from +// checking for nil values. Notice that Each will ignore errors in +// serializing JSON. +func (r *SearchResult) Each(typ reflect.Type) []interface{} { + if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 { + return nil + } + var slice []interface{} + for _, hit := range r.Hits.Hits { + v := reflect.New(typ).Elem() + if err := json.Unmarshal(*hit.Source, v.Addr().Interface()); err == nil { + slice = append(slice, v.Interface()) + } + } + return slice +} + +// SearchHits specifies the list of search hits. +type SearchHits struct { + TotalHits int64 `json:"total"` // total number of hits found + MaxScore *float64 `json:"max_score"` // maximum score of all hits + Hits []*SearchHit `json:"hits"` // the actual hits returned +} + +// SearchHit is a single hit. 
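+
+// NOTE (editorial example, not part of upstream elastic.v5): the two decoding styles
+// supported by the types above, reflection-based iteration via Each and explicit
+// unmarshalling of each hit's raw _source. The blobDoc type is invented for the
+// example.
+type blobDoc struct {
+	RepoID int64  `json:"repo_id"`
+	Path   string `json:"path"`
+}
+
+func exampleDecodeHits(res *SearchResult) ([]blobDoc, error) {
+	// Style 1: Each silently skips hits whose _source cannot be unmarshalled.
+	for _, item := range res.Each(reflect.TypeOf(blobDoc{})) {
+		_ = item.(blobDoc)
+	}
+
+	// Style 2: explicit unmarshalling keeps the per-hit error.
+	if res.Hits == nil {
+		return nil, nil
+	}
+	var docs []blobDoc
+	for _, hit := range res.Hits.Hits {
+		if hit.Source == nil {
+			continue // _source disabled or filtered out for this hit
+		}
+		var d blobDoc
+		if err := json.Unmarshal(*hit.Source, &d); err != nil {
+			return nil, err
+		}
+		docs = append(docs, d)
+	}
+	return docs, nil
+}
+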
+type SearchHit struct { + Score *float64 `json:"_score"` // computed score + Index string `json:"_index"` // index name + Type string `json:"_type"` // type meta field + Id string `json:"_id"` // external or internal + Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) + Routing string `json:"_routing"` // routing meta field + Parent string `json:"_parent"` // parent meta field + Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService + Sort []interface{} `json:"sort"` // sort information + Highlight SearchHitHighlight `json:"highlight"` // highlighter information + Source *json.RawMessage `json:"_source"` // stored document source + Fields map[string]interface{} `json:"fields"` // returned (stored) fields + Explanation *SearchExplanation `json:"_explanation"` // explains how the score was computed + MatchedQueries []string `json:"matched_queries"` // matched queries + InnerHits map[string]*SearchHitInnerHits `json:"inner_hits"` // inner hits with ES >= 1.5.0 + + // Shard + // HighlightFields + // SortValues + // MatchedFilters +} + +type SearchHitInnerHits struct { + Hits *SearchHits `json:"hits"` +} + +// SearchExplanation explains how the score for a hit was computed. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-explain.html. +type SearchExplanation struct { + Value float64 `json:"value"` // e.g. 1.0 + Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:" + Details []SearchExplanation `json:"details,omitempty"` // recursive details +} + +// Suggest + +// SearchSuggest is a map of suggestions. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters.html. +type SearchSuggest map[string][]SearchSuggestion + +// SearchSuggestion is a single search suggestion. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters.html. +type SearchSuggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []SearchSuggestionOption `json:"options"` +} + +// SearchSuggestionOption is an option of a SearchSuggestion. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters.html. +type SearchSuggestionOption struct { + Text string `json:"text"` + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Score float64 `json:"_score"` + Source *json.RawMessage `json:"_source"` +} + +// SearchProfile is a list of shard profiling data collected during +// query execution in the "profile" section of a SearchResult +type SearchProfile struct { + Shards []SearchProfileShardResult `json:"shards"` +} + +// SearchProfileShardResult returns the profiling data for a single shard +// accessed during the search query or aggregation. +type SearchProfileShardResult struct { + ID string `json:"id"` + Searches []QueryProfileShardResult `json:"searches"` + Aggregations []ProfileResult `json:"aggregations"` +} + +// QueryProfileShardResult is a container class to hold the profile results +// for a single shard in the request. It comtains a list of query profiles, +// a collector tree and a total rewrite tree. +type QueryProfileShardResult struct { + Query []ProfileResult `json:"query,omitempty"` + RewriteTime int64 `json:"rewrite_time,omitempty"` + Collector []interface{} `json:"collector,omitempty"` +} + +// CollectorResult holds the profile timings of the collectors used in the +// search. 
Children's CollectorResults may be embedded inside of a parent +// CollectorResult. +type CollectorResult struct { + Name string `json:"name,omitempty"` + Reason string `json:"reason,omitempty"` + Time string `json:"time,omitempty"` + TimeNanos int64 `json:"time_in_nanos,omitempty"` + Children []CollectorResult `json:"children,omitempty"` +} + +// ProfileResult is the internal representation of a profiled query, +// corresponding to a single node in the query tree. +type ProfileResult struct { + Type string `json:"type"` + Description string `json:"description,omitempty"` + NodeTime string `json:"time,omitempty"` + NodeTimeNanos int64 `json:"time_in_nanos,omitempty"` + Breakdown map[string]int64 `json:"breakdown,omitempty"` + Children []ProfileResult `json:"children,omitempty"` +} + +// Aggregations (see search_aggs.go) + +// Highlighting + +// SearchHitHighlight is the highlight information of a search hit. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-highlighting.html +// for a general discussion of highlighting. +type SearchHitHighlight map[string][]string diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go new file mode 100644 index 0000000000000000000000000000000000000000..778aebcffef658f26b3c380d6e4f4c82dbc82f83 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs.go @@ -0,0 +1,1353 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "bytes" + "encoding/json" +) + +// Aggregations can be seen as a unit-of-work that build +// analytic information over a set of documents. It is +// (in many senses) the follow-up of facets in Elasticsearch. +// For more details about aggregations, visit: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations.html +type Aggregation interface { + // Source returns a JSON-serializable aggregation that is a fragment + // of the request sent to Elasticsearch. + Source() (interface{}, error) +} + +// Aggregations is a list of aggregations that are part of a search result. +type Aggregations map[string]*json.RawMessage + +// Min returns min aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-min-aggregation.html +func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Max returns max aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-max-aggregation.html +func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sum returns sum aggregation results. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-sum-aggregation.html +func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Avg returns average aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-avg-aggregation.html +func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ValueCount returns value-count aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-valuecount-aggregation.html +func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Cardinality returns cardinality aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-cardinality-aggregation.html +func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationValueMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Stats returns stats aggregation results. +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-stats-aggregation.html +func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ExtendedStats returns extended stats aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-extendedstats-aggregation.html +func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationExtendedStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Percentiles returns percentiles results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-aggregation.html +func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// PercentileRanks returns percentile ranks results. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-rank-aggregation.html +func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPercentilesMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// TopHits returns top-hits aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-top-hits-aggregation.html +func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationTopHitsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Global returns global results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-global-aggregation.html +func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filter returns filter results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filter-aggregation.html +func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Filters returns filters results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filters-aggregation.html +func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketFilters) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Missing returns missing results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-missing-aggregation.html +func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Nested returns nested results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-nested-aggregation.html +func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// ReverseNested returns reverse-nested results. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-reverse-nested-aggregation.html +func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Children returns children results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-children-aggregation.html +func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Terms returns terms aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-terms-aggregation.html +func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SignificantTerms returns significant terms aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html +func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketSignificantTerms) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Sampler returns sampler aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-sampler-aggregation.html +func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) { + if raw, found := a[name]; found { + agg := new(AggregationSingleBucket) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Range returns range aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-range-aggregation.html +func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// KeyedRange returns keyed range aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-range-aggregation.html. +func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyedRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateRange returns date range aggregation results. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-daterange-aggregation.html +func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// IPv4Range returns IPv4 range aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-iprange-aggregation.html +func (a Aggregations) IPv4Range(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Histogram returns histogram aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-histogram-aggregation.html +func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// DateHistogram returns date histogram aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-datehistogram-aggregation.html +func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketHistogramItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoBounds returns geo-bounds aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-geobounds-aggregation.html +func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationGeoBoundsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoHash returns geo-hash aggregation results. +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-geohashgrid-aggregation.html +func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketKeyItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// GeoDistance returns geo distance aggregation results. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-geodistance-aggregation.html +func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) { + if raw, found := a[name]; found { + agg := new(AggregationBucketRangeItems) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// AvgBucket returns average bucket pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-avg-bucket-aggregation.html +func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SumBucket returns sum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-sum-bucket-aggregation.html +func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// StatsBucket returns stats bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-stats-bucket-aggregation.html +func (a Aggregations) StatsBucket(name string) (*AggregationPipelineStatsMetric, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineStatsMetric) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MaxBucket returns maximum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-max-bucket-aggregation.html +func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MinBucket returns minimum bucket pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-min-bucket-aggregation.html +func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineBucketMetricValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// MovAvg returns moving average pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html +func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// Derivative returns derivative pipeline aggregation results. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-derivative-aggregation.html +func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineDerivative) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// CumulativeSum returns a cumulative sum pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-cumulative-sum-aggregation.html +func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// BucketScript returns bucket script pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-bucket-script-aggregation.html +func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// SerialDiff returns serial differencing pipeline aggregation results. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-serialdiff-aggregation.html +func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) { + if raw, found := a[name]; found { + agg := new(AggregationPipelineSimpleValue) + if raw == nil { + return agg, true + } + if err := json.Unmarshal(*raw, agg); err == nil { + return agg, true + } + } + return nil, false +} + +// -- Single value metric -- + +// AggregationValueMetric is a single-value metric, returned e.g. by a +// Min or Max aggregation. +type AggregationValueMetric struct { + Aggregations + + Value *float64 //`json:"value"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure. +func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Stats metric -- + +// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation. +type AggregationStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure. 
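+
+// NOTE (editorial example, not part of upstream elastic.v5): a round trip for the
+// metric accessors above. The index/field names, the aggregation name "avg_weight"
+// and the NewAvgAggregation builder defined elsewhere in this package are
+// assumptions; the snippet also assumes the context import used by search.go.
+func exampleAvgAggregation(ctx context.Context, client *Client) (float64, error) {
+	res, err := NewSearchService(client).
+		Index("issues").
+		Size(0). // aggregation-only search, no hits needed
+		Aggregation("avg_weight", NewAvgAggregation().Field("weight")).
+		Do(ctx)
+	if err != nil {
+		return 0, err
+	}
+	agg, found := res.Aggregations.Avg("avg_weight")
+	if !found || agg.Value == nil {
+		return 0, nil // aggregation missing or empty
+	}
+	return *agg.Value, nil
+}
+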
+func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Extended stats metric -- + +// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation. +type AggregationExtendedStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + Min *float64 //`json:"min,omitempty"` + Max *float64 //`json:"max,omitempty"` + Avg *float64 //`json:"avg,omitempty"` + Sum *float64 //`json:"sum,omitempty"` + SumOfSquares *float64 //`json:"sum_of_squares,omitempty"` + Variance *float64 //`json:"variance,omitempty"` + StdDeviation *float64 //`json:"std_deviation,omitempty"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure. +func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["sum_of_squares"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfSquares) + } + if v, ok := aggs["variance"]; ok && v != nil { + json.Unmarshal(*v, &a.Variance) + } + if v, ok := aggs["std_deviation"]; ok && v != nil { + json.Unmarshal(*v, &a.StdDeviation) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Percentiles metric -- + +// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation. +type AggregationPercentilesMetric struct { + Aggregations + + Values map[string]float64 // `json:"values"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure. +func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["values"]; ok && v != nil { + json.Unmarshal(*v, &a.Values) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Top-hits metric -- + +// AggregationTopHitsMetric is a metric returned by a TopHits aggregation. +type AggregationTopHitsMetric struct { + Aggregations + + Hits *SearchHits //`json:"hits"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure. 
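+
+// NOTE (editorial example, not part of upstream elastic.v5): how the UnmarshalJSON
+// implementations above are exercised. A raw "aggregations" object decodes into the
+// Aggregations map, and the typed accessors unmarshal individual entries on demand.
+// The payload is made up for illustration.
+func exampleDecodeRawAggregations() (*float64, error) {
+	payload := []byte(`{"min_size": {"value": 12.5}}`)
+	var aggs Aggregations
+	if err := json.Unmarshal(payload, &aggs); err != nil {
+		return nil, err
+	}
+	if metric, found := aggs.Min("min_size"); found {
+		return metric.Value, nil // 12.5 for the payload above
+	}
+	return nil, nil
+}
+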
+func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + a.Aggregations = aggs + a.Hits = new(SearchHits) + if v, ok := aggs["hits"]; ok && v != nil { + json.Unmarshal(*v, &a.Hits) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + return nil +} + +// -- Geo-bounds metric -- + +// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation. +type AggregationGeoBoundsMetric struct { + Aggregations + + Bounds struct { + TopLeft struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"top_left"` + BottomRight struct { + Latitude float64 `json:"lat"` + Longitude float64 `json:"lon"` + } `json:"bottom_right"` + } `json:"bounds"` + + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure. +func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["bounds"]; ok && v != nil { + json.Unmarshal(*v, &a.Bounds) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Single bucket -- + +// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global. +type AggregationSingleBucket struct { + Aggregations + + DocCount int64 // `json:"doc_count"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure. +func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket range items -- + +// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned +// with a range aggregation. +type AggregationBucketRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. +func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned +// with a keyed range aggregation. 
+type AggregationBucketKeyedRangeItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. +func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure. +type AggregationBucketRangeItem struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + From *float64 //`json:"from"` + FromAsString string //`json:"from_as_string"` + To *float64 //`json:"to"` + ToAsString string //`json:"to_as_string"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure. +func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["from"]; ok && v != nil { + json.Unmarshal(*v, &a.From) + } + if v, ok := aggs["from_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.FromAsString) + } + if v, ok := aggs["to"]; ok && v != nil { + json.Unmarshal(*v, &a.To) + } + if v, ok := aggs["to_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ToAsString) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket key items -- + +// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned +// with a terms aggregation. +type AggregationBucketKeyItems struct { + Aggregations + + DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` + SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure. +func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCountErrorUpperBound) + } + if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.SumOfOtherDocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure. 
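+
+// NOTE (editorial example, not part of upstream elastic.v5): reading a terms
+// aggregation through the Terms accessor defined earlier in this file; each bucket is
+// an AggregationBucketKeyItem as declared below. The aggregation name "by_author" is
+// an assumption for illustration.
+func exampleTermsBuckets(res *SearchResult) map[string]int64 {
+	counts := make(map[string]int64)
+	terms, found := res.Aggregations.Terms("by_author")
+	if !found {
+		return counts
+	}
+	for _, bucket := range terms.Buckets {
+		if key, ok := bucket.Key.(string); ok {
+			counts[key] = bucket.DocCount
+		}
+	}
+	return counts
+}
+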
+type AggregationBucketKeyItem struct { + Aggregations + + Key interface{} //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + KeyNumber json.Number + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure. +func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + dec := json.NewDecoder(bytes.NewReader(data)) + dec.UseNumber() + if err := dec.Decode(&aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + json.Unmarshal(*v, &a.KeyNumber) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket types for significant terms -- + +// AggregationBucketSignificantTerms is a bucket aggregation returned +// with a significant terms aggregation. +type AggregationBucketSignificantTerms struct { + Aggregations + + DocCount int64 //`json:"doc_count"` + Buckets []*AggregationBucketSignificantTerm //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure. +func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure. +type AggregationBucketSignificantTerm struct { + Aggregations + + Key string //`json:"key"` + DocCount int64 //`json:"doc_count"` + BgCount int64 //`json:"bg_count"` + Score float64 //`json:"score"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure. +func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + if v, ok := aggs["bg_count"]; ok && v != nil { + json.Unmarshal(*v, &a.BgCount) + } + if v, ok := aggs["score"]; ok && v != nil { + json.Unmarshal(*v, &a.Score) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket filters -- + +// AggregationBucketFilters is a multi-bucket aggregation that is returned +// with a filters aggregation. +type AggregationBucketFilters struct { + Aggregations + + Buckets []*AggregationBucketKeyItem //`json:"buckets"` + NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure. 
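Because the bucket-key decoder above calls UseNumber, numeric keys are also preserved losslessly in KeyNumber; a sketch (res and the aggregation name "by_status" are placeholders):

	if terms, found := res.Aggregations.Terms("by_status"); found {
		for _, b := range terms.Buckets {
			if code, err := b.KeyNumber.Int64(); err == nil {
				fmt.Printf("status %d: %d documents\n", code, b.DocCount)
			}
		}
	}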
+func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + json.Unmarshal(*v, &a.NamedBuckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Bucket histogram items -- + +// AggregationBucketHistogramItems is a bucket aggregation that is returned +// with a date histogram aggregation. +type AggregationBucketHistogramItems struct { + Aggregations + + Buckets []*AggregationBucketHistogramItem //`json:"buckets"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure. +func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["buckets"]; ok && v != nil { + json.Unmarshal(*v, &a.Buckets) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure. +type AggregationBucketHistogramItem struct { + Aggregations + + Key float64 //`json:"key"` + KeyAsString *string //`json:"key_as_string"` + DocCount int64 //`json:"doc_count"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure. +func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["key"]; ok && v != nil { + json.Unmarshal(*v, &a.Key) + } + if v, ok := aggs["key_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.KeyAsString) + } + if v, ok := aggs["doc_count"]; ok && v != nil { + json.Unmarshal(*v, &a.DocCount) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineSimpleValue is a simple value, returned e.g. by a +// MovAvg aggregation. +type AggregationPipelineSimpleValue struct { + Aggregations + + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure. +func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline simple value -- + +// AggregationPipelineBucketMetricValue is a value returned e.g. by a +// MaxBucket aggregation. +type AggregationPipelineBucketMetricValue struct { + Aggregations + + Keys []interface{} // `json:"keys"` + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure. 
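The pipeline bucket metric value above is what e.g. a max_bucket aggregation yields; a reading sketch (res and the aggregation name "best_month" are placeholders):

	if mb, found := res.Aggregations.MaxBucket("best_month"); found && mb.Value != nil {
		// Keys holds the key(s) of the bucket(s) where the maximum occurred
		fmt.Printf("maximum %.2f in bucket(s) %v\n", *mb.Value, mb.Keys)
	}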
+func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["keys"]; ok && v != nil { + json.Unmarshal(*v, &a.Keys) + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline derivative -- + +// AggregationPipelineDerivative is the value returned by a +// Derivative aggregation. +type AggregationPipelineDerivative struct { + Aggregations + + Value *float64 // `json:"value"` + ValueAsString string // `json:"value_as_string"` + NormalizedValue *float64 // `json:"normalized_value"` + NormalizedValueAsString string // `json:"normalized_value_as_string"` + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure. +func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["value"]; ok && v != nil { + json.Unmarshal(*v, &a.Value) + } + if v, ok := aggs["value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.ValueAsString) + } + if v, ok := aggs["normalized_value"]; ok && v != nil { + json.Unmarshal(*v, &a.NormalizedValue) + } + if v, ok := aggs["normalized_value_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.NormalizedValueAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} + +// -- Pipeline stats metric -- + +// AggregationPipelineStatsMetric is a simple value, returned e.g. by a +// MovAvg aggregation. +type AggregationPipelineStatsMetric struct { + Aggregations + + Count int64 // `json:"count"` + CountAsString string // `json:"count_as_string"` + Min *float64 // `json:"min"` + MinAsString string // `json:"min_as_string"` + Max *float64 // `json:"max"` + MaxAsString string // `json:"max_as_string"` + Avg *float64 // `json:"avg"` + AvgAsString string // `json:"avg_as_string"` + Sum *float64 // `json:"sum"` + SumAsString string // `json:"sum_as_string"` + + Meta map[string]interface{} // `json:"meta,omitempty"` +} + +// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineStatsMetric structure. 
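A derivative result is read the same way; a sketch with the placeholder name "sales_deriv" (the first bucket carries no derivative, hence the nil check):

	if d, found := res.Aggregations.Derivative("sales_deriv"); found && d.Value != nil {
		fmt.Printf("change versus the previous bucket: %.2f\n", *d.Value)
	}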
+func (a *AggregationPipelineStatsMetric) UnmarshalJSON(data []byte) error { + var aggs map[string]*json.RawMessage + if err := json.Unmarshal(data, &aggs); err != nil { + return err + } + if v, ok := aggs["count"]; ok && v != nil { + json.Unmarshal(*v, &a.Count) + } + if v, ok := aggs["count_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.CountAsString) + } + if v, ok := aggs["min"]; ok && v != nil { + json.Unmarshal(*v, &a.Min) + } + if v, ok := aggs["min_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.MinAsString) + } + if v, ok := aggs["max"]; ok && v != nil { + json.Unmarshal(*v, &a.Max) + } + if v, ok := aggs["max_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.MaxAsString) + } + if v, ok := aggs["avg"]; ok && v != nil { + json.Unmarshal(*v, &a.Avg) + } + if v, ok := aggs["avg_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.AvgAsString) + } + if v, ok := aggs["sum"]; ok && v != nil { + json.Unmarshal(*v, &a.Sum) + } + if v, ok := aggs["sum_as_string"]; ok && v != nil { + json.Unmarshal(*v, &a.SumAsString) + } + if v, ok := aggs["meta"]; ok && v != nil { + json.Unmarshal(*v, &a.Meta) + } + a.Aggregations = aggs + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go new file mode 100644 index 0000000000000000000000000000000000000000..14d0d1ca9cf382853db711116e825fb4da23724c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_children.go @@ -0,0 +1,76 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ChildrenAggregation is a special single bucket aggregation that enables +// aggregating from buckets on parent document types to buckets on child documents. +// It is available from 1.4.0.Beta1 upwards. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-children-aggregation.html +type ChildrenAggregation struct { + typ string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewChildrenAggregation() *ChildrenAggregation { + return &ChildrenAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation { + a.typ = typ + return a +} + +func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation { + a.meta = metaData + return a +} + +func (a *ChildrenAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "to-answers" : { + // "children": { + // "type" : "answer" + // } + // } + // } + // } + // This method returns only the { "type" : ... } part. 
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["children"] = opts + opts["type"] = a.typ + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go new file mode 100644 index 0000000000000000000000000000000000000000..17916b4902c4bbcccff85267e7358e6953fd1e29 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_histogram.go @@ -0,0 +1,285 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DateHistogramAggregation is a multi-bucket aggregation similar to the +// histogram except it can only be applied on date values. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-datehistogram-aggregation.html +type DateHistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval string + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin interface{} + extendedBoundsMax interface{} + timeZone string + format string + offset string +} + +// NewDateHistogramAggregation creates a new DateHistogramAggregation. +func NewDateHistogramAggregation() *DateHistogramAggregation { + return &DateHistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Field on which the aggregation is processed. +func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation { + a.field = field + return a +} + +func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation { + a.missing = missing + return a +} + +func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation { + a.meta = metaData + return a +} + +// Interval by which the aggregation gets processed. +// Allowed values are: "year", "quarter", "month", "week", "day", +// "hour", "minute". It also supports time settings like "1.5h" +// (up to "w" for weeks). +func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. 
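A usage sketch for the builder above, assuming a configured *elastic.Client named client, an index named "posts" (both placeholders), and imports of context, fmt and gopkg.in/olivere/elastic.v5 as elastic:

	agg := elastic.NewDateHistogramAggregation().
		Field("created_at").
		Interval("month").
		MinDocCount(0).
		OrderByKeyAsc()

	res, err := client.Search().
		Index("posts").
		Size(0). // hits are not needed, only the aggregation
		Aggregation("per_month", agg).
		Do(context.Background())
	if err != nil {
		// handle the error
	}
	if hist, found := res.Aggregations.DateHistogram("per_month"); found {
		for _, b := range hist.Buckets {
			if b.KeyAsString != nil {
				fmt.Printf("%s: %d posts\n", *b.KeyAsString, b.DocCount)
			}
		}
	}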
+func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation { + return a.OrderByCount(true) +} + +func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation { + return a.OrderByCount(false) +} + +func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation { + return a.OrderByKey(true) +} + +func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. +func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +// MinDocCount sets the minimum document count per bucket. +// Buckets with less documents than this min value will not be returned. +func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +// TimeZone sets the timezone in which to translate dates before computing buckets. +func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation { + a.timeZone = timeZone + return a +} + +// Format sets the format to use for dates. +func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation { + a.format = format + return a +} + +// Offset sets the offset of time intervals in the histogram, e.g. "+6h". +func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation { + a.offset = offset + return a +} + +// ExtendedBounds accepts int, int64, string, or time.Time values. +// In case the lower value in the histogram would be greater than min or the +// upper value would be less than max, empty buckets will be generated. +func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + a.extendedBoundsMax = max + return a +} + +// ExtendedBoundsMin accepts int, int64, string, or time.Time values. 
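With MinDocCount(0), ExtendedBounds forces empty buckets across the whole range even where no documents fall; a sketch of the builder calls and, roughly, the JSON they serialize to:

	agg := elastic.NewDateHistogramAggregation().
		Field("created_at").
		Interval("day").
		Format("yyyy-MM-dd").
		MinDocCount(0).
		ExtendedBounds("2017-01-01", "2017-01-31")

	// agg.Source() then yields approximately:
	// {"date_histogram": {"field": "created_at", "interval": "day", "format": "yyyy-MM-dd",
	//   "min_doc_count": 0, "extended_bounds": {"min": "2017-01-01", "max": "2017-01-31"}}}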
+func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation { + a.extendedBoundsMin = min + return a +} + +// ExtendedBoundsMax accepts int, int64, string, or time.Time values. +func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation { + a.extendedBoundsMax = max + return a +} + +func (a *DateHistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "articles_over_time" : { + // "date_histogram" : { + // "field" : "date", + // "interval" : "month" + // } + // } + // } + // } + // + // This method returns only the { "date_histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.timeZone != "" { + opts["time_zone"] = a.timeZone + } + if a.offset != "" { + opts["offset"] = a.offset + } + if a.format != "" { + opts["format"] = a.format + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go new file mode 100644 index 0000000000000000000000000000000000000000..f94513d90d72e0bf326d017fcdb20666bb689b54 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_date_range.go @@ -0,0 +1,234 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "time" +) + +// DateRangeAggregation is a range aggregation that is dedicated for +// date values. The main difference between this aggregation and the +// normal range aggregation is that the from and to values can be expressed +// in Date Math expressions, and it is also possible to specify a +// date format by which the from and to response fields will be returned. +// Note that this aggregration includes the from value and excludes the to +// value for each range. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-daterange-aggregation.html +type DateRangeAggregation struct { + field string + script *Script + subAggregations map[string]Aggregation + meta map[string]interface{} + keyed *bool + unmapped *bool + format string + entries []DateRangeAggregationEntry +} + +type DateRangeAggregationEntry struct { + Key string + From interface{} + To interface{} +} + +func NewDateRangeAggregation() *DateRangeAggregation { + return &DateRangeAggregation{ + subAggregations: make(map[string]Aggregation), + entries: make([]DateRangeAggregationEntry, 0), + } +} + +func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation { + a.field = field + return a +} + +func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation { + a.script = script + return a +} + +func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation { + a.meta = metaData + return a +} + +func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation { + a.keyed = &keyed + return a +} + +func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation { + a.unmapped = &unmapped + return a +} + +func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation { + a.format = format + return a +} + +func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a 
*DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation { + a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *DateRangeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "range" : { + // "date_range": { + // "field": "date", + // "format": "MM-yyy", + // "ranges": [ + // { "to": "now-10M/M" }, + // { "from": "now-10M/M" } + // ] + // } + // } + // } + // } + // } + // + // This method returns only the { "date_range" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["date_range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + if a.format != "" { + opts["format"] = a.format + } + + var ranges []interface{} + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go new file mode 100644 index 0000000000000000000000000000000000000000..2085f0d366ede8ca95ebda13e950ad660f844478 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filter.go @@ -0,0 +1,77 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FilterAggregation defines a single bucket of all the documents +// in the current document set context that match a specified filter. +// Often this will be used to narrow down the current aggregation context +// to a specific set of documents. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filter-aggregation.html +type FilterAggregation struct { + filter Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewFilterAggregation() *FilterAggregation { + return &FilterAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation { + a.meta = metaData + return a +} + +func (a *FilterAggregation) Filter(filter Query) *FilterAggregation { + a.filter = filter + return a +} + +func (a *FilterAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "in_stock_products" : { + // "filter" : { "range" : { "stock" : { "gt" : 0 } } } + // } + // } + // } + // This method returns only the { "filter" : {} } part. + + src, err := a.filter.Source() + if err != nil { + return nil, err + } + source := make(map[string]interface{}) + source["filter"] = src + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go new file mode 100644 index 0000000000000000000000000000000000000000..80999eed9681b9186d964ca932f5cc0d2ed6d3cb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_filters.go @@ -0,0 +1,138 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// FiltersAggregation defines a multi bucket aggregations where each bucket +// is associated with a filter. Each bucket will collect all documents that +// match its associated filter. +// +// Notice that the caller has to decide whether to add filters by name +// (using FilterWithName) or unnamed filters (using Filter or Filters). One cannot +// use both named and unnamed filters. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-filters-aggregation.html +type FiltersAggregation struct { + unnamedFilters []Query + namedFilters map[string]Query + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewFiltersAggregation initializes a new FiltersAggregation. +func NewFiltersAggregation() *FiltersAggregation { + return &FiltersAggregation{ + unnamedFilters: make([]Query, 0), + namedFilters: make(map[string]Query), + subAggregations: make(map[string]Aggregation), + } +} + +// Filter adds an unnamed filter. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation { + a.unnamedFilters = append(a.unnamedFilters, filter) + return a +} + +// Filters adds one or more unnamed filters. 
Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation { + if len(filters) > 0 { + a.unnamedFilters = append(a.unnamedFilters, filters...) + } + return a +} + +// FilterWithName adds a filter with a specific name. Notice that you can +// either use named or unnamed filters, but not both. +func (a *FiltersAggregation) FilterWithName(name string, filter Query) *FiltersAggregation { + a.namedFilters[name] = filter + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation { + a.meta = metaData + return a +} + +// Source returns the a JSON-serializable interface. +// If the aggregation is invalid, an error is returned. This may e.g. happen +// if you mixed named and unnamed filters. +func (a *FiltersAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "messages" : { + // "filters" : { + // "filters" : { + // "errors" : { "term" : { "body" : "error" }}, + // "warnings" : { "term" : { "body" : "warning" }} + // } + // } + // } + // } + // } + // This method returns only the (outer) { "filters" : {} } part. + + source := make(map[string]interface{}) + filters := make(map[string]interface{}) + source["filters"] = filters + + if len(a.unnamedFilters) > 0 && len(a.namedFilters) > 0 { + return nil, errors.New("elastic: use either named or unnamed filters with FiltersAggregation but not both") + } + + if len(a.unnamedFilters) > 0 { + arr := make([]interface{}, len(a.unnamedFilters)) + for i, filter := range a.unnamedFilters { + src, err := filter.Source() + if err != nil { + return nil, err + } + arr[i] = src + } + filters["filters"] = arr + } else { + dict := make(map[string]interface{}) + for key, filter := range a.namedFilters { + src, err := filter.Source() + if err != nil { + return nil, err + } + dict[key] = src + } + filters["filters"] = dict + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go new file mode 100644 index 0000000000000000000000000000000000000000..5ae11fd6b51bdeea7c41136007689df9121dd899 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geo_distance.go @@ -0,0 +1,194 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields +// and conceptually works very similar to the range aggregation. +// The user can define a point of origin and a set of distance range buckets. 
+// The aggregation evaluate the distance of each document value from +// the origin point and determines the buckets it belongs to based on +// the ranges (a document belongs to a bucket if the distance between the +// document and the origin falls within the distance range of the bucket). +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-geodistance-aggregation.html +type GeoDistanceAggregation struct { + field string + unit string + distanceType string + point string + ranges []geoDistAggRange + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +type geoDistAggRange struct { + Key string + From interface{} + To interface{} +} + +func NewGeoDistanceAggregation() *GeoDistanceAggregation { + return &GeoDistanceAggregation{ + subAggregations: make(map[string]Aggregation), + ranges: make([]geoDistAggRange, 0), + } +} + +func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation { + a.field = field + return a +} + +func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation { + a.unit = unit + return a +} + +func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation { + a.distanceType = distanceType + return a +} + +func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation { + a.point = latLon + return a +} + +func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation { + a.meta = metaData + return a +} +func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to}) + return a +} + +func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to}) + return a +} + +func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation { + a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to}) + return a +} + +func (a *GeoDistanceAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "rings_around_amsterdam" : { + // "geo_distance" : { + // "field" : "location", + // "origin" : "52.3760, 4.894", + // "ranges" : [ + // { "to" : 100 }, + // { "from" : 100, "to" : 300 }, 
+ // { "from" : 300 } + // ] + // } + // } + // } + // } + // + // This method returns only the { "range" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_distance"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.unit != "" { + opts["unit"] = a.unit + } + if a.distanceType != "" { + opts["distance_type"] = a.distanceType + } + if a.point != "" { + opts["origin"] = a.point + } + + var ranges []interface{} + for _, ent := range a.ranges { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case *int, *int16, *int32, *int64, *float32, *float64: + r["from"] = from + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case *int, *int16, *int32, *int64, *float32, *float64: + r["to"] = to + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid.go new file mode 100644 index 0000000000000000000000000000000000000000..07f61b3314d19cd14d37a8a5ebe4908ad6419a4d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_geohash_grid.go @@ -0,0 +1,102 @@ +package elastic + +type GeoHashGridAggregation struct { + field string + precision int + size int + shardSize int + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGeoHashGridAggregation() *GeoHashGridAggregation { + return &GeoHashGridAggregation{ + subAggregations: make(map[string]Aggregation), + precision: -1, + size: -1, + shardSize: -1, + } +} + +func (a *GeoHashGridAggregation) Field(field string) *GeoHashGridAggregation { + a.field = field + return a +} + +func (a *GeoHashGridAggregation) Precision(precision int) *GeoHashGridAggregation { + a.precision = precision + return a +} + +func (a *GeoHashGridAggregation) Size(size int) *GeoHashGridAggregation { + a.size = size + return a +} + +func (a *GeoHashGridAggregation) ShardSize(shardSize int) *GeoHashGridAggregation { + a.shardSize = shardSize + return a +} + +func (a *GeoHashGridAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoHashGridAggregation { + a.subAggregations[name] = subAggregation + return a +} + +func (a *GeoHashGridAggregation) Meta(metaData map[string]interface{}) *GeoHashGridAggregation { + a.meta = metaData + return a +} + +func (a *GeoHashGridAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs": { + // "new_york": { + // "geohash_grid": { + // "field": "location", + // "precision": 5 + // } + // } + // } + // } + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geohash_grid"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + if a.precision != -1 { + opts["precision"] = a.precision 
+ } + + if a.size != -1 { + opts["size"] = a.size + } + + if a.shardSize != -1 { + opts["shard_size"] = a.shardSize + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go new file mode 100644 index 0000000000000000000000000000000000000000..fbd14a45fad1ad4e1f66c8fdda7bd35bc90af90e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_global.go @@ -0,0 +1,71 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GlobalAggregation defines a single bucket of all the documents within +// the search execution context. This context is defined by the indices +// and the document types you’re searching on, but is not influenced +// by the search query itself. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-global-aggregation.html +type GlobalAggregation struct { + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGlobalAggregation() *GlobalAggregation { + return &GlobalAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation { + a.meta = metaData + return a +} + +func (a *GlobalAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "all_products" : { + // "global" : {}, + // "aggs" : { + // "avg_price" : { "avg" : { "field" : "price" } } + // } + // } + // } + // } + // This method returns only the { "global" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["global"] = opts + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go new file mode 100644 index 0000000000000000000000000000000000000000..060853d111122e1d322aed91e749880d4ef111b5 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_histogram.go @@ -0,0 +1,253 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// HistogramAggregation is a multi-bucket values source based aggregation +// that can be applied on numeric values extracted from the documents. +// It dynamically builds fixed size (a.k.a. interval) buckets over the +// values. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-histogram-aggregation.html +type HistogramAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + interval int64 + order string + orderAsc bool + minDocCount *int64 + extendedBoundsMin *int64 + extendedBoundsMax *int64 + offset *int64 +} + +func NewHistogramAggregation() *HistogramAggregation { + return &HistogramAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *HistogramAggregation) Field(field string) *HistogramAggregation { + a.field = field + return a +} + +func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation { + a.missing = missing + return a +} + +func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation { + a.meta = metaData + return a +} + +func (a *HistogramAggregation) Interval(interval int64) *HistogramAggregation { + a.interval = interval + return a +} + +// Order specifies the sort order. Valid values for order are: +// "_key", "_count", a sub-aggregation name, or a sub-aggregation name +// with a metric. +func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation { + return a.OrderByCount(true) +} + +func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation { + return a.OrderByCount(false) +} + +func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation { + // "order" : { "_key" : "asc" } + a.order = "_key" + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation { + return a.OrderByKey(true) +} + +func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation { + return a.OrderByKey(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. 
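A sketch of ordering histogram buckets by a metric taken from a multi-valued sub-aggregation (field and aggregation names are placeholders):

	agg := elastic.NewHistogramAggregation().
		Field("price").
		Interval(50).
		SubAggregation("rating_stats", elastic.NewStatsAggregation().Field("rating")).
		OrderByAggregationAndMetric("rating_stats", "avg", false) // highest average rating first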
+func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *HistogramAggregation) ExtendedBounds(min, max int64) *HistogramAggregation { + a.extendedBoundsMin = &min + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMin(min int64) *HistogramAggregation { + a.extendedBoundsMin = &min + return a +} + +func (a *HistogramAggregation) ExtendedBoundsMax(max int64) *HistogramAggregation { + a.extendedBoundsMax = &max + return a +} + +func (a *HistogramAggregation) Offset(offset int64) *HistogramAggregation { + a.offset = &offset + return a +} + +func (a *HistogramAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "prices" : { + // "histogram" : { + // "field" : "price", + // "interval" : 50 + // } + // } + // } + // } + // + // This method returns only the { "histogram" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["histogram"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + opts["interval"] = a.interval + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if a.offset != nil { + opts["offset"] = *a.offset + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { + bounds := make(map[string]interface{}) + if a.extendedBoundsMin != nil { + bounds["min"] = a.extendedBoundsMin + } + if a.extendedBoundsMax != nil { + bounds["max"] = a.extendedBoundsMax + } + opts["extended_bounds"] = bounds + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go new file mode 100644 index 0000000000000000000000000000000000000000..3ca3fd6930645d246a1ab30171458aa51d58b06a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_missing.go @@ -0,0 +1,81 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +// MissingAggregation is a field data based single bucket aggregation, +// that creates a bucket of all documents in the current document set context +// that are missing a field value (effectively, missing a field or having +// the configured NULL value set). This aggregator will often be used in +// conjunction with other field data bucket aggregators (such as ranges) +// to return information for all the documents that could not be placed +// in any of the other buckets due to missing field data values. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-missing-aggregation.html +type MissingAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMissingAggregation() *MissingAggregation { + return &MissingAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MissingAggregation) Field(field string) *MissingAggregation { + a.field = field + return a +} + +func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation { + a.meta = metaData + return a +} + +func (a *MissingAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "products_without_a_price" : { + // "missing" : { "field" : "price" } + // } + // } + // } + // This method returns only the { "missing" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["missing"] = opts + + if a.field != "" { + opts["field"] = a.field + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go new file mode 100644 index 0000000000000000000000000000000000000000..62e592eb881056aa8d230f9c710b3b74a7796960 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_nested.go @@ -0,0 +1,82 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedAggregation is a special single bucket aggregation that enables +// aggregating nested documents. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-nested-aggregation.html +type NestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewNestedAggregation() *NestedAggregation { + return &NestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
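A sketch pairing the missing aggregation with the single-bucket result it produces (res and the aggregation name "no_price" are placeholders; fmt is assumed to be imported):

	agg := elastic.NewMissingAggregation().Field("price")
	// ...register via Search().Aggregation("no_price", agg) and execute the search...
	if missing, found := res.Aggregations.Missing("no_price"); found {
		fmt.Printf("documents without a price: %d\n", missing.DocCount)
	}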
+func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation { + a.meta = metaData + return a +} + +func (a *NestedAggregation) Path(path string) *NestedAggregation { + a.path = path + return a +} + +func (a *NestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "name" : "led tv" } + // } + // "aggs" : { + // "resellers" : { + // "nested" : { + // "path" : "resellers" + // }, + // "aggs" : { + // "min_price" : { "min" : { "field" : "resellers.price" } } + // } + // } + // } + // } + // This method returns only the { "nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["nested"] = opts + + opts["path"] = a.path + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go new file mode 100644 index 0000000000000000000000000000000000000000..88cde76fa2fdeb7cc3ef4056b472175d8b5e4a83 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_range.go @@ -0,0 +1,232 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "time" +) + +// RangeAggregation is a multi-bucket value source based aggregation that +// enables the user to define a set of ranges - each representing a bucket. +// During the aggregation process, the values extracted from each document +// will be checked against each bucket range and "bucket" the +// relevant/matching document. Note that this aggregration includes the +// from value and excludes the to value for each range. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-range-aggregation.html +type RangeAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + keyed *bool + unmapped *bool + entries []rangeAggregationEntry +} + +type rangeAggregationEntry struct { + Key string + From interface{} + To interface{} +} + +func NewRangeAggregation() *RangeAggregation { + return &RangeAggregation{ + subAggregations: make(map[string]Aggregation), + entries: make([]rangeAggregationEntry, 0), + } +} + +func (a *RangeAggregation) Field(field string) *RangeAggregation { + a.field = field + return a +} + +func (a *RangeAggregation) Script(script *Script) *RangeAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation { + a.missing = missing + return a +} + +func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
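
A short sketch of the NestedAggregation builder above, nesting a MinAggregation (defined further down in this patch) over a nested "resellers" object; names mirror the example in the Source() comment.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// Produces { "nested": { "path": "resellers" },
	//            "aggregations": { "min_price": { "min": { "field": "resellers.price" } } } }
	agg := elastic.NewNestedAggregation().
		Path("resellers").
		SubAggregation("min_price", elastic.NewMinAggregation().Field("resellers.price"))

	// All builders in this package satisfy the Aggregation interface via Source().
	var _ elastic.Aggregation = agg
}
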
+func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation { + a.meta = metaData + return a +} + +func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation { + a.keyed = &keyed + return a +} + +func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation { + a.unmapped = &unmapped + return a +} + +func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) + return a +} + +func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) + return a +} + +func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) + return a +} + +func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) + return a +} + +func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) + return a +} + +func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation { + a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) + return a +} + +func (a *RangeAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "price_ranges" : { + // "range" : { + // "field" : "price", + // "ranges" : [ + // { "to" : 50 }, + // { "from" : 50, "to" : 100 }, + // { "from" : 100 } + // ] + // } + // } + // } + // } + // + // This method returns only the { "range" : { ... } } part. 
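
A usage sketch for the RangeAggregation builder: the Lt/Between/Gt helpers produce the same entries as AddUnboundedFrom/AddRange/AddUnboundedTo, reproducing the price_ranges example from the Source() comment. Field and bucket values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewRangeAggregation().
		Field("price").
		Lt(50).           // { "to": 50 }
		Between(50, 100). // { "from": 50, "to": 100 }
		Gt(100)           // { "from": 100 }

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.MarshalIndent(src, "", "  ")
	fmt.Println(string(out)) // { "range": { "field": "price", "ranges": [ ... ] } }
}
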
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["range"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + if a.keyed != nil { + opts["keyed"] = *a.keyed + } + if a.unmapped != nil { + opts["unmapped"] = *a.unmapped + } + + var ranges []interface{} + for _, ent := range a.entries { + r := make(map[string]interface{}) + if ent.Key != "" { + r["key"] = ent.Key + } + if ent.From != nil { + switch from := ent.From.(type) { + case int, int16, int32, int64, float32, float64: + r["from"] = from + case time.Time: + r["from"] = from.Format(time.RFC3339) + case string: + r["from"] = from + } + } + if ent.To != nil { + switch to := ent.To.(type) { + case int, int16, int32, int64, float32, float64: + r["to"] = to + case time.Time: + r["to"] = to.Format(time.RFC3339) + case string: + r["to"] = to + } + } + ranges = append(ranges, r) + } + opts["ranges"] = ranges + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go new file mode 100644 index 0000000000000000000000000000000000000000..f307f256f856904a7e72531211203dd8c6bca7d7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_reverse_nested.go @@ -0,0 +1,86 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ReverseNestedAggregation defines a special single bucket aggregation +// that enables aggregating on parent docs from nested documents. +// Effectively this aggregation can break out of the nested block +// structure and link to other nested structures or the root document, +// which allows nesting other aggregations that aren’t part of +// the nested object in a nested aggregation. +// +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-reverse-nested-aggregation.html +type ReverseNestedAggregation struct { + path string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +// NewReverseNestedAggregation initializes a new ReverseNestedAggregation +// bucket aggregation. +func NewReverseNestedAggregation() *ReverseNestedAggregation { + return &ReverseNestedAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +// Path set the path to use for this nested aggregation. The path must match +// the path to a nested object in the mappings. If it is not specified +// then this aggregation will go back to the root document. 
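
Because the Source() serialization above formats time.Time range bounds with time.RFC3339, date ranges can be passed directly as time values; a small sketch with an illustrative "created_at" field follows.

package main

import (
	"time"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	from := time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC)
	to := from.AddDate(0, 1, 0) // 2017-02-01T00:00:00Z

	// Both bounds are emitted as RFC3339 strings inside the "ranges" array.
	agg := elastic.NewRangeAggregation().
		Field("created_at").
		AddRangeWithKey("jan_2017", from, to).
		Keyed(true) // response buckets keyed by name instead of returned as an array

	var _ elastic.Aggregation = agg
}
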
+func (a *ReverseNestedAggregation) Path(path string) *ReverseNestedAggregation { + a.path = path + return a +} + +func (a *ReverseNestedAggregation) SubAggregation(name string, subAggregation Aggregation) *ReverseNestedAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ReverseNestedAggregation) Meta(metaData map[string]interface{}) *ReverseNestedAggregation { + a.meta = metaData + return a +} + +func (a *ReverseNestedAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "reverse_nested" : { + // "path": "..." + // } + // } + // } + // This method returns only the { "reverse_nested" : {} } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["reverse_nested"] = opts + + if a.path != "" { + opts["path"] = a.path + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go new file mode 100644 index 0000000000000000000000000000000000000000..c1a1ab4f764808ac68227625cf5019b0e740d1e6 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_sampler.go @@ -0,0 +1,111 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SamplerAggregation is a filtering aggregation used to limit any +// sub aggregations' processing to a sample of the top-scoring documents. +// Optionally, diversity settings can be used to limit the number of matches +// that share a common value such as an "author". +// +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-sampler-aggregation.html +type SamplerAggregation struct { + subAggregations map[string]Aggregation + meta map[string]interface{} + + shardSize int + maxDocsPerValue int + executionHint string +} + +func NewSamplerAggregation() *SamplerAggregation { + return &SamplerAggregation{ + shardSize: -1, + maxDocsPerValue: -1, + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation { + a.meta = metaData + return a +} + +// ShardSize sets the maximum number of docs returned from each shard. 
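
A sketch combining NestedAggregation and ReverseNestedAggregation: terms are computed on nested comments, then reverse_nested (with no Path, so it returns to the root document) aggregates root-level tags per commenter. The TermsAggregation builder appears later in this patch; field and path names are illustrative.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	agg := elastic.NewNestedAggregation().
		Path("comments").
		SubAggregation("top_usernames",
			elastic.NewTermsAggregation().Field("comments.username").
				SubAggregation("comment_to_issue",
					elastic.NewReverseNestedAggregation().
						SubAggregation("top_tags_per_comment",
							elastic.NewTermsAggregation().Field("tags"))))

	var _ elastic.Aggregation = agg
}
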
+func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation { + a.shardSize = shardSize + return a +} + +func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation { + a.maxDocsPerValue = maxDocsPerValue + return a +} + +func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation { + a.executionHint = hint + return a +} + +func (a *SamplerAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "sample" : { + // "sampler" : { + // "shard_size" : 200 + // }, + // "aggs": { + // "keywords": { + // "significant_terms": { + // "field": "text" + // } + // } + // } + // } + // } + // } + // + // This method returns only the { "sampler" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sampler"] = opts + + if a.shardSize >= 0 { + opts["shard_size"] = a.shardSize + } + if a.maxDocsPerValue >= 0 { + opts["max_docs_per_value"] = a.maxDocsPerValue + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go new file mode 100644 index 0000000000000000000000000000000000000000..e03801f1e08d2204b09fbfc10884da9657ba6466 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_significant_terms.go @@ -0,0 +1,389 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SignificantSignificantTermsAggregation is an aggregation that returns interesting +// or unusual occurrences of terms in a set. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html +type SignificantTermsAggregation struct { + field string + subAggregations map[string]Aggregation + meta map[string]interface{} + + minDocCount *int + shardMinDocCount *int + requiredSize *int + shardSize *int + filter Query + executionHint string + significanceHeuristic SignificanceHeuristic +} + +func NewSignificantTermsAggregation() *SignificantTermsAggregation { + return &SignificantTermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + } +} + +func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation { + a.field = field + return a +} + +func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
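
A sketch of the SamplerAggregation wrapping a SignificantTermsAggregation (defined next in this patch), mirroring the shard_size example in the Source() comment above; field names are illustrative.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// Limit sub-aggregation processing to the top 200 documents per shard,
	// then look for unusually frequent terms inside that sample.
	agg := elastic.NewSamplerAggregation().
		ShardSize(200).
		SubAggregation("keywords",
			elastic.NewSignificantTermsAggregation().Field("text"))

	var _ elastic.Aggregation = agg
}
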
+func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation { + a.meta = metaData + return a +} + +func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation { + a.filter = filter + return a +} + +func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation { + a.executionHint = hint + return a +} + +func (a *SignificantTermsAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTermsAggregation { + a.significanceHeuristic = heuristic + return a +} + +func (a *SignificantTermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "terms" : {"force" : [ "British Transport Police" ]} + // }, + // "aggregations" : { + // "significantCrimeTypes" : { + // "significant_terms" : { "field" : "crime_type" } + // } + // } + // } + // + // This method returns only the + // { "significant_terms" : { "field" : "crime_type" } + // part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["significant_terms"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.requiredSize != nil { + opts["size"] = *a.requiredSize // not a typo! + } + if a.shardSize != nil { + opts["shard_size"] = *a.shardSize + } + if a.minDocCount != nil { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + if a.filter != nil { + src, err := a.filter.Source() + if err != nil { + return nil, err + } + opts["background_filter"] = src + } + if a.significanceHeuristic != nil { + name := a.significanceHeuristic.Name() + src, err := a.significanceHeuristic.Source() + if err != nil { + return nil, err + } + opts[name] = src + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Significance heuristics -- + +type SignificanceHeuristic interface { + Name() string + Source() (interface{}, error) +} + +// -- Chi Square -- + +// ChiSquareSignificanceHeuristic implements Chi square as described +// in "Information Retrieval", Manning et al., Chapter 13.5.2. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_chi_square +// for details. +type ChiSquareSignificanceHeuristic struct { + backgroundIsSuperset *bool + includeNegatives *bool +} + +// NewChiSquareSignificanceHeuristic initializes a new ChiSquareSignificanceHeuristic. 
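
A sketch of the SignificantTermsAggregation builder; note that RequiredSize is serialized as "size", per the "not a typo" remark in Source() above. The crime_type field mirrors the example comment; the size, count, and execution-hint values are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewSignificantTermsAggregation().
		Field("crime_type").
		RequiredSize(10). // emitted as "size"
		ShardSize(100).
		MinDocCount(5).
		ExecutionHint("map")

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.MarshalIndent(src, "", "  ")
	fmt.Println(string(out)) // { "significant_terms": { "field": "crime_type", "size": 10, ... } }
}
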
+func NewChiSquareSignificanceHeuristic() *ChiSquareSignificanceHeuristic { + return &ChiSquareSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *ChiSquareSignificanceHeuristic) Name() string { + return "chi_square" +} + +// BackgroundIsSuperset indicates whether you defined a custom background +// filter that represents a difference set of documents that you want to +// compare to. +func (sh *ChiSquareSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *ChiSquareSignificanceHeuristic { + sh.backgroundIsSuperset = &backgroundIsSuperset + return sh +} + +// IncludeNegatives indicates whether to filter out the terms that appear +// much less in the subset than in the background without the subset. +func (sh *ChiSquareSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *ChiSquareSignificanceHeuristic { + sh.includeNegatives = &includeNegatives + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + if sh.includeNegatives != nil { + source["include_negatives"] = *sh.includeNegatives + } + return source, nil +} + +// -- GND -- + +// GNDSignificanceHeuristic implements the "Google Normalized Distance" +// as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, +// 2007. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance +// for details. +type GNDSignificanceHeuristic struct { + backgroundIsSuperset *bool +} + +// NewGNDSignificanceHeuristic implements a new GNDSignificanceHeuristic. +func NewGNDSignificanceHeuristic() *GNDSignificanceHeuristic { + return &GNDSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *GNDSignificanceHeuristic) Name() string { + return "gnd" +} + +// BackgroundIsSuperset indicates whether you defined a custom background +// filter that represents a difference set of documents that you want to +// compare to. +func (sh *GNDSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *GNDSignificanceHeuristic { + sh.backgroundIsSuperset = &backgroundIsSuperset + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *GNDSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + return source, nil +} + +// -- JLH Score -- + +// JLHScoreSignificanceHeuristic implements the JLH score as described in +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score. +type JLHScoreSignificanceHeuristic struct{} + +// NewJLHScoreSignificanceHeuristic initializes a new JLHScoreSignificanceHeuristic. +func NewJLHScoreSignificanceHeuristic() *JLHScoreSignificanceHeuristic { + return &JLHScoreSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *JLHScoreSignificanceHeuristic) Name() string { + return "jlh" +} + +// Source returns the parameters that need to be added to the REST parameters. 
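
The heuristics defined here plug into SignificantTermsAggregation.SignificanceHeuristic; the heuristic's Name() becomes the JSON key and its Source() supplies the options. A sketch using the chi_square heuristic (GND works the same way); the field name is illustrative.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// Adds { "chi_square": { "background_is_superset": true, "include_negatives": false } }
	// next to "field" inside the significant_terms body.
	heuristic := elastic.NewChiSquareSignificanceHeuristic().
		BackgroundIsSuperset(true).
		IncludeNegatives(false)

	agg := elastic.NewSignificantTermsAggregation().
		Field("crime_type").
		SignificanceHeuristic(heuristic)

	var _ elastic.Aggregation = agg
}
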
+func (sh *JLHScoreSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + return source, nil +} + +// -- Mutual Information -- + +// MutualInformationSignificanceHeuristic implements Mutual information +// as described in "Information Retrieval", Manning et al., Chapter 13.5.1. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information +// for details. +type MutualInformationSignificanceHeuristic struct { + backgroundIsSuperset *bool + includeNegatives *bool +} + +// NewMutualInformationSignificanceHeuristic initializes a new instance of +// MutualInformationSignificanceHeuristic. +func NewMutualInformationSignificanceHeuristic() *MutualInformationSignificanceHeuristic { + return &MutualInformationSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *MutualInformationSignificanceHeuristic) Name() string { + return "mutual_information" +} + +// BackgroundIsSuperset indicates whether you defined a custom background +// filter that represents a difference set of documents that you want to +// compare to. +func (sh *MutualInformationSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *MutualInformationSignificanceHeuristic { + sh.backgroundIsSuperset = &backgroundIsSuperset + return sh +} + +// IncludeNegatives indicates whether to filter out the terms that appear +// much less in the subset than in the background without the subset. +func (sh *MutualInformationSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *MutualInformationSignificanceHeuristic { + sh.includeNegatives = &includeNegatives + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *MutualInformationSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.backgroundIsSuperset != nil { + source["background_is_superset"] = *sh.backgroundIsSuperset + } + if sh.includeNegatives != nil { + source["include_negatives"] = *sh.includeNegatives + } + return source, nil +} + +// -- Percentage Score -- + +// PercentageScoreSignificanceHeuristic implements the algorithm described +// in https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_percentage. +type PercentageScoreSignificanceHeuristic struct{} + +// NewPercentageScoreSignificanceHeuristic initializes a new instance of +// PercentageScoreSignificanceHeuristic. +func NewPercentageScoreSignificanceHeuristic() *PercentageScoreSignificanceHeuristic { + return &PercentageScoreSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *PercentageScoreSignificanceHeuristic) Name() string { + return "percentage" +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *PercentageScoreSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + return source, nil +} + +// -- Script -- + +// ScriptSignificanceHeuristic implements a scripted significance heuristic. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_scripted +// for details. +type ScriptSignificanceHeuristic struct { + script *Script +} + +// NewScriptSignificanceHeuristic initializes a new instance of +// ScriptSignificanceHeuristic. 
+func NewScriptSignificanceHeuristic() *ScriptSignificanceHeuristic { + return &ScriptSignificanceHeuristic{} +} + +// Name returns the name of the heuristic in the REST interface. +func (sh *ScriptSignificanceHeuristic) Name() string { + return "script_heuristic" +} + +// Script specifies the script to use to get custom scores. The following +// parameters are available in the script: `_subset_freq`, `_superset_freq`, +// `_subset_size`, and `_superset_size`. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-significantterms-aggregation.html#_scripted +// for details. +func (sh *ScriptSignificanceHeuristic) Script(script *Script) *ScriptSignificanceHeuristic { + sh.script = script + return sh +} + +// Source returns the parameters that need to be added to the REST parameters. +func (sh *ScriptSignificanceHeuristic) Source() (interface{}, error) { + source := make(map[string]interface{}) + if sh.script != nil { + src, err := sh.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms.go new file mode 100644 index 0000000000000000000000000000000000000000..261e6c6184cc44a4219559188998d4221fdca94b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_bucket_terms.go @@ -0,0 +1,341 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsAggregation is a multi-bucket value source based aggregation +// where buckets are dynamically built - one per unique value. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-bucket-terms-aggregation.html +type TermsAggregation struct { + field string + script *Script + missing interface{} + subAggregations map[string]Aggregation + meta map[string]interface{} + + size *int + shardSize *int + requiredSize *int + minDocCount *int + shardMinDocCount *int + valueType string + order string + orderAsc bool + includePattern string + includeFlags *int + excludePattern string + excludeFlags *int + executionHint string + collectionMode string + showTermDocCountError *bool + includeTerms []string + excludeTerms []string +} + +func NewTermsAggregation() *TermsAggregation { + return &TermsAggregation{ + subAggregations: make(map[string]Aggregation, 0), + includeTerms: make([]string, 0), + excludeTerms: make([]string, 0), + } +} + +func (a *TermsAggregation) Field(field string) *TermsAggregation { + a.field = field + return a +} + +func (a *TermsAggregation) Script(script *Script) *TermsAggregation { + a.script = script + return a +} + +// Missing configures the value to use when documents miss a value. +func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation { + a.missing = missing + return a +} + +func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
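
A sketch of the script heuristic above. elastic.NewScript is assumed from the package's script support, which is not part of this hunk, and the script body is purely illustrative; the values available to it are the _subset_freq/_superset_freq/_subset_size/_superset_size parameters named in the doc comment, with exact syntax depending on the Elasticsearch scripting language in use.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// Assumed: NewScript(...) is provided elsewhere in this package (not in this hunk).
	// The script computes a custom significance score from the documented variables.
	heuristic := elastic.NewScriptSignificanceHeuristic().
		Script(elastic.NewScript("_subset_freq / (_superset_freq - _subset_freq + 1)"))

	agg := elastic.NewSignificantTermsAggregation().
		Field("crime_type").
		SignificanceHeuristic(heuristic) // emitted as { "script_heuristic": { "script": ... } }

	var _ elastic.Aggregation = agg
}
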
+func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation { + a.meta = metaData + return a +} + +func (a *TermsAggregation) Size(size int) *TermsAggregation { + a.size = &size + return a +} + +func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation { + a.requiredSize = &requiredSize + return a +} + +func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation { + a.shardSize = &shardSize + return a +} + +func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation { + a.minDocCount = &minDocCount + return a +} + +func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation { + a.shardMinDocCount = &shardMinDocCount + return a +} + +func (a *TermsAggregation) Include(regexp string) *TermsAggregation { + a.includePattern = regexp + return a +} + +func (a *TermsAggregation) IncludeWithFlags(regexp string, flags int) *TermsAggregation { + a.includePattern = regexp + a.includeFlags = &flags + return a +} + +func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation { + a.excludePattern = regexp + return a +} + +func (a *TermsAggregation) ExcludeWithFlags(regexp string, flags int) *TermsAggregation { + a.excludePattern = regexp + a.excludeFlags = &flags + return a +} + +// ValueType can be string, long, or double. +func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation { + a.valueType = valueType + return a +} + +func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation { + a.order = order + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation { + // "order" : { "_count" : "asc" } + a.order = "_count" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation { + return a.OrderByCount(true) +} + +func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation { + return a.OrderByCount(false) +} + +func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation { + // "order" : { "_term" : "asc" } + a.order = "_term" + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation { + return a.OrderByTerm(true) +} + +func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation { + return a.OrderByTerm(false) +} + +// OrderByAggregation creates a bucket ordering strategy which sorts buckets +// based on a single-valued calc get. +func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "avg_height" : "desc" } + // }, + // "aggs" : { + // "avg_height" : { "avg" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + a.orderAsc = asc + return a +} + +// OrderByAggregationAndMetric creates a bucket ordering strategy which +// sorts buckets based on a multi-valued calc get. +func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation { + // { + // "aggs" : { + // "genders" : { + // "terms" : { + // "field" : "gender", + // "order" : { "height_stats.avg" : "desc" } + // }, + // "aggs" : { + // "height_stats" : { "stats" : { "field" : "height" } } + // } + // } + // } + // } + a.order = aggName + "." + metric + a.orderAsc = asc + return a +} + +func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation { + a.executionHint = hint + return a +} + +// Collection mode can be depth_first or breadth_first as of 1.4.0. 
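
A sketch of the TermsAggregation builder, ordering gender buckets by an avg sub-aggregation exactly as in the OrderByAggregation comment above; AvgAggregation is defined later in this patch, and the field names are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewTermsAggregation().
		Field("gender").
		Size(10).
		OrderByAggregation("avg_height", false). // "order": { "avg_height": "desc" }
		SubAggregation("avg_height", elastic.NewAvgAggregation().Field("height"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.MarshalIndent(src, "", "  ")
	fmt.Println(string(out))
}
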
+func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation { + a.collectionMode = collectionMode + return a +} + +func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation { + a.showTermDocCountError = &showTermDocCountError + return a +} + +func (a *TermsAggregation) IncludeTerms(terms ...string) *TermsAggregation { + a.includeTerms = append(a.includeTerms, terms...) + return a +} + +func (a *TermsAggregation) ExcludeTerms(terms ...string) *TermsAggregation { + a.excludeTerms = append(a.excludeTerms, terms...) + return a +} + +func (a *TermsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "genders" : { + // "terms" : { "field" : "gender" } + // } + // } + // } + // This method returns only the { "terms" : { "field" : "gender" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["terms"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.missing != nil { + opts["missing"] = a.missing + } + + // TermsBuilder + if a.size != nil && *a.size >= 0 { + opts["size"] = *a.size + } + if a.shardSize != nil && *a.shardSize >= 0 { + opts["shard_size"] = *a.shardSize + } + if a.requiredSize != nil && *a.requiredSize >= 0 { + opts["required_size"] = *a.requiredSize + } + if a.minDocCount != nil && *a.minDocCount >= 0 { + opts["min_doc_count"] = *a.minDocCount + } + if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 { + opts["shard_min_doc_count"] = *a.shardMinDocCount + } + if a.showTermDocCountError != nil { + opts["show_term_doc_count_error"] = *a.showTermDocCountError + } + if a.collectionMode != "" { + opts["collect_mode"] = a.collectionMode + } + if a.valueType != "" { + opts["value_type"] = a.valueType + } + if a.order != "" { + o := make(map[string]interface{}) + if a.orderAsc { + o[a.order] = "asc" + } else { + o[a.order] = "desc" + } + opts["order"] = o + } + if len(a.includeTerms) > 0 { + opts["include"] = a.includeTerms + } + if a.includePattern != "" { + if a.includeFlags == nil || *a.includeFlags == 0 { + opts["include"] = a.includePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.includePattern + p["flags"] = *a.includeFlags + opts["include"] = p + } + } + if len(a.excludeTerms) > 0 { + opts["exclude"] = a.excludeTerms + } + if a.excludePattern != "" { + if a.excludeFlags == nil || *a.excludeFlags == 0 { + opts["exclude"] = a.excludePattern + } else { + p := make(map[string]interface{}) + p["pattern"] = a.excludePattern + p["flags"] = *a.excludeFlags + opts["exclude"] = p + } + } + if a.executionHint != "" { + opts["execution_hint"] = a.executionHint + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go new file mode 100644 index 0000000000000000000000000000000000000000..ff337a8cd2df72fdb28f9c3b5b1f3d37718909dc --- /dev/null +++ 
b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_avg.go @@ -0,0 +1,101 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgAggregation is a single-value metrics aggregation that computes +// the average of numeric values that are extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-avg-aggregation.html +type AvgAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewAvgAggregation() *AvgAggregation { + return &AvgAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *AvgAggregation) Field(field string) *AvgAggregation { + a.field = field + return a +} + +func (a *AvgAggregation) Script(script *Script) *AvgAggregation { + a.script = script + return a +} + +func (a *AvgAggregation) Format(format string) *AvgAggregation { + a.format = format + return a +} + +func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation { + a.meta = metaData + return a +} + +func (a *AvgAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "avg_grade" : { "avg" : { "field" : "grade" } } + // } + // } + // This method returns only the { "avg" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["avg"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go new file mode 100644 index 0000000000000000000000000000000000000000..8f6f447c47280e5ca7aea3b0f806e46b76315513 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_cardinality.go @@ -0,0 +1,120 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CardinalityAggregation is a single-value metrics aggregation that +// calculates an approximate count of distinct values. +// Values can be extracted either from specific fields in the document +// or generated by a script. 
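
A minimal sketch of the AvgAggregation builder above; the field name and format pattern are illustrative.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// { "avg": { "field": "grade", "format": "0.0" } }
	agg := elastic.NewAvgAggregation().
		Field("grade").
		Format("0.0")

	var _ elastic.Aggregation = agg
}
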
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-cardinality-aggregation.html +type CardinalityAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + precisionThreshold *int64 + rehash *bool +} + +func NewCardinalityAggregation() *CardinalityAggregation { + return &CardinalityAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation { + a.field = field + return a +} + +func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation { + a.script = script + return a +} + +func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation { + a.format = format + return a +} + +func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation { + a.meta = metaData + return a +} + +func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation { + a.precisionThreshold = &threshold + return a +} + +func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation { + a.rehash = &rehash + return a +} + +func (a *CardinalityAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "author_count" : { + // "cardinality" : { "field" : "author" } + // } + // } + // } + // This method returns only the "cardinality" : { "field" : "author" } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["cardinality"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + + if a.format != "" { + opts["format"] = a.format + } + if a.precisionThreshold != nil { + opts["precision_threshold"] = *a.precisionThreshold + } + if a.rehash != nil { + opts["rehash"] = *a.rehash + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..95b312686dfbc48d6cd0d07ed213e8db108a6549 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_extended_stats.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ExtendedExtendedStatsAggregation is a multi-value metrics aggregation that +// computes stats over numeric values extracted from the aggregated documents. 
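
A sketch of the CardinalityAggregation above; precision_threshold trades memory for accuracy of the approximate distinct count. The author field mirrors the Source() comment, and the threshold value is illustrative.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// { "cardinality": { "field": "author", "precision_threshold": 1000 } }
	agg := elastic.NewCardinalityAggregation().
		Field("author").
		PrecisionThreshold(1000)

	var _ elastic.Aggregation = agg
}
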
+// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-extendedstats-aggregation.html +type ExtendedStatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewExtendedStatsAggregation() *ExtendedStatsAggregation { + return &ExtendedStatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation { + a.field = field + return a +} + +func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation { + a.script = script + return a +} + +func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation { + a.format = format + return a +} + +func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation { + a.meta = metaData + return a +} + +func (a *ExtendedStatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "extended_stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "extended_stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["extended_stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go new file mode 100644 index 0000000000000000000000000000000000000000..c263a76b40812c5df4226b7c8de333bc487ee3d0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_geo_bounds.go @@ -0,0 +1,105 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoBoundsAggregation is a metric aggregation that computes the +// bounding box containing all geo_point values for a field. 
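
A one-line sketch of the ExtendedStatsAggregation builder above, matching the grades_stats example in its Source() comment.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// { "extended_stats": { "field": "grade" } }
	agg := elastic.NewExtendedStatsAggregation().Field("grade")

	var _ elastic.Aggregation = agg
}
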
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-geobounds-aggregation.html +type GeoBoundsAggregation struct { + field string + script *Script + wrapLongitude *bool + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewGeoBoundsAggregation() *GeoBoundsAggregation { + return &GeoBoundsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation { + a.field = field + return a +} + +func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation { + a.script = script + return a +} + +func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation { + a.wrapLongitude = &wrapLongitude + return a +} + +func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation { + a.meta = metaData + return a +} + +func (a *GeoBoundsAggregation) Source() (interface{}, error) { + // Example: + // { + // "query" : { + // "match" : { "business_type" : "shop" } + // }, + // "aggs" : { + // "viewport" : { + // "geo_bounds" : { + // "field" : "location" + // "wrap_longitude" : "true" + // } + // } + // } + // } + // + // This method returns only the { "geo_bounds" : { ... } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["geo_bounds"] = opts + + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.wrapLongitude != nil { + opts["wrap_longitude"] = *a.wrapLongitude + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go new file mode 100644 index 0000000000000000000000000000000000000000..b62130676a0e486c230338f888c7d0abaaa6afe2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_max.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MaxAggregation is a single-value metrics aggregation that keeps track and +// returns the maximum value among the numeric values extracted from +// the aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by +// a provided script. 
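
A sketch of the GeoBoundsAggregation above with wrap_longitude enabled, mirroring the viewport example in the Source() comment; the location field is illustrative.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	// { "geo_bounds": { "field": "location", "wrap_longitude": true } }
	agg := elastic.NewGeoBoundsAggregation().
		Field("location").
		WrapLongitude(true)

	var _ elastic.Aggregation = agg
}
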
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-max-aggregation.html +type MaxAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMaxAggregation() *MaxAggregation { + return &MaxAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MaxAggregation) Field(field string) *MaxAggregation { + a.field = field + return a +} + +func (a *MaxAggregation) Script(script *Script) *MaxAggregation { + a.script = script + return a +} + +func (a *MaxAggregation) Format(format string) *MaxAggregation { + a.format = format + return a +} + +func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation { + a.meta = metaData + return a +} +func (a *MaxAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "max_price" : { "max" : { "field" : "price" } } + // } + // } + // This method returns only the { "max" : { "field" : "price" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["max"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go new file mode 100644 index 0000000000000000000000000000000000000000..c1ca6922b449f2f63e22dcb036832e96997fd3de --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_min.go @@ -0,0 +1,100 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MinAggregation is a single-value metrics aggregation that keeps track and +// returns the minimum value among numeric values extracted from the +// aggregated documents. These values can be extracted either from +// specific numeric fields in the documents, or be generated by a +// provided script. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-min-aggregation.html +type MinAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewMinAggregation() *MinAggregation { + return &MinAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *MinAggregation) Field(field string) *MinAggregation { + a.field = field + return a +} + +func (a *MinAggregation) Script(script *Script) *MinAggregation { + a.script = script + return a +} + +func (a *MinAggregation) Format(format string) *MinAggregation { + a.format = format + return a +} + +func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation { + a.meta = metaData + return a +} + +func (a *MinAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "min_price" : { "min" : { "field" : "price" } } + // } + // } + // This method returns only the { "min" : { "field" : "price" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["min"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go new file mode 100644 index 0000000000000000000000000000000000000000..3e0595e8806ca517cab7996c475cc83265f37b97 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentile_ranks.go @@ -0,0 +1,131 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
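
The Min and Max builders above are symmetric; a small sketch building both over an illustrative price field.

package main

import elastic "gopkg.in/olivere/elastic.v5"

func main() {
	minPrice := elastic.NewMinAggregation().Field("price") // { "min": { "field": "price" } }
	maxPrice := elastic.NewMaxAggregation().Field("price") // { "max": { "field": "price" } }

	var _ elastic.Aggregation = minPrice
	var _ elastic.Aggregation = maxPrice
}
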
+ +package elastic + +// PercentileRanksAggregation +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-rank-aggregation.html +type PercentileRanksAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + values []float64 + compression *float64 + estimator string +} + +func NewPercentileRanksAggregation() *PercentileRanksAggregation { + return &PercentileRanksAggregation{ + subAggregations: make(map[string]Aggregation), + values: make([]float64, 0), + } +} + +func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation { + a.field = field + return a +} + +func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation { + a.script = script + return a +} + +func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation { + a.format = format + return a +} + +func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation { + a.meta = metaData + return a +} + +func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation { + a.values = append(a.values, values...) + return a +} + +func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation { + a.compression = &compression + return a +} + +func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation { + a.estimator = estimator + return a +} + +func (a *PercentileRanksAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentile_ranks" : { + // "field" : "load_time" + // "values" : [15, 30] + // } + // } + // } + // } + // This method returns only the + // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } } + // part. 
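
A sketch of the PercentileRanksAggregation builder above, asking what percentage of values in the illustrative load_time field fall under 15 and 30, as in its Source() comment.

package main

import (
	"encoding/json"
	"fmt"

	elastic "gopkg.in/olivere/elastic.v5"
)

func main() {
	agg := elastic.NewPercentileRanksAggregation().
		Field("load_time").
		Values(15, 30)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.MarshalIndent(src, "", "  ")
	fmt.Println(string(out)) // { "percentile_ranks": { "field": "load_time", "values": [15, 30] } }
}
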
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentile_ranks"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.values) > 0 { + opts["values"] = a.values + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go new file mode 100644 index 0000000000000000000000000000000000000000..411f9c50fc59ff6159da08de324de3bcc94dfc8d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_percentiles.go @@ -0,0 +1,130 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PercentilesAggregation +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-percentile-aggregation.html +type PercentilesAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} + percentiles []float64 + compression *float64 + estimator string +} + +func NewPercentilesAggregation() *PercentilesAggregation { + return &PercentilesAggregation{ + subAggregations: make(map[string]Aggregation), + percentiles: make([]float64, 0), + } +} + +func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation { + a.field = field + return a +} + +func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation { + a.script = script + return a +} + +func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation { + a.format = format + return a +} + +func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation { + a.meta = metaData + return a +} + +func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation { + a.percentiles = append(a.percentiles, percentiles...) 
+ return a +} + +func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation { + a.compression = &compression + return a +} + +func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation { + a.estimator = estimator + return a +} + +func (a *PercentilesAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "load_time_outlier" : { + // "percentiles" : { + // "field" : "load_time" + // } + // } + // } + // } + // This method returns only the + // { "percentiles" : { "field" : "load_time" } } + // part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["percentiles"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + if len(a.percentiles) > 0 { + opts["percents"] = a.percentiles + } + if a.compression != nil { + opts["compression"] = *a.compression + } + if a.estimator != "" { + opts["estimator"] = a.estimator + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go new file mode 100644 index 0000000000000000000000000000000000000000..400b79b00837848f4b6c08d23f4f808fd0fe6267 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_stats.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// StatsAggregation is a multi-value metrics aggregation that computes stats +// over numeric values extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-stats-aggregation.html +type StatsAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewStatsAggregation() *StatsAggregation { + return &StatsAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *StatsAggregation) Field(field string) *StatsAggregation { + a.field = field + return a +} + +func (a *StatsAggregation) Script(script *Script) *StatsAggregation { + a.script = script + return a +} + +func (a *StatsAggregation) Format(format string) *StatsAggregation { + a.format = format + return a +} + +func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
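+//
+// Example (editorial sketch; the field name and meta payload are hypothetical
+// and not part of the upstream file):
+//
+//	agg := NewStatsAggregation().
+//		Field("grade").
+//		Meta(map[string]interface{}{"owner": "reporting"})
+//	src, _ := agg.Source()
+//	// json.Marshal(src) == {"meta":{"owner":"reporting"},"stats":{"field":"grade"}}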
+func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation { + a.meta = metaData + return a +} + +func (a *StatsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_stats" : { "stats" : { "field" : "grade" } } + // } + // } + // This method returns only the { "stats" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["stats"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go new file mode 100644 index 0000000000000000000000000000000000000000..f959a3e54b7699f21343f2032bd588cd8ba56fad --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_sum.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumAggregation is a single-value metrics aggregation that sums up +// numeric values that are extracted from the aggregated documents. +// These values can be extracted either from specific numeric fields +// in the documents, or be generated by a provided script. +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-sum-aggregation.html +type SumAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewSumAggregation() *SumAggregation { + return &SumAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *SumAggregation) Field(field string) *SumAggregation { + a.field = field + return a +} + +func (a *SumAggregation) Script(script *Script) *SumAggregation { + a.script = script + return a +} + +func (a *SumAggregation) Format(format string) *SumAggregation { + a.format = format + return a +} + +func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation { + a.meta = metaData + return a +} + +func (a *SumAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "intraday_return" : { "sum" : { "field" : "change" } } + // } + // } + // This method returns only the { "sum" : { "field" : "change" } } part. 
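+	//
+	// A minimal caller-side sketch (editorial note; the field name is
+	// hypothetical), marshalling the fragment with encoding/json:
+	//
+	//	agg := NewSumAggregation().Field("change")
+	//	src, _ := agg.Source()
+	//	body, _ := json.Marshal(src) // {"sum":{"field":"change"}}
+	//	_ = body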
+ + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["sum"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go new file mode 100644 index 0000000000000000000000000000000000000000..43dd36cdb11d7d34614927302db2ee7c609a770b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_top_hits.go @@ -0,0 +1,143 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TopHitsAggregation keeps track of the most relevant document +// being aggregated. This aggregator is intended to be used as a +// sub aggregator, so that the top matching documents +// can be aggregated per bucket. +// +// It can effectively be used to group result sets by certain fields via +// a bucket aggregator. One or more bucket aggregators determines by +// which properties a result set get sliced into. +// +// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-top-hits-aggregation.html +type TopHitsAggregation struct { + searchSource *SearchSource +} + +func NewTopHitsAggregation() *TopHitsAggregation { + return &TopHitsAggregation{ + searchSource: NewSearchSource(), + } +} + +func (a *TopHitsAggregation) From(from int) *TopHitsAggregation { + a.searchSource = a.searchSource.From(from) + return a +} + +func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation { + a.searchSource = a.searchSource.Size(size) + return a +} + +func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation { + a.searchSource = a.searchSource.TrackScores(trackScores) + return a +} + +func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation { + a.searchSource = a.searchSource.Explain(explain) + return a +} + +func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation { + a.searchSource = a.searchSource.Version(version) + return a +} + +func (a *TopHitsAggregation) NoStoredFields() *TopHitsAggregation { + a.searchSource = a.searchSource.NoStoredFields() + return a +} + +func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation { + a.searchSource = a.searchSource.FetchSource(fetchSource) + return a +} + +func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation { + a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext) + return a +} + +func (a *TopHitsAggregation) DocvalueFields(docvalueFields ...string) *TopHitsAggregation { + a.searchSource = a.searchSource.DocvalueFields(docvalueFields...) 
+ return a +} + +func (a *TopHitsAggregation) DocvalueField(docvalueField string) *TopHitsAggregation { + a.searchSource = a.searchSource.DocvalueField(docvalueField) + return a +} + +func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation { + a.searchSource = a.searchSource.ScriptFields(scriptFields...) + return a +} + +func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation { + a.searchSource = a.searchSource.ScriptField(scriptField) + return a +} + +func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation { + a.searchSource = a.searchSource.Sort(field, ascending) + return a +} + +func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation { + a.searchSource = a.searchSource.SortWithInfo(info) + return a +} + +func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation { + a.searchSource = a.searchSource.SortBy(sorter...) + return a +} + +func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation { + a.searchSource = a.searchSource.Highlight(highlight) + return a +} + +func (a *TopHitsAggregation) Highlighter() *Highlight { + return a.searchSource.Highlighter() +} + +func (a *TopHitsAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs": { + // "top_tag_hits": { + // "top_hits": { + // "sort": [ + // { + // "last_activity_date": { + // "order": "desc" + // } + // } + // ], + // "_source": { + // "include": [ + // "title" + // ] + // }, + // "size" : 1 + // } + // } + // } + // } + // This method returns only the { "top_hits" : { ... } } part. + + source := make(map[string]interface{}) + src, err := a.searchSource.Source() + if err != nil { + return nil, err + } + source["top_hits"] = src + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go new file mode 100644 index 0000000000000000000000000000000000000000..4e7281d62d75e01b695409eb213f7fdab068508a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_metrics_value_count.go @@ -0,0 +1,102 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ValueCountAggregation is a single-value metrics aggregation that counts +// the number of values that are extracted from the aggregated documents. +// These values can be extracted either from specific fields in the documents, +// or be generated by a provided script. Typically, this aggregator will be +// used in conjunction with other single-value aggregations. +// For example, when computing the avg one might be interested in the +// number of values the average is computed over. 
+// See: https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-metrics-valuecount-aggregation.html +type ValueCountAggregation struct { + field string + script *Script + format string + subAggregations map[string]Aggregation + meta map[string]interface{} +} + +func NewValueCountAggregation() *ValueCountAggregation { + return &ValueCountAggregation{ + subAggregations: make(map[string]Aggregation), + } +} + +func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation { + a.field = field + return a +} + +func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation { + a.script = script + return a +} + +func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation { + a.format = format + return a +} + +func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation { + a.meta = metaData + return a +} + +func (a *ValueCountAggregation) Source() (interface{}, error) { + // Example: + // { + // "aggs" : { + // "grades_count" : { "value_count" : { "field" : "grade" } } + // } + // } + // This method returns only the { "value_count" : { "field" : "grade" } } part. + + source := make(map[string]interface{}) + opts := make(map[string]interface{}) + source["value_count"] = opts + + // ValuesSourceAggregationBuilder + if a.field != "" { + opts["field"] = a.field + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + opts["script"] = src + } + if a.format != "" { + opts["format"] = a.format + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go new file mode 100644 index 0000000000000000000000000000000000000000..7eea9310d52e59d8272cdaae9572ff180e717cd8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_avg_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// AvgBucketAggregation is a sibling pipeline aggregation which calculates +// the (mean) average value of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-avg-bucket-aggregation.html +type AvgBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation. 
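+//
+// Illustrative sketch (editorial note, not part of the upstream file): a
+// sibling pipeline aggregation is pointed at a metric inside a multi-bucket
+// sibling via BucketsPath; the "sales_per_month>sales" path is hypothetical.
+//
+//	avg := NewAvgBucketAggregation().
+//		BucketsPath("sales_per_month>sales").
+//		GapSkip()
+//	src, _ := avg.Source()
+//	// json.Marshal(src) == {"avg_bucket":{"buckets_path":"sales_per_month>sales","gap_policy":"skip"}}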
+func NewAvgBucketAggregation() *AvgBucketAggregation { + return &AvgBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *AvgBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *AvgBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["avg_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go new file mode 100644 index 0000000000000000000000000000000000000000..13cad638f25ab565f3ed7a9b17542aeb32e3ac83 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_script.go @@ -0,0 +1,132 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketScriptAggregation is a parent pipeline aggregation which executes +// a script which can perform per bucket computations on specified metrics +// in the parent multi-bucket aggregation. The specified metric must be +// numeric and the script must return a numeric value. 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-bucket-script-aggregation.html +type BucketScriptAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation. +func NewBucketScriptAggregation() *BucketScriptAggregation { + return &BucketScriptAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *BucketScriptAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketScriptAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. 
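+//
+// Example sketch (editorial note; the paths and script body are hypothetical,
+// and NewScript is assumed to be the package's script constructor):
+//
+//	bs := NewBucketScriptAggregation().
+//		AddBucketsPath("tShirtSales", "t-shirts>sales").
+//		AddBucketsPath("totalSales", "total_sales").
+//		Script(NewScript("params.tShirtSales / params.totalSales * 100"))
+//	// Source() then emits "bucket_script" with buckets_path set to the
+//	// map {"tShirtSales": "t-shirts>sales", "totalSales": "total_sales"}.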
+func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketScriptAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_script"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go new file mode 100644 index 0000000000000000000000000000000000000000..f3c938519039cb83de91b25e4e2edd2d273a2449 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_bucket_selector.go @@ -0,0 +1,134 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// BucketSelectorAggregation is a parent pipeline aggregation which +// determines whether the current bucket will be retained in the parent +// multi-bucket aggregation. The specific metric must be numeric and +// the script must return a boolean value. If the script language is +// expression then a numeric return value is permitted. In this case 0.0 +// will be evaluated as false and all other values will evaluate to true. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-bucket-selector-aggregation.html +type BucketSelectorAggregation struct { + format string + gapPolicy string + script *Script + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPathsMap map[string]string +} + +// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation. +func NewBucketSelectorAggregation() *BucketSelectorAggregation { + return &BucketSelectorAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPathsMap: make(map[string]string), + } +} + +func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. 
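+//
+// Example sketch (editorial note; the path and script are hypothetical, and
+// NewScript is assumed to be the package's script constructor): the selector
+// acts like a HAVING clause, keeping only buckets for which the script
+// evaluates to true, while skipping gaps in the series.
+//
+//	sel := NewBucketSelectorAggregation().
+//		AddBucketsPath("totalSales", "total_sales").
+//		Script(NewScript("params.totalSales > 200")).
+//		GapSkip()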
+func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation { + a.gapPolicy = "skip" + return a +} + +// Script is the script to run. +func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation { + a.script = script + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *BucketSelectorAggregation) SubAggregation(name string, subAggregation Aggregation) *BucketSelectorAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation { + a.meta = metaData + return a +} + +// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation { + a.bucketsPathsMap = bucketsPathsMap + return a +} + +// AddBucketsPath adds a bucket path to use for this pipeline aggregator. +func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation { + if a.bucketsPathsMap == nil { + a.bucketsPathsMap = make(map[string]string) + } + a.bucketsPathsMap[name] = path + return a +} + +func (a *BucketSelectorAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["bucket_selector"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.script != nil { + src, err := a.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + } + + // Add buckets paths + if len(a.bucketsPathsMap) > 0 { + params["buckets_path"] = a.bucketsPathsMap + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go new file mode 100644 index 0000000000000000000000000000000000000000..95546f1cb0ee0a5ef55b569a080514c3ee0d9e5a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_cumulative_sum.go @@ -0,0 +1,90 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CumulativeSumAggregation is a parent pipeline aggregation which calculates +// the cumulative sum of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-cumulative-sum-aggregation.html +type CumulativeSumAggregation struct { + format string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation. +func NewCumulativeSumAggregation() *CumulativeSumAggregation { + return &CumulativeSumAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation { + a.format = format + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *CumulativeSumAggregation) SubAggregation(name string, subAggregation Aggregation) *CumulativeSumAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *CumulativeSumAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["cumulative_sum"] = params + + if a.format != "" { + params["format"] = a.format + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go new file mode 100644 index 0000000000000000000000000000000000000000..2c3c7e03ab683971ff99ed5d7d4877bf0dd5eb55 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_derivative.go @@ -0,0 +1,124 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DerivativeAggregation is a parent pipeline aggregation which calculates +// the derivative of a specified metric in a parent histogram (or date_histogram) +// aggregation. The specified metric must be numeric and the enclosing +// histogram must have min_doc_count set to 0 (default for histogram aggregations). 
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-derivative-aggregation.html +type DerivativeAggregation struct { + format string + gapPolicy string + unit string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewDerivativeAggregation creates and initializes a new DerivativeAggregation. +func NewDerivativeAggregation() *DerivativeAggregation { + return &DerivativeAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation { + a.gapPolicy = "skip" + return a +} + +// Unit sets the unit provided, e.g. "1d" or "1y". +// It is only useful when calculating the derivative using a date_histogram. +func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation { + a.unit = unit + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *DerivativeAggregation) SubAggregation(name string, subAggregation Aggregation) *DerivativeAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *DerivativeAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["derivative"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.unit != "" { + params["unit"] = a.unit + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go new file mode 100644 index 0000000000000000000000000000000000000000..5a10b0e45532e28de03e7ddfafaffe31dc905982 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_max_bucket.go @@ -0,0 +1,114 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MaxBucketAggregation is a sibling pipeline aggregation which identifies +// the bucket(s) with the maximum value of a specified metric in a sibling +// aggregation and outputs both the value and the key(s) of the bucket(s). +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-max-bucket-aggregation.html +type MaxBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation. +func NewMaxBucketAggregation() *MaxBucketAggregation { + return &MaxBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MaxBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. 
+func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation {
+	a.meta = metaData
+	return a
+}
+
+// BucketsPath sets the paths to the buckets to use for this pipeline aggregator.
+func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation {
+	a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...)
+	return a
+}
+
+func (a *MaxBucketAggregation) Source() (interface{}, error) {
+	source := make(map[string]interface{})
+	params := make(map[string]interface{})
+	source["max_bucket"] = params
+
+	if a.format != "" {
+		params["format"] = a.format
+	}
+	if a.gapPolicy != "" {
+		params["gap_policy"] = a.gapPolicy
+	}
+
+	// Add buckets paths
+	switch len(a.bucketsPaths) {
+	case 0:
+	case 1:
+		params["buckets_path"] = a.bucketsPaths[0]
+	default:
+		params["buckets_path"] = a.bucketsPaths
+	}
+
+	// AggregationBuilder (SubAggregations)
+	if len(a.subAggregations) > 0 {
+		aggsMap := make(map[string]interface{})
+		source["aggregations"] = aggsMap
+		for name, aggregate := range a.subAggregations {
+			src, err := aggregate.Source()
+			if err != nil {
+				return nil, err
+			}
+			aggsMap[name] = src
+		}
+	}
+
+	// Add Meta data if available
+	if len(a.meta) > 0 {
+		source["meta"] = a.meta
+	}
+
+	return source, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go
new file mode 100644
index 0000000000000000000000000000000000000000..96982250ca91e979516770d63028bb9903cac5ab
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_min_bucket.go
@@ -0,0 +1,114 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// MinBucketAggregation is a sibling pipeline aggregation which identifies
+// the bucket(s) with the minimum value of a specified metric in a sibling
+// aggregation and outputs both the value and the key(s) of the bucket(s).
+// The specified metric must be numeric and the sibling aggregation must
+// be a multi-bucket aggregation.
+//
+// For more details, see
+// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-min-bucket-aggregation.html
+type MinBucketAggregation struct {
+	format    string
+	gapPolicy string
+
+	subAggregations map[string]Aggregation
+	meta            map[string]interface{}
+	bucketsPaths    []string
+}
+
+// NewMinBucketAggregation creates and initializes a new MinBucketAggregation.
+func NewMinBucketAggregation() *MinBucketAggregation {
+	return &MinBucketAggregation{
+		subAggregations: make(map[string]Aggregation),
+		bucketsPaths:    make([]string, 0),
+	}
+}
+
+func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation {
+	a.format = format
+	return a
+}
+
+// GapPolicy defines what should be done when a gap in the series is discovered.
+// Valid values include "insert_zeros" or "skip". Default is "insert_zeros".
+func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation {
+	a.gapPolicy = gapPolicy
+	return a
+}
+
+// GapInsertZeros inserts zeros for gaps in the series.
+func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation {
+	a.gapPolicy = "insert_zeros"
+	return a
+}
+
+// GapSkip skips gaps in the series.
+func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MinBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *MinBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MinBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["min_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go new file mode 100644 index 0000000000000000000000000000000000000000..cf94342f70a0543e515121a2f738f1200a871ab0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_mov_avg.go @@ -0,0 +1,393 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MovAvgAggregation operates on a series of data. It will slide a window +// across the data and emit the average value of that window. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html +type MovAvgAggregation struct { + format string + gapPolicy string + model MovAvgModel + window *int + predict *int + minimize *bool + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewMovAvgAggregation creates and initializes a new MovAvgAggregation. +func NewMovAvgAggregation() *MovAvgAggregation { + return &MovAvgAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. 
+func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation { + a.gapPolicy = "skip" + return a +} + +// Model is used to define what type of moving average you want to use +// in the series. +func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation { + a.model = model + return a +} + +// Window sets the window size for the moving average. This window will +// "slide" across the series, and the values inside that window will +// be used to calculate the moving avg value. +func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation { + a.window = &window + return a +} + +// Predict sets the number of predictions that should be returned. +// Each prediction will be spaced at the intervals in the histogram. +// E.g. a predict of 2 will return two new buckets at the end of the +// histogram with the predicted values. +func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation { + a.predict = &numPredictions + return a +} + +// Minimize determines if the model should be fit to the data using a +// cost minimizing algorithm. +func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation { + a.minimize = &minimize + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *MovAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *MovAvgAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *MovAvgAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["moving_avg"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.model != nil { + params["model"] = a.model.Name() + settings := a.model.Settings() + if len(settings) > 0 { + params["settings"] = settings + } + } + if a.window != nil { + params["window"] = *a.window + } + if a.predict != nil { + params["predict"] = *a.predict + } + if a.minimize != nil { + params["minimize"] = *a.minimize + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} + +// -- Models for moving averages -- +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_models + +// MovAvgModel specifies the model to use with the MovAvgAggregation. 
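+//
+// Illustrative sketch (editorial note; the buckets path and tuning values are
+// hypothetical): a model is attached via Model, and its Name/Settings end up
+// under "model" and "settings" in the serialized request:
+//
+//	ma := NewMovAvgAggregation().
+//		BucketsPath("the_sum").
+//		Window(30).
+//		Model(NewHoltLinearMovAvgModel().Alpha(0.5).Beta(0.4))
+//	// Source() yields {"moving_avg":{"buckets_path":"the_sum","model":"holt",
+//	//	"settings":{"alpha":0.5,"beta":0.4},"window":30}}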
+type MovAvgModel interface { + Name() string + Settings() map[string]interface{} +} + +// -- EWMA -- + +// EWMAMovAvgModel calculates an exponentially weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted +type EWMAMovAvgModel struct { + alpha *float64 +} + +// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel. +func NewEWMAMovAvgModel() *EWMAMovAvgModel { + return &EWMAMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel { + m.alpha = &alpha + return m +} + +// Name of the model. +func (m *EWMAMovAvgModel) Name() string { + return "ewma" +} + +// Settings of the model. +func (m *EWMAMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + return settings +} + +// -- Holt linear -- + +// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear +type HoltLinearMovAvgModel struct { + alpha *float64 + beta *float64 +} + +// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel. +func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel { + return &HoltLinearMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. +func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel { + m.beta = &beta + return m +} + +// Name of the model. +func (m *HoltLinearMovAvgModel) Name() string { + return "holt" +} + +// Settings of the model. +func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + return settings +} + +// -- Holt Winters -- + +// HoltWintersMovAvgModel calculates a triple exponential weighted moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters +type HoltWintersMovAvgModel struct { + alpha *float64 + beta *float64 + gamma *float64 + period *int + seasonalityType string + pad *bool +} + +// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel. +func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel { + return &HoltWintersMovAvgModel{} +} + +// Alpha controls the smoothing of the data. Alpha = 1 retains no memory +// of past values (e.g. a random walk), while alpha = 0 retains infinite +// memory of past values (e.g. the series mean). Useful values are somewhere +// in between. Defaults to 0.5. 
+func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel { + m.alpha = &alpha + return m +} + +// Beta is equivalent to Alpha but controls the smoothing of the trend +// instead of the data. +func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel { + m.beta = &beta + return m +} + +func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel { + m.gamma = &gamma + return m +} + +func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel { + m.period = &period + return m +} + +func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel { + m.seasonalityType = typ + return m +} + +func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel { + m.pad = &pad + return m +} + +// Name of the model. +func (m *HoltWintersMovAvgModel) Name() string { + return "holt_winters" +} + +// Settings of the model. +func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} { + settings := make(map[string]interface{}) + if m.alpha != nil { + settings["alpha"] = *m.alpha + } + if m.beta != nil { + settings["beta"] = *m.beta + } + if m.gamma != nil { + settings["gamma"] = *m.gamma + } + if m.period != nil { + settings["period"] = *m.period + } + if m.pad != nil { + settings["pad"] = *m.pad + } + if m.seasonalityType != "" { + settings["type"] = m.seasonalityType + } + return settings +} + +// -- Linear -- + +// LinearMovAvgModel calculates a linearly weighted moving average, such +// that older values are linearly less important. "Time" is determined +// by position in collection. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_linear +type LinearMovAvgModel struct { +} + +// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel. +func NewLinearMovAvgModel() *LinearMovAvgModel { + return &LinearMovAvgModel{} +} + +// Name of the model. +func (m *LinearMovAvgModel) Name() string { + return "linear" +} + +// Settings of the model. +func (m *LinearMovAvgModel) Settings() map[string]interface{} { + return nil +} + +// -- Simple -- + +// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-movavg-aggregation.html#_simple +type SimpleMovAvgModel struct { +} + +// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel. +func NewSimpleMovAvgModel() *SimpleMovAvgModel { + return &SimpleMovAvgModel{} +} + +// Name of the model. +func (m *SimpleMovAvgModel) Name() string { + return "simple" +} + +// Settings of the model. +func (m *SimpleMovAvgModel) Settings() map[string]interface{} { + return nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go new file mode 100644 index 0000000000000000000000000000000000000000..84ae4300480e793b02e8c90ea1d99d0e9077c167 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_serial_diff.go @@ -0,0 +1,124 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SerialDiffAggregation implements serial differencing. 
+// Serial differencing is a technique where values in a time series are +// subtracted from itself at different time lags or periods. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-serialdiff-aggregation.html +type SerialDiffAggregation struct { + format string + gapPolicy string + lag *int + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation. +func NewSerialDiffAggregation() *SerialDiffAggregation { + return &SerialDiffAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation { + a.gapPolicy = "skip" + return a +} + +// Lag specifies the historical bucket to subtract from the current value. +// E.g. a lag of 7 will subtract the current value from the value 7 buckets +// ago. Lag must be a positive, non-zero integer. +func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation { + a.lag = &lag + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (a *SerialDiffAggregation) SubAggregation(name string, subAggregation Aggregation) *SerialDiffAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
+ return a +} + +func (a *SerialDiffAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["serial_diff"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + if a.lag != nil { + params["lag"] = *a.lag + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go new file mode 100644 index 0000000000000000000000000000000000000000..fb0a94afc30a118ae017e075d176ea1aa2afe376 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_stats_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// StatsBucketAggregation is a sibling pipeline aggregation which calculates +// a variety of stats across all bucket of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-stats-bucket-aggregation.html +type StatsBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewStatsBucketAggregation creates and initializes a new StatsBucketAggregation. +func NewStatsBucketAggregation() *StatsBucketAggregation { + return &StatsBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (s *StatsBucketAggregation) Format(format string) *StatsBucketAggregation { + s.format = format + return s +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (s *StatsBucketAggregation) GapPolicy(gapPolicy string) *StatsBucketAggregation { + s.gapPolicy = gapPolicy + return s +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (s *StatsBucketAggregation) GapInsertZeros() *StatsBucketAggregation { + s.gapPolicy = "insert_zeros" + return s +} + +// GapSkip skips gaps in the series. +func (s *StatsBucketAggregation) GapSkip() *StatsBucketAggregation { + s.gapPolicy = "skip" + return s +} + +// SubAggregation adds a sub-aggregation to this aggregation. +func (s *StatsBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsBucketAggregation { + s.subAggregations[name] = subAggregation + return s +} + +// Meta sets the meta data to be included in the aggregation response. 
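+//
+// A hypothetical usage sketch; the sibling path "sales_per_month>sales"
+// is illustrative only:
+//
+//	agg := NewStatsBucketAggregation().
+//		BucketsPath("sales_per_month>sales").
+//		GapPolicy("skip")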
+func (s *StatsBucketAggregation) Meta(metaData map[string]interface{}) *StatsBucketAggregation { + s.meta = metaData + return s +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (s *StatsBucketAggregation) BucketsPath(bucketsPaths ...string) *StatsBucketAggregation { + s.bucketsPaths = append(s.bucketsPaths, bucketsPaths...) + return s +} + +func (s *StatsBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["stats_bucket"] = params + + if s.format != "" { + params["format"] = s.format + } + if s.gapPolicy != "" { + params["gap_policy"] = s.gapPolicy + } + + // Add buckets paths + switch len(s.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = s.bucketsPaths[0] + default: + params["buckets_path"] = s.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(s.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range s.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(s.meta) > 0 { + source["meta"] = s.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go new file mode 100644 index 0000000000000000000000000000000000000000..1f78efa5655a192721cac74d0013a736a25c6020 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_aggs_pipeline_sum_bucket.go @@ -0,0 +1,113 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SumBucketAggregation is a sibling pipeline aggregation which calculates +// the sum across all buckets of a specified metric in a sibling aggregation. +// The specified metric must be numeric and the sibling aggregation must +// be a multi-bucket aggregation. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-aggregations-pipeline-sum-bucket-aggregation.html +type SumBucketAggregation struct { + format string + gapPolicy string + + subAggregations map[string]Aggregation + meta map[string]interface{} + bucketsPaths []string +} + +// NewSumBucketAggregation creates and initializes a new SumBucketAggregation. +func NewSumBucketAggregation() *SumBucketAggregation { + return &SumBucketAggregation{ + subAggregations: make(map[string]Aggregation), + bucketsPaths: make([]string, 0), + } +} + +func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation { + a.format = format + return a +} + +// GapPolicy defines what should be done when a gap in the series is discovered. +// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". +func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation { + a.gapPolicy = gapPolicy + return a +} + +// GapInsertZeros inserts zeros for gaps in the series. +func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation { + a.gapPolicy = "insert_zeros" + return a +} + +// GapSkip skips gaps in the series. +func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation { + a.gapPolicy = "skip" + return a +} + +// SubAggregation adds a sub-aggregation to this aggregation. 
+func (a *SumBucketAggregation) SubAggregation(name string, subAggregation Aggregation) *SumBucketAggregation { + a.subAggregations[name] = subAggregation + return a +} + +// Meta sets the meta data to be included in the aggregation response. +func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation { + a.meta = metaData + return a +} + +// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. +func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation { + a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) + return a +} + +func (a *SumBucketAggregation) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["sum_bucket"] = params + + if a.format != "" { + params["format"] = a.format + } + if a.gapPolicy != "" { + params["gap_policy"] = a.gapPolicy + } + + // Add buckets paths + switch len(a.bucketsPaths) { + case 0: + case 1: + params["buckets_path"] = a.bucketsPaths[0] + default: + params["buckets_path"] = a.bucketsPaths + } + + // AggregationBuilder (SubAggregations) + if len(a.subAggregations) > 0 { + aggsMap := make(map[string]interface{}) + source["aggregations"] = aggsMap + for name, aggregate := range a.subAggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + } + + // Add Meta data if available + if len(a.meta) > 0 { + source["meta"] = a.meta + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go new file mode 100644 index 0000000000000000000000000000000000000000..8ae223834e392f843665139b55dac4e4ed68b2d8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_bool.go @@ -0,0 +1,212 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "fmt" + +// A bool query matches documents matching boolean +// combinations of other queries. +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-bool-query.html +type BoolQuery struct { + Query + mustClauses []Query + mustNotClauses []Query + filterClauses []Query + shouldClauses []Query + boost *float64 + disableCoord *bool + minimumShouldMatch string + adjustPureNegative *bool + queryName string +} + +// Creates a new bool query. +func NewBoolQuery() *BoolQuery { + return &BoolQuery{ + mustClauses: make([]Query, 0), + mustNotClauses: make([]Query, 0), + filterClauses: make([]Query, 0), + shouldClauses: make([]Query, 0), + } +} + +func (q *BoolQuery) Must(queries ...Query) *BoolQuery { + q.mustClauses = append(q.mustClauses, queries...) + return q +} + +func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery { + q.mustNotClauses = append(q.mustNotClauses, queries...) + return q +} + +func (q *BoolQuery) Filter(filters ...Query) *BoolQuery { + q.filterClauses = append(q.filterClauses, filters...) + return q +} + +func (q *BoolQuery) Should(queries ...Query) *BoolQuery { + q.shouldClauses = append(q.shouldClauses, queries...) 
+ return q +} + +func (q *BoolQuery) Boost(boost float64) *BoolQuery { + q.boost = &boost + return q +} + +func (q *BoolQuery) DisableCoord(disableCoord bool) *BoolQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery { + q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch) + return q +} + +func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery { + q.adjustPureNegative = &adjustPureNegative + return q +} + +func (q *BoolQuery) QueryName(queryName string) *BoolQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the bool query. +func (q *BoolQuery) Source() (interface{}, error) { + // { + // "bool" : { + // "must" : { + // "term" : { "user" : "kimchy" } + // }, + // "must_not" : { + // "range" : { + // "age" : { "from" : 10, "to" : 20 } + // } + // }, + // "filter" : [ + // ... + // ] + // "should" : [ + // { + // "term" : { "tag" : "wow" } + // }, + // { + // "term" : { "tag" : "elasticsearch" } + // } + // ], + // "minimum_number_should_match" : 1, + // "boost" : 1.0 + // } + // } + + query := make(map[string]interface{}) + + boolClause := make(map[string]interface{}) + query["bool"] = boolClause + + // must + if len(q.mustClauses) == 1 { + src, err := q.mustClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must"] = src + } else if len(q.mustClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.mustClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must"] = clauses + } + + // must_not + if len(q.mustNotClauses) == 1 { + src, err := q.mustNotClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["must_not"] = src + } else if len(q.mustNotClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.mustNotClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["must_not"] = clauses + } + + // filter + if len(q.filterClauses) == 1 { + src, err := q.filterClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["filter"] = src + } else if len(q.filterClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.filterClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["filter"] = clauses + } + + // should + if len(q.shouldClauses) == 1 { + src, err := q.shouldClauses[0].Source() + if err != nil { + return nil, err + } + boolClause["should"] = src + } else if len(q.shouldClauses) > 1 { + var clauses []interface{} + for _, subQuery := range q.shouldClauses { + src, err := subQuery.Source() + if err != nil { + return nil, err + } + clauses = append(clauses, src) + } + boolClause["should"] = clauses + } + + if q.boost != nil { + boolClause["boost"] = *q.boost + } + if q.disableCoord != nil { + boolClause["disable_coord"] = *q.disableCoord + } + if q.minimumShouldMatch != "" { + boolClause["minimum_should_match"] = q.minimumShouldMatch + } + if q.adjustPureNegative != nil { + boolClause["adjust_pure_negative"] = *q.adjustPureNegative + } + if q.queryName != "" { + boolClause["_name"] = q.queryName + } + + return query, nil +} diff --git 
a/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go new file mode 100644 index 0000000000000000000000000000000000000000..3a31237c994352409a5d04fa2018cfb068950c22 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_boosting.go @@ -0,0 +1,97 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// A boosting query can be used to effectively +// demote results that match a given query. +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-boosting-query.html +type BoostingQuery struct { + Query + positiveClause Query + negativeClause Query + negativeBoost *float64 + boost *float64 +} + +// Creates a new boosting query. +func NewBoostingQuery() *BoostingQuery { + return &BoostingQuery{} +} + +func (q *BoostingQuery) Positive(positive Query) *BoostingQuery { + q.positiveClause = positive + return q +} + +func (q *BoostingQuery) Negative(negative Query) *BoostingQuery { + q.negativeClause = negative + return q +} + +func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery { + q.negativeBoost = &negativeBoost + return q +} + +func (q *BoostingQuery) Boost(boost float64) *BoostingQuery { + q.boost = &boost + return q +} + +// Creates the query source for the boosting query. +func (q *BoostingQuery) Source() (interface{}, error) { + // { + // "boosting" : { + // "positive" : { + // "term" : { + // "field1" : "value1" + // } + // }, + // "negative" : { + // "term" : { + // "field2" : "value2" + // } + // }, + // "negative_boost" : 0.2 + // } + // } + + query := make(map[string]interface{}) + + boostingClause := make(map[string]interface{}) + query["boosting"] = boostingClause + + // Negative and positive clause as well as negative boost + // are mandatory in the Java client. + + // positive + if q.positiveClause != nil { + src, err := q.positiveClause.Source() + if err != nil { + return nil, err + } + boostingClause["positive"] = src + } + + // negative + if q.negativeClause != nil { + src, err := q.negativeClause.Source() + if err != nil { + return nil, err + } + boostingClause["negative"] = src + } + + if q.negativeBoost != nil { + boostingClause["negative_boost"] = *q.negativeBoost + } + + if q.boost != nil { + boostingClause["boost"] = *q.boost + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go new file mode 100644 index 0000000000000000000000000000000000000000..a1a37b37c489b806ca70e101907cc392cb23b578 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_common_terms.go @@ -0,0 +1,146 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// CommonTermsQuery is a modern alternative to stopwords +// which improves the precision and recall of search results +// (by taking stopwords into account), without sacrificing performance. 
+// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-common-terms-query.html +type CommonTermsQuery struct { + Query + name string + text interface{} + cutoffFreq *float64 + highFreq *float64 + highFreqOp string + highFreqMinimumShouldMatch string + lowFreq *float64 + lowFreqOp string + lowFreqMinimumShouldMatch string + analyzer string + boost *float64 + disableCoord *bool + queryName string +} + +// NewCommonTermsQuery creates and initializes a new common terms query. +func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery { + return &CommonTermsQuery{name: name, text: text} +} + +func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery { + q.cutoffFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery { + q.highFreq = &f + return q +} + +func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery { + q.highFreqOp = op + return q +} + +func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.highFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery { + q.lowFreq = &f + return q +} + +func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery { + q.lowFreqOp = op + return q +} + +func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { + q.lowFreqMinimumShouldMatch = minShouldMatch + return q +} + +func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery { + q.analyzer = analyzer + return q +} + +func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery { + q.boost = &boost + return q +} + +func (q *CommonTermsQuery) DisableCoord(disableCoord bool) *CommonTermsQuery { + q.disableCoord = &disableCoord + return q +} + +func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the common query. 
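+//
+// A hypothetical builder sketch that would produce JSON along the lines of
+// the example in the method body below; the field name and values are
+// illustrative only:
+//
+//	q := NewCommonTermsQuery("body", "this is bonsai cool").
+//		CutoffFrequency(0.001).
+//		LowFreqOperator("and")
+//	src, err := q.Source() // map ready for JSON serialization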
+func (q *CommonTermsQuery) Source() (interface{}, error) { + // { + // "common": { + // "body": { + // "query": "this is bonsai cool", + // "cutoff_frequency": 0.001 + // } + // } + // } + source := make(map[string]interface{}) + body := make(map[string]interface{}) + query := make(map[string]interface{}) + + source["common"] = body + body[q.name] = query + query["query"] = q.text + + if q.cutoffFreq != nil { + query["cutoff_frequency"] = *q.cutoffFreq + } + if q.highFreq != nil { + query["high_freq"] = *q.highFreq + } + if q.highFreqOp != "" { + query["high_freq_operator"] = q.highFreqOp + } + if q.lowFreq != nil { + query["low_freq"] = *q.lowFreq + } + if q.lowFreqOp != "" { + query["low_freq_operator"] = q.lowFreqOp + } + if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" { + mm := make(map[string]interface{}) + if q.lowFreqMinimumShouldMatch != "" { + mm["low_freq"] = q.lowFreqMinimumShouldMatch + } + if q.highFreqMinimumShouldMatch != "" { + mm["high_freq"] = q.highFreqMinimumShouldMatch + } + query["minimum_should_match"] = mm + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.disableCoord != nil { + query["disable_coord"] = *q.disableCoord + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go new file mode 100644 index 0000000000000000000000000000000000000000..3ba879958ff58713bb1e9cc9919875ba75ba50f7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_constant_score.go @@ -0,0 +1,59 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ConstantScoreQuery is a query that wraps a filter and simply returns +// a constant score equal to the query boost for every document in the filter. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-constant-score-query.html +type ConstantScoreQuery struct { + filter Query + boost *float64 +} + +// ConstantScoreQuery creates and initializes a new constant score query. +func NewConstantScoreQuery(filter Query) *ConstantScoreQuery { + return &ConstantScoreQuery{ + filter: filter, + } +} + +// Boost sets the boost for this query. Documents matching this query +// will (in addition to the normal weightings) have their score multiplied +// by the boost provided. +func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery { + q.boost = &boost + return q +} + +// Source returns the query source. +func (q *ConstantScoreQuery) Source() (interface{}, error) { + // "constant_score" : { + // "filter" : { + // .... 
+ // }, + // "boost" : 1.5 + // } + + query := make(map[string]interface{}) + + params := make(map[string]interface{}) + query["constant_score"] = params + + // filter + src, err := q.filter.Source() + if err != nil { + return nil, err + } + params["filter"] = src + + // boost + if q.boost != nil { + params["boost"] = *q.boost + } + + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go new file mode 100644 index 0000000000000000000000000000000000000000..af9a40d3758b8c2af32edc650d63e38693aca8d1 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_dis_max.go @@ -0,0 +1,104 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// DisMaxQuery is a query that generates the union of documents produced by +// its subqueries, and that scores each document with the maximum score +// for that document as produced by any subquery, plus a tie breaking +// increment for any additional matching subqueries. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-dis-max-query.html +type DisMaxQuery struct { + queries []Query + boost *float64 + tieBreaker *float64 + queryName string +} + +// NewDisMaxQuery creates and initializes a new dis max query. +func NewDisMaxQuery() *DisMaxQuery { + return &DisMaxQuery{ + queries: make([]Query, 0), + } +} + +// Query adds one or more queries to the dis max query. +func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery { + q.queries = append(q.queries, queries...) + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. +func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery { + q.boost = &boost + return q +} + +// TieBreaker is the factor by which the score of each non-maximum disjunct +// for a document is multiplied with and added into the final score. +// +// If non-zero, the value should be small, on the order of 0.1, which says +// that 10 occurrences of word in a lower-scored field that is also in a +// higher scored field is just as good as a unique word in the lower scored +// field (i.e., one that is not in any higher scored field). +func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery { + q.tieBreaker = &tieBreaker + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched filters per hit. +func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable content for this query. 
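+//
+// A hypothetical sketch matching the JSON outline in the method body below;
+// NewTermQuery is assumed to be provided elsewhere in this client:
+//
+//	q := NewDisMaxQuery().
+//		Query(NewTermQuery("age", 34), NewTermQuery("age", 35)).
+//		TieBreaker(0.7).
+//		Boost(1.2)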
+func (q *DisMaxQuery) Source() (interface{}, error) {
+	// {
+	//  "dis_max" : {
+	//  "tie_breaker" : 0.7,
+	//  "boost" : 1.2,
+	//  "queries" : [
+	//   {
+	//    "term" : { "age" : 34 }
+	//   },
+	//   {
+	//    "term" : { "age" : 35 }
+	//   }
+	//  ]
+	//  }
+	// }
+
+	query := make(map[string]interface{})
+	params := make(map[string]interface{})
+	query["dis_max"] = params
+
+	if q.tieBreaker != nil {
+		params["tie_breaker"] = *q.tieBreaker
+	}
+	if q.boost != nil {
+		params["boost"] = *q.boost
+	}
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+
+	// queries
+	var clauses []interface{}
+	for _, subQuery := range q.queries {
+		src, err := subQuery.Source()
+		if err != nil {
+			return nil, err
+		}
+		clauses = append(clauses, src)
+	}
+	params["queries"] = clauses
+
+	return query, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go
new file mode 100644
index 0000000000000000000000000000000000000000..6c2ebd369cb399036dedb03ff09a00fb69bceaf9
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_exists.go
@@ -0,0 +1,49 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// ExistsQuery is a query that only matches documents in which the
+// specified field has a value.
+//
+// For more details, see:
+// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-exists-query.html
+type ExistsQuery struct {
+	name      string
+	queryName string
+}
+
+// NewExistsQuery creates and initializes a new exists query.
+func NewExistsQuery(name string) *ExistsQuery {
+	return &ExistsQuery{
+		name: name,
+	}
+}
+
+// QueryName sets the query name for the filter that can be used
+// when searching for matched queries per hit.
+func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery {
+	q.queryName = queryName
+	return q
+}
+
+// Source returns the JSON serializable content for this query.
+func (q *ExistsQuery) Source() (interface{}, error) {
+	// {
+	//   "exists" : {
+	//     "field" : "user"
+	//   }
+	// }
+
+	query := make(map[string]interface{})
+	params := make(map[string]interface{})
+	query["exists"] = params
+
+	params["field"] = q.name
+	if q.queryName != "" {
+		params["_name"] = q.queryName
+	}
+
+	return query, nil
+}
diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go
new file mode 100644
index 0000000000000000000000000000000000000000..46f3e78ca8934f5f5b6622ab8446998ea6c83c2b
--- /dev/null
+++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq.go
@@ -0,0 +1,172 @@
+// Copyright 2012-present Oliver Eilhard. All rights reserved.
+// Use of this source code is governed by a MIT-license.
+// See http://olivere.mit-license.org/license.txt for details.
+
+package elastic
+
+// FunctionScoreQuery allows you to modify the score of documents that
+// are retrieved by a query. This can be useful if, for example,
+// a score function is computationally expensive and it is sufficient
+// to compute the score on a filtered set of documents.
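+//
+// A hypothetical sketch that boosts documents matching a filter and scales
+// scores by a document field; NewTermQuery is assumed to be provided
+// elsewhere in this client, and the score functions used here are the ones
+// defined in search_queries_fsq_score_funcs.go:
+//
+//	q := NewFunctionScoreQuery().
+//		Query(NewTermQuery("tag", "elasticsearch")).
+//		Add(NewTermQuery("featured", true), NewWeightFactorFunction(2.0)).
+//		AddScoreFunc(NewFieldValueFactorFunction().Field("likes").Factor(1.2).Modifier("sqrt")).
+//		ScoreMode("sum").
+//		BoostMode("multiply")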
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html +type FunctionScoreQuery struct { + query Query + filter Query + boost *float64 + maxBoost *float64 + scoreMode string + boostMode string + filters []Query + scoreFuncs []ScoreFunction + minScore *float64 + weight *float64 +} + +// NewFunctionScoreQuery creates and initializes a new function score query. +func NewFunctionScoreQuery() *FunctionScoreQuery { + return &FunctionScoreQuery{ + filters: make([]Query, 0), + scoreFuncs: make([]ScoreFunction, 0), + } +} + +// Query sets the query for the function score query. +func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { + q.query = query + q.filter = nil + return q +} + +// Filter sets the filter for the function score query. +func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { + q.query = nil + q.filter = filter + return q +} + +// Add adds a score function that will execute on all the documents +// matching the filter. +func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, filter) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// AddScoreFunc adds a score function that will execute the function on all documents. +func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery { + q.filters = append(q.filters, nil) + q.scoreFuncs = append(q.scoreFuncs, scoreFunc) + return q +} + +// ScoreMode defines how results of individual score functions will be aggregated. +// Can be first, avg, max, sum, min, or multiply. +func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery { + q.scoreMode = scoreMode + return q +} + +// BoostMode defines how the combined result of score functions will +// influence the final score together with the sub query score. +func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery { + q.boostMode = boostMode + return q +} + +// MaxBoost is the maximum boost that will be applied by function score. +func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery { + q.maxBoost = &maxBoost + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery { + q.boost = &boost + return q +} + +// MinScore sets the minimum score. +func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery { + q.minScore = &minScore + return q +} + +// Source returns JSON for the function score query. +func (q *FunctionScoreQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["function_score"] = query + + if q.query != nil { + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + } else if q.filter != nil { + src, err := q.filter.Source() + if err != nil { + return nil, err + } + query["filter"] = src + } + + if len(q.filters) == 1 && q.filters[0] == nil { + // Weight needs to be serialized on this level. 
+ if weight := q.scoreFuncs[0].GetWeight(); weight != nil { + query["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[0].Source() + if err != nil { + return nil, err + } + query[q.scoreFuncs[0].Name()] = src + } else { + funcs := make([]interface{}, len(q.filters)) + for i, filter := range q.filters { + hsh := make(map[string]interface{}) + if filter != nil { + src, err := filter.Source() + if err != nil { + return nil, err + } + hsh["filter"] = src + } + // Weight needs to be serialized on this level. + if weight := q.scoreFuncs[i].GetWeight(); weight != nil { + hsh["weight"] = weight + } + // Serialize the score function + src, err := q.scoreFuncs[i].Source() + if err != nil { + return nil, err + } + hsh[q.scoreFuncs[i].Name()] = src + funcs[i] = hsh + } + query["functions"] = funcs + } + + if q.scoreMode != "" { + query["score_mode"] = q.scoreMode + } + if q.boostMode != "" { + query["boost_mode"] = q.boostMode + } + if q.maxBoost != nil { + query["max_boost"] = *q.maxBoost + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.minScore != nil { + query["min_score"] = *q.minScore + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go new file mode 100644 index 0000000000000000000000000000000000000000..622b645bbc1321d9a5ac8b69ffa898a7feea3d5c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fsq_score_funcs.go @@ -0,0 +1,567 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "strings" +) + +// ScoreFunction is used in combination with the Function Score Query. +type ScoreFunction interface { + Name() string + GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery + Source() (interface{}, error) +} + +// -- Exponential Decay -- + +// ExponentialDecayFunction builds an exponential decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html +// for details. +type ExponentialDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewExponentialDecayFunction creates a new ExponentialDecayFunction. +func NewExponentialDecayFunction() *ExponentialDecayFunction { + return &ExponentialDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ExponentialDecayFunction) Name() string { + return "exp" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. 
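+//
+// A hypothetical sketch of a decay on a numeric field; the field name and
+// values are illustrative only:
+//
+//	fn := NewExponentialDecayFunction().
+//		FieldName("age").
+//		Origin(30).
+//		Scale(10).
+//		Decay(0.5)
+//	q := NewFunctionScoreQuery().AddScoreFunc(fn)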
+func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *ExponentialDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *ExponentialDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + return source, nil +} + +// -- Gauss Decay -- + +// GaussDecayFunction builds a gauss decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html +// for details. +type GaussDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewGaussDecayFunction returns a new GaussDecayFunction. +func NewGaussDecayFunction() *GaussDecayFunction { + return &GaussDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *GaussDecayFunction) Name() string { + return "gauss" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. 
+// If no decay is defined, documents at the distance Scale will be scored 0.5. +func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *GaussDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction { + fn.multiValueMode = mode + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *GaussDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Linear Decay -- + +// LinearDecayFunction builds a linear decay score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html +// for details. +type LinearDecayFunction struct { + fieldName string + origin interface{} + scale interface{} + decay *float64 + offset interface{} + multiValueMode string + weight *float64 +} + +// NewLinearDecayFunction initializes and returns a new LinearDecayFunction. +func NewLinearDecayFunction() *LinearDecayFunction { + return &LinearDecayFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *LinearDecayFunction) Name() string { + return "linear" +} + +// FieldName specifies the name of the field to which this decay function is applied to. +func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction { + fn.fieldName = fieldName + return fn +} + +// Origin defines the "central point" by which the decay function calculates +// "distance". +func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction { + fn.origin = origin + return fn +} + +// Scale defines the scale to be used with Decay. +func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction { + fn.scale = scale + return fn +} + +// Decay defines how documents are scored at the distance given a Scale. +// If no decay is defined, documents at the distance Scale will be scored 0.5. 
+func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction { + fn.decay = &decay + return fn +} + +// Offset, if defined, computes the decay function only for a distance +// greater than the defined offset. +func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction { + fn.offset = offset + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *LinearDecayFunction) GetWeight() *float64 { + return fn.weight +} + +// MultiValueMode specifies how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction { + fn.multiValueMode = mode + return fn +} + +// GetMultiValueMode returns how the decay function should be calculated +// on a field that has multiple values. +// Valid modes are: min, max, avg, and sum. +func (fn *LinearDecayFunction) GetMultiValueMode() string { + return fn.multiValueMode +} + +// Source returns the serializable JSON data of this score function. +func (fn *LinearDecayFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source[fn.fieldName] = params + if fn.origin != nil { + params["origin"] = fn.origin + } + params["scale"] = fn.scale + if fn.decay != nil && *fn.decay > 0 { + params["decay"] = *fn.decay + } + if fn.offset != nil { + params["offset"] = fn.offset + } + if fn.multiValueMode != "" { + source["multi_value_mode"] = fn.multiValueMode + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Script -- + +// ScriptFunction builds a script score function. It uses a script to +// compute or influence the score of documents that match with the inner +// query or filter. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_script_score +// for details. +type ScriptFunction struct { + script *Script + weight *float64 +} + +// NewScriptFunction initializes and returns a new ScriptFunction. +func NewScriptFunction(script *Script) *ScriptFunction { + return &ScriptFunction{ + script: script, + } +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *ScriptFunction) Name() string { + return "script_score" +} + +// Script specifies the script to be executed. +func (fn *ScriptFunction) Script(script *Script) *ScriptFunction { + fn.script = script + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. 
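+//
+// A hypothetical script_score sketch; NewScript and the script source shown
+// here are assumed/illustrative:
+//
+//	fn := NewScriptFunction(NewScript("_score * doc['popularity'].value")).Weight(1.5)
+//	q := NewFunctionScoreQuery().AddScoreFunc(fn)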
+func (fn *ScriptFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *ScriptFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.script != nil { + src, err := fn.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Field value factor -- + +// FieldValueFactorFunction is a function score function that allows you +// to use a field from a document to influence the score. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_field_value_factor. +type FieldValueFactorFunction struct { + field string + factor *float64 + missing *float64 + weight *float64 + modifier string +} + +// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction. +func NewFieldValueFactorFunction() *FieldValueFactorFunction { + return &FieldValueFactorFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *FieldValueFactorFunction) Name() string { + return "field_value_factor" +} + +// Field is the field to be extracted from the document. +func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction { + fn.field = field + return fn +} + +// Factor is the (optional) factor to multiply the field with. If you do not +// specify a factor, the default is 1. +func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction { + fn.factor = &factor + return fn +} + +// Modifier to apply to the field value. It can be one of: none, log, log1p, +// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none. +func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction { + fn.modifier = modifier + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *FieldValueFactorFunction) GetWeight() *float64 { + return fn.weight +} + +// Missing is used if a document does not have that field. +func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction { + fn.missing = &missing + return fn +} + +// Source returns the serializable JSON data of this score function. +func (fn *FieldValueFactorFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.field != "" { + source["field"] = fn.field + } + if fn.factor != nil { + source["factor"] = *fn.factor + } + if fn.missing != nil { + source["missing"] = *fn.missing + } + if fn.modifier != "" { + source["modifier"] = strings.ToLower(fn.modifier) + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} + +// -- Weight Factor -- + +// WeightFactorFunction builds a weight factor function that multiplies +// the weight to the score. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_weight +// for details. +type WeightFactorFunction struct { + weight float64 +} + +// NewWeightFactorFunction initializes and returns a new WeightFactorFunction. +func NewWeightFactorFunction(weight float64) *WeightFactorFunction { + return &WeightFactorFunction{weight: weight} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *WeightFactorFunction) Name() string { + return "weight" +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction { + fn.weight = weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *WeightFactorFunction) GetWeight() *float64 { + return &fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *WeightFactorFunction) Source() (interface{}, error) { + // Notice that the weight has to be serialized in FunctionScoreQuery. + return fn.weight, nil +} + +// -- Random -- + +// RandomFunction builds a random score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_random +// for details. +type RandomFunction struct { + seed interface{} + weight *float64 +} + +// NewRandomFunction initializes and returns a new RandomFunction. +func NewRandomFunction() *RandomFunction { + return &RandomFunction{} +} + +// Name represents the JSON field name under which the output of Source +// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). +func (fn *RandomFunction) Name() string { + return "random_score" +} + +// Seed is documented in 1.6 as a numeric value. However, in the source code +// of the Java client, it also accepts strings. So we accept both here, too. +func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction { + fn.seed = seed + return fn +} + +// Weight adjusts the score of the score function. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-function-score-query.html#_using_function_score +// for details. +func (fn *RandomFunction) Weight(weight float64) *RandomFunction { + fn.weight = &weight + return fn +} + +// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. +// Returns nil if weight is not specified. +func (fn *RandomFunction) GetWeight() *float64 { + return fn.weight +} + +// Source returns the serializable JSON data of this score function. +func (fn *RandomFunction) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fn.seed != nil { + source["seed"] = fn.seed + } + // Notice that the weight has to be serialized in FunctionScoreQuery. + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go new file mode 100644 index 0000000000000000000000000000000000000000..b98f9c7f0c59fe095096de73cfb778a9467866b8 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_fuzzy.go @@ -0,0 +1,120 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FuzzyQuery uses similarity based on Levenshtein edit distance for +// string fields, and a +/- margin on numeric and date fields. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-fuzzy-query.html +type FuzzyQuery struct { + name string + value interface{} + boost *float64 + fuzziness interface{} + prefixLength *int + maxExpansions *int + transpositions *bool + rewrite string + queryName string +} + +// NewFuzzyQuery creates a new fuzzy query. +func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery { + q := &FuzzyQuery{ + name: name, + value: value, + } + return q +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by +// the boost provided. +func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery { + q.boost = &boost + return q +} + +// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings +// like "auto", "0..1", "1..4" or "0.0..1.0". +func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery { + q.prefixLength = &prefixLength + return q +} + +func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery { + q.maxExpansions = &maxExpansions + return q +} + +func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery { + q.transpositions = &transpositions + return q +} + +func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *FuzzyQuery) Source() (interface{}, error) { + // { + // "fuzzy" : { + // "user" : { + // "value" : "ki", + // "boost" : 1.0, + // "fuzziness" : 2, + // "prefix_length" : 0, + // "max_expansions" : 100 + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["fuzzy"] = query + + fq := make(map[string]interface{}) + query[q.name] = fq + + fq["value"] = q.value + + if q.boost != nil { + fq["boost"] = *q.boost + } + if q.transpositions != nil { + fq["transpositions"] = *q.transpositions + } + if q.fuzziness != nil { + fq["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + fq["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + fq["max_expansions"] = *q.maxExpansions + } + if q.rewrite != "" { + fq["rewrite"] = q.rewrite + } + if q.queryName != "" { + fq["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go new file mode 100644 index 0000000000000000000000000000000000000000..2b9cda746d5ea7aa0715d52badf7a903c979d0a4 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_bounding_box.go @@ -0,0 +1,121 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. 
+ +package elastic + +import "errors" + +// GeoBoundingBoxQuery allows to filter hits based on a point location using +// a bounding box. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-geo-bounding-box-query.html +type GeoBoundingBoxQuery struct { + name string + top *float64 + left *float64 + bottom *float64 + right *float64 + typ string + queryName string +} + +// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery. +func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery { + return &GeoBoundingBoxQuery{ + name: name, + } +} + +func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery { + q.top = &top + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomRight(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery { + q.bottom = &bottom + q.left = &left + return q +} + +func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.BottomLeft(point.Lat, point.Lon) +} + +func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery { + q.top = &top + q.right = &right + return q +} + +func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { + return q.TopRight(point.Lat, point.Lon) +} + +// Type sets the type of executing the geo bounding box. It can be either +// memory or indexed. It defaults to memory. +func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery { + q.typ = typ + return q +} + +func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoBoundingBoxQuery) Source() (interface{}, error) { + // { + // "geo_bbox" : { + // ... + // } + // } + + if q.top == nil { + return nil, errors.New("geo_bounding_box requires top latitude to be set") + } + if q.bottom == nil { + return nil, errors.New("geo_bounding_box requires bottom latitude to be set") + } + if q.right == nil { + return nil, errors.New("geo_bounding_box requires right longitude to be set") + } + if q.left == nil { + return nil, errors.New("geo_bounding_box requires left longitude to be set") + } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["geo_bbox"] = params + + box := make(map[string]interface{}) + box["top_left"] = []float64{*q.left, *q.top} + box["bottom_right"] = []float64{*q.right, *q.bottom} + params[q.name] = box + + if q.typ != "" { + params["type"] = q.typ + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go new file mode 100644 index 0000000000000000000000000000000000000000..a10bd5e3c3ed40493c12ea6fbe21d6e843b02961 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_distance.go @@ -0,0 +1,116 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. 
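// Usage sketch for GeoBoundingBoxQuery (field name and coordinates are
// illustrative). All four edges must be set before calling Source(), otherwise
// an error is returned; note that top_left and bottom_right are emitted as
// [lon, lat] pairs.
//
//	q := NewGeoBoundingBoxQuery("pin.location").
//		TopLeft(40.73, -74.1).
//		BottomRight(40.01, -71.12)
//	src, err := q.Source() // err is non-nil if a corner is missing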
+// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoDistanceQuery filters documents that include only hits that exists +// within a specific distance from a geo point. +// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-geo-distance-query.html +type GeoDistanceQuery struct { + name string + distance string + lat float64 + lon float64 + geohash string + distanceType string + optimizeBbox string + queryName string +} + +// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery. +func NewGeoDistanceQuery(name string) *GeoDistanceQuery { + return &GeoDistanceQuery{name: name} +} + +func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery { + q.lat = point.Lat + q.lon = point.Lon + return q +} + +func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery { + q.lat = lat + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery { + q.lat = lat + return q +} + +func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery { + q.lon = lon + return q +} + +func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery { + q.geohash = geohash + return q +} + +func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery { + q.distance = distance + return q +} + +func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery { + q.distanceType = distanceType + return q +} + +func (q *GeoDistanceQuery) OptimizeBbox(optimizeBbox string) *GeoDistanceQuery { + q.optimizeBbox = optimizeBbox + return q +} + +func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoDistanceQuery) Source() (interface{}, error) { + // { + // "geo_distance" : { + // "distance" : "200km", + // "pin.location" : { + // "lat" : 40, + // "lon" : -70 + // } + // } + // } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + + if q.geohash != "" { + params[q.name] = q.geohash + } else { + location := make(map[string]interface{}) + location["lat"] = q.lat + location["lon"] = q.lon + params[q.name] = location + } + + if q.distance != "" { + params["distance"] = q.distance + } + if q.distanceType != "" { + params["distance_type"] = q.distanceType + } + if q.optimizeBbox != "" { + params["optimize_bbox"] = q.optimizeBbox + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + source["geo_distance"] = params + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go new file mode 100644 index 0000000000000000000000000000000000000000..1faaf24e02d7f8bfb813401648e1f957cb190733 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_geo_polygon.go @@ -0,0 +1,72 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// GeoPolygonQuery allows to include hits that only fall within a polygon of points. 
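// Usage sketch for GeoDistanceQuery (field, point and distance are
// illustrative). A geohash, if set, takes precedence over lat/lon when the
// query is serialized.
//
//	q := NewGeoDistanceQuery("pin.location").Point(40, -70).Distance("200km")
//	src, _ := q.Source()
//	// {"geo_distance":{"distance":"200km","pin.location":{"lat":40,"lon":-70}}}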
+// +// For more details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-geo-polygon-query.html +type GeoPolygonQuery struct { + name string + points []*GeoPoint + queryName string +} + +// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery. +func NewGeoPolygonQuery(name string) *GeoPolygonQuery { + return &GeoPolygonQuery{ + name: name, + points: make([]*GeoPoint, 0), + } +} + +// AddPoint adds a point from latitude and longitude. +func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery { + q.points = append(q.points, GeoPointFromLatLon(lat, lon)) + return q +} + +// AddGeoPoint adds a GeoPoint. +func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery { + q.points = append(q.points, point) + return q +} + +func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. +func (q *GeoPolygonQuery) Source() (interface{}, error) { + // "geo_polygon" : { + // "person.location" : { + // "points" : [ + // {"lat" : 40, "lon" : -70}, + // {"lat" : 30, "lon" : -80}, + // {"lat" : 20, "lon" : -90} + // ] + // } + // } + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["geo_polygon"] = params + + polygon := make(map[string]interface{}) + params[q.name] = polygon + + var points []interface{} + for _, point := range q.points { + points = append(points, point.Source()) + } + polygon["points"] = points + + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go new file mode 100644 index 0000000000000000000000000000000000000000..27cea7ad61d708d60b13f86281234fa463b0b8b9 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_child.go @@ -0,0 +1,131 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasChildQuery accepts a query and the child type to run against, and results +// in parent documents that have child docs matching the query. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-has-child-query.html +type HasChildQuery struct { + query Query + childType string + boost *float64 + scoreMode string + minChildren *int + maxChildren *int + shortCircuitCutoff *int + queryName string + innerHit *InnerHit +} + +// NewHasChildQuery creates and initializes a new has_child query. +func NewHasChildQuery(childType string, query Query) *HasChildQuery { + return &HasChildQuery{ + query: query, + childType: childType, + } +} + +// Boost sets the boost for this query. +func (q *HasChildQuery) Boost(boost float64) *HasChildQuery { + q.boost = &boost + return q +} + +// ScoreMode defines how the scores from the matching child documents +// are mapped into the parent document. Allowed values are: min, max, +// avg, or none. +func (q *HasChildQuery) ScoreMode(scoreMode string) *HasChildQuery { + q.scoreMode = scoreMode + return q +} + +// MinChildren defines the minimum number of children that are required +// to match for the parent to be considered a match. 
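// Usage sketch for GeoPolygonQuery (field name and points are illustrative);
// a polygon normally needs at least three points to be useful.
//
//	q := NewGeoPolygonQuery("person.location").
//		AddPoint(40, -70).
//		AddPoint(30, -80).
//		AddPoint(20, -90)
//	src, err := q.Source()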
+func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery { + q.minChildren = &minChildren + return q +} + +// MaxChildren defines the maximum number of children that are required +// to match for the parent to be considered a match. +func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery { + q.maxChildren = &maxChildren + return q +} + +// ShortCircuitCutoff configures what cut off point only to evaluate +// parent documents that contain the matching parent id terms instead +// of evaluating all parent docs. +func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery { + q.shortCircuitCutoff = &shortCircuitCutoff + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. +func (q *HasChildQuery) Source() (interface{}, error) { + // { + // "has_child" : { + // "type" : "blog_tag", + // "score_mode" : "min", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_child"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["type"] = q.childType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.scoreMode != "" { + query["score_mode"] = q.scoreMode + } + if q.minChildren != nil { + query["min_children"] = *q.minChildren + } + if q.maxChildren != nil { + query["max_children"] = *q.maxChildren + } + if q.shortCircuitCutoff != nil { + query["short_circuit_cutoff"] = *q.shortCircuitCutoff + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go new file mode 100644 index 0000000000000000000000000000000000000000..7df110cd2a394b73d86602a39c5a9470f4b529be --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_has_parent.go @@ -0,0 +1,97 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// HasParentQuery accepts a query and a parent type. The query is executed +// in the parent document space which is specified by the parent type. +// This query returns child documents which associated parents have matched. +// For the rest has_parent query has the same options and works in the +// same manner as has_child query. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-has-parent-query.html +type HasParentQuery struct { + query Query + parentType string + boost *float64 + score *bool + queryName string + innerHit *InnerHit +} + +// NewHasParentQuery creates and initializes a new has_parent query. 
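// Usage sketch for HasChildQuery (child type, field and value are illustrative;
// NewTermQuery is assumed to be the term query constructor from this package).
//
//	q := NewHasChildQuery("blog_tag", NewTermQuery("tag", "something")).
//		ScoreMode("min").
//		MinChildren(2)
//	src, err := q.Source()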
+func NewHasParentQuery(parentType string, query Query) *HasParentQuery { + return &HasParentQuery{ + query: query, + parentType: parentType, + } +} + +// Boost sets the boost for this query. +func (q *HasParentQuery) Boost(boost float64) *HasParentQuery { + q.boost = &boost + return q +} + +// Score defines if the parent score is mapped into the child documents. +func (q *HasParentQuery) Score(score bool) *HasParentQuery { + q.score = &score + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the function score query. +func (q *HasParentQuery) Source() (interface{}, error) { + // { + // "has_parent" : { + // "parent_type" : "blog", + // "query" : { + // "term" : { + // "tag" : "something" + // } + // } + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["has_parent"] = query + + src, err := q.query.Source() + if err != nil { + return nil, err + } + query["query"] = src + query["parent_type"] = q.parentType + if q.boost != nil { + query["boost"] = *q.boost + } + if q.score != nil { + query["score"] = *q.score + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go new file mode 100644 index 0000000000000000000000000000000000000000..42cc656721d17bdec12b6e33d3ac79ed2144866a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_ids.go @@ -0,0 +1,76 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IdsQuery filters documents that only have the provided ids. +// Note, this query uses the _uid field. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-ids-query.html +type IdsQuery struct { + types []string + values []string + boost *float64 + queryName string +} + +// NewIdsQuery creates and initializes a new ids query. +func NewIdsQuery(types ...string) *IdsQuery { + return &IdsQuery{ + types: types, + values: make([]string, 0), + } +} + +// Ids adds ids to the filter. +func (q *IdsQuery) Ids(ids ...string) *IdsQuery { + q.values = append(q.values, ids...) + return q +} + +// Boost sets the boost for this query. +func (q *IdsQuery) Boost(boost float64) *IdsQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter. +func (q *IdsQuery) QueryName(queryName string) *IdsQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
+func (q *IdsQuery) Source() (interface{}, error) { + // { + // "ids" : { + // "type" : "my_type", + // "values" : ["1", "4", "100"] + // } + // } + + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["ids"] = query + + // type(s) + if len(q.types) == 1 { + query["type"] = q.types[0] + } else if len(q.types) > 1 { + query["types"] = q.types + } + + // values + query["values"] = q.values + + if q.boost != nil { + query["boost"] = *q.boost + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go new file mode 100644 index 0000000000000000000000000000000000000000..ed5ec9d84112a314f8de12b6b4c225aec66663cf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_indices.go @@ -0,0 +1,89 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// IndicesQuery can be used when executed across multiple indices, allowing +// to have a query that executes only when executed on an index that matches +// a specific list of indices, and another query that executes when it is +// executed on an index that does not match the listed indices. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-indices-query.html +type IndicesQuery struct { + query Query + indices []string + noMatchQueryType string + noMatchQuery Query + queryName string +} + +// NewIndicesQuery creates and initializes a new indices query. +func NewIndicesQuery(query Query, indices ...string) *IndicesQuery { + return &IndicesQuery{ + query: query, + indices: indices, + } +} + +// NoMatchQuery sets the query to use when it executes on an index that +// does not match the indices provided. +func (q *IndicesQuery) NoMatchQuery(query Query) *IndicesQuery { + q.noMatchQuery = query + return q +} + +// NoMatchQueryType sets the no match query which can be either all or none. +func (q *IndicesQuery) NoMatchQueryType(typ string) *IndicesQuery { + q.noMatchQueryType = typ + return q +} + +// QueryName sets the query name for the filter. +func (q *IndicesQuery) QueryName(queryName string) *IndicesQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
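// Usage sketch for IdsQuery (type and ids are illustrative). With a single type
// the body uses the "type" key, with several types it uses "types".
//
//	q := NewIdsQuery("my_type").Ids("1", "4", "100").Boost(1.5)
//	src, _ := q.Source()
//	// {"ids":{"boost":1.5,"type":"my_type","values":["1","4","100"]}}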
+func (q *IndicesQuery) Source() (interface{}, error) { + // { + // "indices" : { + // "indices" : ["index1", "index2"], + // "query" : { + // "term" : { "tag" : "wow" } + // }, + // "no_match_query" : { + // "term" : { "tag" : "kow" } + // } + // } + // } + + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["indices"] = params + + params["indices"] = q.indices + + src, err := q.query.Source() + if err != nil { + return nil, err + } + params["query"] = src + + if q.noMatchQuery != nil { + src, err := q.noMatchQuery.Source() + if err != nil { + return nil, err + } + params["no_match_query"] = src + } else if q.noMatchQueryType != "" { + params["no_match_query"] = q.noMatchQueryType + } + if q.queryName != "" { + params["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go new file mode 100644 index 0000000000000000000000000000000000000000..0c44c5d575f010155f75ce62aef4ba0282a11d9f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match.go @@ -0,0 +1,214 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MatchQuery is a family of queries that accepts text/numerics/dates, +// analyzes them, and constructs a query. +// +// To create a new MatchQuery, use NewMatchQuery. To create specific types +// of queries, e.g. a match_phrase query, use NewMatchPhrQuery(...).Type("phrase"), +// or use one of the shortcuts e.g. NewMatchPhraseQuery(...). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-match-query.html +type MatchQuery struct { + name string + text interface{} + typ string // boolean, phrase, phrase_prefix + operator string // or / and + analyzer string + boost *float64 + slop *int + fuzziness string + prefixLength *int + maxExpansions *int + minimumShouldMatch string + rewrite string + fuzzyRewrite string + lenient *bool + fuzzyTranspositions *bool + zeroTermsQuery string + cutoffFrequency *float64 + queryName string +} + +// NewMatchQuery creates and initializes a new MatchQuery. +func NewMatchQuery(name string, text interface{}) *MatchQuery { + return &MatchQuery{name: name, text: text} +} + +// NewMatchPhraseQuery creates and initializes a new MatchQuery of type phrase. +func NewMatchPhraseQuery(name string, text interface{}) *MatchQuery { + return &MatchQuery{name: name, text: text, typ: "phrase"} +} + +// NewMatchPhrasePrefixQuery creates and initializes a new MatchQuery of type phrase_prefix. +func NewMatchPhrasePrefixQuery(name string, text interface{}) *MatchQuery { + return &MatchQuery{name: name, text: text, typ: "phrase_prefix"} +} + +// Type can be "boolean", "phrase", or "phrase_prefix". Defaults to "boolean". +func (q *MatchQuery) Type(typ string) *MatchQuery { + q.typ = typ + return q +} + +// Operator sets the operator to use when using a boolean query. +// Can be "AND" or "OR" (default). +func (q *MatchQuery) Operator(operator string) *MatchQuery { + q.operator = operator + return q +} + +// Analyzer explicitly sets the analyzer to use. It defaults to use explicit +// mapping config for the field, or, if not set, the default search analyzer. 
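// Usage sketch for IndicesQuery (index names, field and values are illustrative;
// NewTermQuery is assumed to be the term query constructor from this package).
// Either a full NoMatchQuery or the shorthand NoMatchQueryType ("all"/"none")
// can be supplied.
//
//	q := NewIndicesQuery(NewTermQuery("tag", "wow"), "index1", "index2").
//		NoMatchQuery(NewTermQuery("tag", "kow"))
//	src, err := q.Source()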
+func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery { + q.analyzer = analyzer + return q +} + +// Boost sets the boost to apply to this query. +func (q *MatchQuery) Boost(boost float64) *MatchQuery { + q.boost = &boost + return q +} + +// Slop sets the phrase slop if evaluated to a phrase query type. +func (q *MatchQuery) Slop(slop int) *MatchQuery { + q.slop = &slop + return q +} + +// Fuzziness sets the fuzziness when evaluated to a fuzzy query type. +// Defaults to "AUTO". +func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery { + q.fuzziness = fuzziness + return q +} + +func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery { + q.prefixLength = &prefixLength + return q +} + +// MaxExpansions is used with fuzzy or prefix type queries. It specifies +// the number of term expansions to use. It defaults to unbounded so that +// its recommended to set it to a reasonable value for faster execution. +func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery { + q.maxExpansions = &maxExpansions + return q +} + +// CutoffFrequency can be a value in [0..1] (or an absolute number >=1). +// It represents the maximum treshold of a terms document frequency to be +// considered a low frequency term. +func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery { + q.cutoffFrequency = &cutoff + return q +} + +func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *MatchQuery) Rewrite(rewrite string) *MatchQuery { + q.rewrite = rewrite + return q +} + +func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery { + q.fuzzyTranspositions = &fuzzyTranspositions + return q +} + +// Lenient specifies whether format based failures will be ignored. +func (q *MatchQuery) Lenient(lenient bool) *MatchQuery { + q.lenient = &lenient + return q +} + +// ZeroTermsQuery can be "all" or "none". +func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery { + q.zeroTermsQuery = zeroTermsQuery + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *MatchQuery) QueryName(queryName string) *MatchQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the function score query. 
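// Usage sketch for MatchQuery (field and text are illustrative). Phrase-style
// queries can be built either via Type or via the shortcut constructors.
//
//	q1 := NewMatchQuery("message", "this is a test").Operator("and")
//	q2 := NewMatchPhraseQuery("message", "this is a test").Slop(2)
//	src, _ := q1.Source()
//	// {"match":{"message":{"operator":"and","query":"this is a test"}}}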
+func (q *MatchQuery) Source() (interface{}, error) { + // {"match":{"name":{"query":"value","type":"boolean/phrase"}}} + source := make(map[string]interface{}) + + match := make(map[string]interface{}) + source["match"] = match + + query := make(map[string]interface{}) + match[q.name] = query + + query["query"] = q.text + + if q.typ != "" { + query["type"] = q.typ + } + if q.operator != "" { + query["operator"] = q.operator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.slop != nil { + query["slop"] = *q.slop + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + query["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + query["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.fuzzyTranspositions != nil { + query["fuzzy_transpositions"] = *q.fuzzyTranspositions + } + if q.zeroTermsQuery != "" { + query["zero_terms_query"] = q.zeroTermsQuery + } + if q.cutoffFrequency != nil { + query["cutoff_frequency"] = q.cutoffFrequency + } + if q.queryName != "" { + query["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go new file mode 100644 index 0000000000000000000000000000000000000000..d681ba23793bf29dd2680657cdef706fe5c11213 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_match_all.go @@ -0,0 +1,41 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// MatchAllQuery is the most simple query, which matches all documents, +// giving them all a _score of 1.0. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-match-all-query.html +type MatchAllQuery struct { + boost *float64 +} + +// NewMatchAllQuery creates and initializes a new match all query. +func NewMatchAllQuery() *MatchAllQuery { + return &MatchAllQuery{} +} + +// Boost sets the boost for this query. Documents matching this query will +// (in addition to the normal weightings) have their score multiplied by the +// boost provided. +func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery { + q.boost = &boost + return q +} + +// Source returns JSON for the function score query. +func (q MatchAllQuery) Source() (interface{}, error) { + // { + // "match_all" : { ... } + // } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["match_all"] = params + if q.boost != nil { + params["boost"] = *q.boost + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go new file mode 100644 index 0000000000000000000000000000000000000000..606f43b0c5d714d413e577804dcef872aff5001b --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_more_like_this.go @@ -0,0 +1,412 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// MoreLikeThis query (MLT Query) finds documents that are "like" a given +// set of documents. In order to do so, MLT selects a set of representative +// terms of these input documents, forms a query using these terms, executes +// the query and returns the results. The user controls the input documents, +// how the terms should be selected and how the query is formed. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-mlt-query.html +type MoreLikeThisQuery struct { + fields []string + docs []*MoreLikeThisQueryItem + unlikeDocs []*MoreLikeThisQueryItem + include *bool + minimumShouldMatch string + minTermFreq *int + maxQueryTerms *int + stopWords []string + minDocFreq *int + maxDocFreq *int + minWordLen *int + maxWordLen *int + boostTerms *float64 + boost *float64 + analyzer string + failOnUnsupportedField *bool + queryName string +} + +// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery. +func NewMoreLikeThisQuery() *MoreLikeThisQuery { + return &MoreLikeThisQuery{ + fields: make([]string, 0), + stopWords: make([]string, 0), + docs: make([]*MoreLikeThisQueryItem, 0), + unlikeDocs: make([]*MoreLikeThisQueryItem, 0), + } +} + +// Field adds one or more field names to the query. +func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery { + q.fields = append(q.fields, fields...) + return q +} + +// StopWord sets the stopwords. Any word in this set is considered +// "uninteresting" and ignored. Even if your Analyzer allows stopwords, +// you might want to tell the MoreLikeThis code to ignore them, as for +// the purposes of document similarity it seems reasonable to assume that +// "a stop word is never interesting". +func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery { + q.stopWords = append(q.stopWords, stopWords...) + return q +} + +// LikeText sets the text to use in order to find documents that are "like" this. +func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery { + for _, s := range likeTexts { + item := NewMoreLikeThisQueryItem().LikeText(s) + q.docs = append(q.docs, item) + } + return q +} + +// LikeItems sets the documents to use in order to find documents that are "like" this. +func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery { + q.docs = append(q.docs, docs...) + return q +} + +// IgnoreLikeText sets the text from which the terms should not be selected from. +func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery { + for _, s := range ignoreLikeText { + item := NewMoreLikeThisQueryItem().LikeText(s) + q.unlikeDocs = append(q.unlikeDocs, item) + } + return q +} + +// IgnoreLikeItems sets the documents from which the terms should not be selected from. +func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery { + q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...) + return q +} + +// Ids sets the document ids to use in order to find documents that are "like" this. +func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery { + for _, id := range ids { + item := NewMoreLikeThisQueryItem().Id(id) + q.docs = append(q.docs, item) + } + return q +} + +// Include specifies whether the input documents should also be included +// in the results returned. Defaults to false. 
+func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery { + q.include = &include + return q +} + +// MinimumShouldMatch sets the number of terms that must match the generated +// query expressed in the common syntax for minimum should match. +// The default value is "30%". +// +// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0. +func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +// MinTermFreq is the frequency below which terms will be ignored in the +// source doc. The default frequency is 2. +func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery { + q.minTermFreq = &minTermFreq + return q +} + +// MaxQueryTerms sets the maximum number of query terms that will be included +// in any generated query. It defaults to 25. +func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery { + q.maxQueryTerms = &maxQueryTerms + return q +} + +// MinDocFreq sets the frequency at which words will be ignored which do +// not occur in at least this many docs. The default is 5. +func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery { + q.minDocFreq = &minDocFreq + return q +} + +// MaxDocFreq sets the maximum frequency for which words may still appear. +// Words that appear in more than this many docs will be ignored. +// It defaults to unbounded. +func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery { + q.maxDocFreq = &maxDocFreq + return q +} + +// MinWordLength sets the minimum word length below which words will be +// ignored. It defaults to 0. +func (q *MoreLikeThisQuery) MinWordLen(minWordLen int) *MoreLikeThisQuery { + q.minWordLen = &minWordLen + return q +} + +// MaxWordLen sets the maximum word length above which words will be ignored. +// Defaults to unbounded (0). +func (q *MoreLikeThisQuery) MaxWordLen(maxWordLen int) *MoreLikeThisQuery { + q.maxWordLen = &maxWordLen + return q +} + +// BoostTerms sets the boost factor to use when boosting terms. +// It defaults to 1. +func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery { + q.boostTerms = &boostTerms + return q +} + +// Analyzer specifies the analyzer that will be use to analyze the text. +// Defaults to the analyzer associated with the field. +func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery { + q.analyzer = analyzer + return q +} + +// Boost sets the boost for this query. +func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery { + q.boost = &boost + return q +} + +// FailOnUnsupportedField indicates whether to fail or return no result +// when this query is run against a field which is not supported such as +// a binary/numeric field. +func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery { + q.failOnUnsupportedField = &fail + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery { + q.queryName = queryName + return q +} + +// Source creates the source for the MLT query. +// It may return an error if the caller forgot to specify any documents to +// be "liked" in the MoreLikeThisQuery. +func (q *MoreLikeThisQuery) Source() (interface{}, error) { + // { + // "match_all" : { ... 
} + // } + if len(q.docs) == 0 { + return nil, errors.New(`more_like_this requires some documents to be "liked"`) + } + + source := make(map[string]interface{}) + + params := make(map[string]interface{}) + source["mlt"] = params + + if len(q.fields) > 0 { + params["fields"] = q.fields + } + + var likes []interface{} + for _, doc := range q.docs { + src, err := doc.Source() + if err != nil { + return nil, err + } + likes = append(likes, src) + } + params["like"] = likes + + if len(q.unlikeDocs) > 0 { + var dontLikes []interface{} + for _, doc := range q.unlikeDocs { + src, err := doc.Source() + if err != nil { + return nil, err + } + dontLikes = append(dontLikes, src) + } + params["unlike"] = dontLikes + } + + if q.minimumShouldMatch != "" { + params["minimum_should_match"] = q.minimumShouldMatch + } + if q.minTermFreq != nil { + params["min_term_freq"] = *q.minTermFreq + } + if q.maxQueryTerms != nil { + params["max_query_terms"] = *q.maxQueryTerms + } + if len(q.stopWords) > 0 { + params["stop_words"] = q.stopWords + } + if q.minDocFreq != nil { + params["min_doc_freq"] = *q.minDocFreq + } + if q.maxDocFreq != nil { + params["max_doc_freq"] = *q.maxDocFreq + } + if q.minWordLen != nil { + params["min_word_len"] = *q.minWordLen + } + if q.maxWordLen != nil { + params["max_word_len"] = *q.maxWordLen + } + if q.boostTerms != nil { + params["boost_terms"] = *q.boostTerms + } + if q.boost != nil { + params["boost"] = *q.boost + } + if q.analyzer != "" { + params["analyzer"] = q.analyzer + } + if q.failOnUnsupportedField != nil { + params["fail_on_unsupported_field"] = *q.failOnUnsupportedField + } + if q.queryName != "" { + params["_name"] = q.queryName + } + if q.include != nil { + params["include"] = *q.include + } + + return source, nil +} + +// -- MoreLikeThisQueryItem -- + +// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery +// to be "liked" or "unliked". +type MoreLikeThisQueryItem struct { + likeText string + + index string + typ string + id string + doc interface{} + fields []string + routing string + fsc *FetchSourceContext + version int64 + versionType string +} + +// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. +func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { + return &MoreLikeThisQueryItem{ + version: -1, + } +} + +// LikeText represents a text to be "liked". +func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { + item.likeText = likeText + return item +} + +// Index represents the index of the item. +func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { + item.index = index + return item +} + +// Type represents the document type of the item. +func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { + item.typ = typ + return item +} + +// Id represents the document id of the item. +func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { + item.id = id + return item +} + +// Doc represents a raw document template for the item. +func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { + item.doc = doc + return item +} + +// Fields represents the list of fields of the item. +func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { + item.fields = append(item.fields, fields...) + return item +} + +// Routing sets the routing associated with the item. 
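// Usage sketch for MoreLikeThisQuery (fields and text are illustrative). At
// least one "liked" text or document must be provided, otherwise Source()
// returns an error.
//
//	q := NewMoreLikeThisQuery().
//		Field("title", "description").
//		LikeText("Once upon a time").
//		MinTermFreq(1).
//		MaxQueryTerms(12)
//	src, err := q.Source()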
+func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { + item.routing = routing + return item +} + +// FetchSourceContext represents the fetch source of the item which controls +// if and how _source should be returned. +func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { + item.fsc = fsc + return item +} + +// Version specifies the version of the item. +func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { + item.version = version + return item +} + +// VersionType represents the version type of the item. +func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { + item.versionType = versionType + return item +} + +// Source returns the JSON-serializable fragment of the entity. +func (item *MoreLikeThisQueryItem) Source() (interface{}, error) { + if item.likeText != "" { + return item.likeText, nil + } + + source := make(map[string]interface{}) + + if item.index != "" { + source["_index"] = item.index + } + if item.typ != "" { + source["_type"] = item.typ + } + if item.id != "" { + source["_id"] = item.id + } + if item.doc != nil { + source["doc"] = item.doc + } + if len(item.fields) > 0 { + source["fields"] = item.fields + } + if item.routing != "" { + source["_routing"] = item.routing + } + if item.fsc != nil { + src, err := item.fsc.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + if item.version >= 0 { + source["_version"] = item.version + } + if item.versionType != "" { + source["_version_type"] = item.versionType + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go new file mode 100644 index 0000000000000000000000000000000000000000..9a149fed5524b2d1c020774e1ff49ba97b7ba370 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_multi_match.go @@ -0,0 +1,275 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strings" +) + +// MultiMatchQuery builds on the MatchQuery to allow multi-field queries. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-multi-match-query.html +type MultiMatchQuery struct { + text interface{} + fields []string + fieldBoosts map[string]*float64 + typ string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix + operator string // AND or OR + analyzer string + boost *float64 + slop *int + fuzziness string + prefixLength *int + maxExpansions *int + minimumShouldMatch string + rewrite string + fuzzyRewrite string + tieBreaker *float64 + lenient *bool + cutoffFrequency *float64 + zeroTermsQuery string + queryName string +} + +// MultiMatchQuery creates and initializes a new MultiMatchQuery. +func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery { + q := &MultiMatchQuery{ + text: text, + fields: make([]string, 0), + fieldBoosts: make(map[string]*float64), + } + q.fields = append(q.fields, fields...) + return q +} + +// Field adds a field to run the multi match against. +func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery { + q.fields = append(q.fields, field) + return q +} + +// FieldWithBoost adds a field to run the multi match against with a specific boost. 
+func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery { + q.fields = append(q.fields, field) + q.fieldBoosts[field] = &boost + return q +} + +// Type can be "best_fields", "boolean", "most_fields", "cross_fields", +// "phrase", or "phrase_prefix". +func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery { + var zero = float64(0.0) + var one = float64(1.0) + + switch strings.ToLower(typ) { + default: // best_fields / boolean + q.typ = "best_fields" + q.tieBreaker = &zero + case "most_fields": + q.typ = "most_fields" + q.tieBreaker = &one + case "cross_fields": + q.typ = "cross_fields" + q.tieBreaker = &zero + case "phrase": + q.typ = "phrase" + q.tieBreaker = &zero + case "phrase_prefix": + q.typ = "phrase_prefix" + q.tieBreaker = &zero + } + return q +} + +// Operator sets the operator to use when using boolean query. +// It can be either AND or OR (default). +func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery { + q.operator = operator + return q +} + +// Analyzer sets the analyzer to use explicitly. It defaults to use explicit +// mapping config for the field, or, if not set, the default search analyzer. +func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery { + q.analyzer = analyzer + return q +} + +// Boost sets the boost for this query. +func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery { + q.boost = &boost + return q +} + +// Slop sets the phrase slop if evaluated to a phrase query type. +func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery { + q.slop = &slop + return q +} + +// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type. +// It defaults to "AUTO". +func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery { + q.fuzziness = fuzziness + return q +} + +// PrefixLength for the fuzzy process. +func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery { + q.prefixLength = &prefixLength + return q +} + +// MaxExpansions is the number of term expansions to use when using fuzzy +// or prefix type query. It defaults to unbounded so it's recommended +// to set it to a reasonable value for faster execution. +func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery { + q.maxExpansions = &maxExpansions + return q +} + +// MinimumShouldMatch represents the minimum number of optional should clauses +// to match. +func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery { + q.rewrite = rewrite + return q +} + +func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +// TieBreaker for "best-match" disjunction queries (OR queries). +// The tie breaker capability allows documents that match more than one +// query clause (in this case on more than one field) to be scored better +// than documents that match only the best of the fields, without confusing +// this with the better case of two distinct matches in the multiple fields. +// +// A tie-breaker value of 1.0 is interpreted as a signal to score queries as +// "most-match" queries where all matching query clauses are considered for scoring. +func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery { + q.tieBreaker = &tieBreaker + return q +} + +// Lenient indicates whether format based failures will be ignored. 
+func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery { + q.lenient = &lenient + return q +} + +// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1) +// representing the maximum threshold of a terms document frequency to be +// considered a low frequency term. +func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery { + q.cutoffFrequency = &cutoff + return q +} + +// ZeroTermsQuery can be "all" or "none". +func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery { + q.zeroTermsQuery = zeroTermsQuery + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *MultiMatchQuery) Source() (interface{}, error) { + // + // { + // "multi_match" : { + // "query" : "this is a test", + // "fields" : [ "subject", "message" ] + // } + // } + + source := make(map[string]interface{}) + + multiMatch := make(map[string]interface{}) + source["multi_match"] = multiMatch + + multiMatch["query"] = q.text + + if len(q.fields) > 0 { + var fields []string + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + multiMatch["fields"] = fields + } + + if q.typ != "" { + multiMatch["type"] = q.typ + } + + if q.operator != "" { + multiMatch["operator"] = q.operator + } + if q.analyzer != "" { + multiMatch["analyzer"] = q.analyzer + } + if q.boost != nil { + multiMatch["boost"] = *q.boost + } + if q.slop != nil { + multiMatch["slop"] = *q.slop + } + if q.fuzziness != "" { + multiMatch["fuzziness"] = q.fuzziness + } + if q.prefixLength != nil { + multiMatch["prefix_length"] = *q.prefixLength + } + if q.maxExpansions != nil { + multiMatch["max_expansions"] = *q.maxExpansions + } + if q.minimumShouldMatch != "" { + multiMatch["minimum_should_match"] = q.minimumShouldMatch + } + if q.rewrite != "" { + multiMatch["rewrite"] = q.rewrite + } + if q.fuzzyRewrite != "" { + multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.tieBreaker != nil { + multiMatch["tie_breaker"] = *q.tieBreaker + } + if q.lenient != nil { + multiMatch["lenient"] = *q.lenient + } + if q.cutoffFrequency != nil { + multiMatch["cutoff_frequency"] = *q.cutoffFrequency + } + if q.zeroTermsQuery != "" { + multiMatch["zero_terms_query"] = q.zeroTermsQuery + } + if q.queryName != "" { + multiMatch["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go new file mode 100644 index 0000000000000000000000000000000000000000..11e3bb1c6687f33b891423c60bd56d804e53eb1a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_nested.go @@ -0,0 +1,85 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// NestedQuery allows to query nested objects / docs. +// The query is executed against the nested objects / docs as if they were +// indexed as separate docs (they are, internally) and resulting in the +// root parent doc (or parent nested mapping). 
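// Usage sketch for MultiMatchQuery (query text and field names are
// illustrative). FieldWithBoost appends the boost to the field name with a
// caret (e.g. "subject^3.000000" given the %f formatting), and Type also
// presets the tie_breaker as described above.
//
//	q := NewMultiMatchQuery("this is a test", "message").
//		FieldWithBoost("subject", 3).
//		Type("best_fields")
//	src, _ := q.Source()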
+// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-nested-query.html +type NestedQuery struct { + query Query + path string + scoreMode string + boost *float64 + queryName string + innerHit *InnerHit +} + +// NewNestedQuery creates and initializes a new NestedQuery. +func NewNestedQuery(path string, query Query) *NestedQuery { + return &NestedQuery{path: path, query: query} +} + +// ScoreMode specifies the score mode. +func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery { + q.scoreMode = scoreMode + return q +} + +// Boost sets the boost for this query. +func (q *NestedQuery) Boost(boost float64) *NestedQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *NestedQuery) QueryName(queryName string) *NestedQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this nested query +// and reusing the defined path and query. +func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the query. +func (q *NestedQuery) Source() (interface{}, error) { + query := make(map[string]interface{}) + nq := make(map[string]interface{}) + query["nested"] = nq + + src, err := q.query.Source() + if err != nil { + return nil, err + } + nq["query"] = src + + nq["path"] = q.path + + if q.scoreMode != "" { + nq["score_mode"] = q.scoreMode + } + if q.boost != nil { + nq["boost"] = *q.boost + } + if q.queryName != "" { + nq["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + nq["inner_hits"] = src + } + return query, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go new file mode 100644 index 0000000000000000000000000000000000000000..bd11cc2914e98bc8dfcfc022d988c7ae04ca22cb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_parent_id.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// ParentIdQuery can be used to find child documents which belong to a +// particular parent. Given the following mapping definition. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-parent-id-query.html +type ParentIdQuery struct { + typ string + id string + ignoreUnmapped *bool + boost *float64 + queryName string + innerHit *InnerHit +} + +// NewParentIdQuery creates and initializes a new parent_id query. +func NewParentIdQuery(typ, id string) *ParentIdQuery { + return &ParentIdQuery{ + typ: typ, + id: id, + } +} + +// Type sets the parent type. +func (q *ParentIdQuery) Type(typ string) *ParentIdQuery { + q.typ = typ + return q +} + +// Id sets the id. +func (q *ParentIdQuery) Id(id string) *ParentIdQuery { + q.id = id + return q +} + +// IgnoreUnmapped specifies whether unmapped types should be ignored. +// If set to false, the query failes when an unmapped type is found. +func (q *ParentIdQuery) IgnoreUnmapped(ignore bool) *ParentIdQuery { + q.ignoreUnmapped = &ignore + return q +} + +// Boost sets the boost for this query. 
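// Usage sketch for NestedQuery (path, field and value are illustrative;
// NewMatchQuery is defined earlier in this package).
//
//	q := NewNestedQuery("obj1", NewMatchQuery("obj1.name", "blue")).
//		ScoreMode("avg")
//	src, err := q.Source()
//	// {"nested":{"path":"obj1","query":{"match":{...}},"score_mode":"avg"}}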
+func (q *ParentIdQuery) Boost(boost float64) *ParentIdQuery { + q.boost = &boost + return q +} + +// QueryName specifies the query name for the filter that can be used when +// searching for matched filters per hit. +func (q *ParentIdQuery) QueryName(queryName string) *ParentIdQuery { + q.queryName = queryName + return q +} + +// InnerHit sets the inner hit definition in the scope of this query and +// reusing the defined type and query. +func (q *ParentIdQuery) InnerHit(innerHit *InnerHit) *ParentIdQuery { + q.innerHit = innerHit + return q +} + +// Source returns JSON for the parent_id query. +func (q *ParentIdQuery) Source() (interface{}, error) { + // { + // "parent_id" : { + // "type" : "blog", + // "id" : "1" + // } + // } + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["parent_id"] = query + + query["type"] = q.typ + query["id"] = q.id + if q.boost != nil { + query["boost"] = *q.boost + } + if q.ignoreUnmapped != nil { + query["ignore_unmapped"] = *q.ignoreUnmapped + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.innerHit != nil { + src, err := q.innerHit.Source() + if err != nil { + return nil, err + } + query["inner_hits"] = src + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go new file mode 100644 index 0000000000000000000000000000000000000000..16f7611ed3ce76cf801d2b32192d757302c1c6ef --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_percolator.go @@ -0,0 +1,115 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// PercolatorQuery can be used to match queries stored in an index. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html +type PercolatorQuery struct { + field string + documentType string + document interface{} + indexedDocumentIndex string + indexedDocumentType string + indexedDocumentId string + indexedDocumentRouting string + indexedDocumentPreference string + indexedDocumentVersion *int64 +} + +// NewPercolatorQuery creates and initializes a new Percolator query. 
+func NewPercolatorQuery() *PercolatorQuery { + return &PercolatorQuery{} +} + +func (q *PercolatorQuery) Field(field string) *PercolatorQuery { + q.field = field + return q +} + +func (q *PercolatorQuery) DocumentType(typ string) *PercolatorQuery { + q.documentType = typ + return q +} + +func (q *PercolatorQuery) Document(doc interface{}) *PercolatorQuery { + q.document = doc + return q +} + +func (q *PercolatorQuery) IndexedDocumentIndex(index string) *PercolatorQuery { + q.indexedDocumentIndex = index + return q +} + +func (q *PercolatorQuery) IndexedDocumentType(typ string) *PercolatorQuery { + q.indexedDocumentType = typ + return q +} + +func (q *PercolatorQuery) IndexedDocumentId(id string) *PercolatorQuery { + q.indexedDocumentId = id + return q +} + +func (q *PercolatorQuery) IndexedDocumentRouting(routing string) *PercolatorQuery { + q.indexedDocumentRouting = routing + return q +} + +func (q *PercolatorQuery) IndexedDocumentPreference(preference string) *PercolatorQuery { + q.indexedDocumentPreference = preference + return q +} + +func (q *PercolatorQuery) IndexedDocumentVersion(version int64) *PercolatorQuery { + q.indexedDocumentVersion = &version + return q +} + +// Source returns JSON for the percolate query. +func (q *PercolatorQuery) Source() (interface{}, error) { + if len(q.field) == 0 { + return nil, errors.New("elastic: Field is required in PercolatorQuery") + } + if len(q.documentType) == 0 { + return nil, errors.New("elastic: DocumentType is required in PercolatorQuery") + } + if q.document == nil { + return nil, errors.New("elastic: Document is required in PercolatorQuery") + } + + // { + // "percolate" : { ... } + // } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["percolate"] = params + params["field"] = q.field + params["document_type"] = q.documentType + params["document"] = q.document + if len(q.indexedDocumentIndex) > 0 { + params["index"] = q.indexedDocumentIndex + } + if len(q.indexedDocumentType) > 0 { + params["type"] = q.indexedDocumentType + } + if len(q.indexedDocumentId) > 0 { + params["id"] = q.indexedDocumentId + } + if len(q.indexedDocumentRouting) > 0 { + params["routing"] = q.indexedDocumentRouting + } + if len(q.indexedDocumentPreference) > 0 { + params["preference"] = q.indexedDocumentPreference + } + if q.indexedDocumentVersion != nil { + params["version"] = *q.indexedDocumentVersion + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go new file mode 100644 index 0000000000000000000000000000000000000000..dcf20ea704c16fcefdf4be585841c22b1faaba94 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_prefix.go @@ -0,0 +1,67 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PrefixQuery matches documents that have fields containing terms +// with a specified prefix (not analyzed). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-prefix-query.html +type PrefixQuery struct { + name string + prefix string + boost *float64 + rewrite string + queryName string +} + +// NewPrefixQuery creates and initializes a new PrefixQuery. 
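// Usage sketch for PercolatorQuery (field, document type and document are
// illustrative). Field, DocumentType and Document are all required, otherwise
// Source() returns an error.
//
//	doc := map[string]interface{}{"message": "A new bonsai tree in the office"}
//	q := NewPercolatorQuery().
//		Field("query").
//		DocumentType("doctype").
//		Document(doc)
//	src, err := q.Source()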
+func NewPrefixQuery(name string, prefix string) *PrefixQuery { + return &PrefixQuery{name: name, prefix: prefix} +} + +// Boost sets the boost for this query. +func (q *PrefixQuery) Boost(boost float64) *PrefixQuery { + q.boost = &boost + return q +} + +func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *PrefixQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["prefix"] = query + + if q.boost == nil && q.rewrite == "" && q.queryName == "" { + query[q.name] = q.prefix + } else { + subQuery := make(map[string]interface{}) + subQuery["prefix"] = q.prefix + if q.boost != nil { + subQuery["boost"] = *q.boost + } + if q.rewrite != "" { + subQuery["rewrite"] = q.rewrite + } + if q.queryName != "" { + subQuery["_name"] = q.queryName + } + query[q.name] = subQuery + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go new file mode 100644 index 0000000000000000000000000000000000000000..427e54c5b2239317b62e9252960dc7ce3dea06f0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_query_string.go @@ -0,0 +1,359 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// QueryStringQuery uses the query parser in order to parse its content. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-query-string-query.html +type QueryStringQuery struct { + queryString string + defaultField string + defaultOperator string + analyzer string + quoteAnalyzer string + quoteFieldSuffix string + autoGeneratePhraseQueries *bool + allowLeadingWildcard *bool + lowercaseExpandedTerms *bool + enablePositionIncrements *bool + analyzeWildcard *bool + locale string + boost *float64 + fuzziness string + fuzzyPrefixLength *int + fuzzyMaxExpansions *int + fuzzyRewrite string + phraseSlop *int + fields []string + fieldBoosts map[string]*float64 + useDisMax *bool + tieBreaker *float64 + rewrite string + minimumShouldMatch string + lenient *bool + queryName string + timeZone string + maxDeterminizedStates *int + escape *bool +} + +// NewQueryStringQuery creates and initializes a new QueryStringQuery. +func NewQueryStringQuery(queryString string) *QueryStringQuery { + return &QueryStringQuery{ + queryString: queryString, + fields: make([]string, 0), + fieldBoosts: make(map[string]*float64), + } +} + +// DefaultField specifies the field to run against when no prefix field +// is specified. Only relevant when not explicitly adding fields the query +// string will run against. +func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery { + q.defaultField = defaultField + return q +} + +// Field adds a field to run the query string against. +func (q *QueryStringQuery) Field(field string) *QueryStringQuery { + q.fields = append(q.fields, field) + return q +} + +// FieldWithBoost adds a field to run the query string against with a specific boost. 
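// Illustrative usage sketch for the prefix query. With no options set, Source()
// emits the short form; setting Boost or QueryName switches to the object form:
//
//   q := elastic.NewPrefixQuery("user", "ki")
//   // {"prefix":{"user":"ki"}}
//   q = q.Boost(2.0).QueryName("users_named_ki")
//   // {"prefix":{"user":{"prefix":"ki","boost":2,"_name":"users_named_ki"}}}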
+func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery { + q.fields = append(q.fields, field) + q.fieldBoosts[field] = &boost + return q +} + +// UseDisMax specifies whether to combine queries using dis max or boolean +// query when more zhan one field is used with the query string. Defaults +// to dismax (true). +func (q *QueryStringQuery) UseDisMax(useDisMax bool) *QueryStringQuery { + q.useDisMax = &useDisMax + return q +} + +// TieBreaker is used when more than one field is used with the query string, +// and combined queries are using dismax. +func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery { + q.tieBreaker = &tieBreaker + return q +} + +// DefaultOperator sets the boolean operator of the query parser used to +// parse the query string. +// +// In default mode (OR) terms without any modifiers +// are considered optional, e.g. "capital of Hungary" is equal to +// "capital OR of OR Hungary". +// +// In AND mode, terms are considered to be in conjunction. The above mentioned +// query is then parsed as "capital AND of AND Hungary". +func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery { + q.defaultOperator = operator + return q +} + +// Analyzer is an optional analyzer used to analyze the query string. +// Note, if a field has search analyzer defined for it, then it will be used +// automatically. Defaults to the smart search analyzer. +func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery { + q.analyzer = analyzer + return q +} + +// QuoteAnalyzer is an optional analyzer to be used to analyze the query string +// for phrase searches. Note, if a field has search analyzer defined for it, +// then it will be used automatically. Defaults to the smart search analyzer. +func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery { + q.quoteAnalyzer = quoteAnalyzer + return q +} + +// AutoGeneratePhraseQueries indicates whether or not phrase queries will +// be automatically generated when the analyzer returns more then one term +// from whitespace delimited text. Set to false if phrase queries should only +// be generated when surrounded by double quotes. +func (q *QueryStringQuery) AutoGeneratePhraseQueries(autoGeneratePhraseQueries bool) *QueryStringQuery { + q.autoGeneratePhraseQueries = &autoGeneratePhraseQueries + return q +} + +// MaxDeterminizedState protects against too-difficult regular expression queries. +func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery { + q.maxDeterminizedStates = &maxDeterminizedStates + return q +} + +// AllowLeadingWildcard specifies whether leading wildcards should be allowed +// or not (defaults to true). +func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery { + q.allowLeadingWildcard = &allowLeadingWildcard + return q +} + +// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy +// and range queries are automatically lower-cased or not. Default is true. +func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery { + q.lowercaseExpandedTerms = &lowercaseExpandedTerms + return q +} + +// EnablePositionIncrements indicates whether to enable position increments +// in result query. Defaults to true. +// +// When set, result phrase and multi-phrase queries will be aware of position +// increments. Useful when e.g. 
a StopFilter increases the position increment +// of the token that follows an omitted token. +func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery { + q.enablePositionIncrements = &enablePositionIncrements + return q +} + +// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO". +func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery { + q.fuzziness = fuzziness + return q +} + +// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries. +// Default is 1. +func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery { + q.fuzzyPrefixLength = &fuzzyPrefixLength + return q +} + +func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery { + q.fuzzyMaxExpansions = &fuzzyMaxExpansions + return q +} + +func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery { + q.fuzzyRewrite = fuzzyRewrite + return q +} + +// PhraseSlop sets the default slop for phrases. If zero, then exact matches +// are required. Default value is zero. +func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery { + q.phraseSlop = &phraseSlop + return q +} + +// AnalyzeWildcard indicates whether to enabled analysis on wildcard and prefix queries. +func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery { + q.analyzeWildcard = &analyzeWildcard + return q +} + +func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery { + q.rewrite = rewrite + return q +} + +func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +// Boost sets the boost for this query. +func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery { + q.boost = &boost + return q +} + +// QuoteFieldSuffix is an optional field name suffix to automatically +// try and add to the field searched when using quoted text. +func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery { + q.quoteFieldSuffix = quoteFieldSuffix + return q +} + +// Lenient indicates whether the query string parser should be lenient +// when parsing field values. It defaults to the index setting and if not +// set, defaults to false. +func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery { + q.lenient = &lenient + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery { + q.queryName = queryName + return q +} + +func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery { + q.locale = locale + return q +} + +// TimeZone can be used to automatically adjust to/from fields using a +// timezone. Only used with date fields, of course. +func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery { + q.timeZone = timeZone + return q +} + +// Escape performs escaping of the query string. +func (q *QueryStringQuery) Escape(escape bool) *QueryStringQuery { + q.escape = &escape + return q +} + +// Source returns JSON for the query. 
+func (q *QueryStringQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["query_string"] = query + + query["query"] = q.queryString + + if q.defaultField != "" { + query["default_field"] = q.defaultField + } + + if len(q.fields) > 0 { + var fields []string + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.tieBreaker != nil { + query["tie_breaker"] = *q.tieBreaker + } + if q.useDisMax != nil { + query["use_dis_max"] = *q.useDisMax + } + if q.defaultOperator != "" { + query["default_operator"] = q.defaultOperator + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.quoteAnalyzer != "" { + query["quote_analyzer"] = q.quoteAnalyzer + } + if q.autoGeneratePhraseQueries != nil { + query["auto_generate_phrase_queries"] = *q.autoGeneratePhraseQueries + } + if q.maxDeterminizedStates != nil { + query["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.allowLeadingWildcard != nil { + query["allow_leading_wildcard"] = *q.allowLeadingWildcard + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.enablePositionIncrements != nil { + query["enable_position_increments"] = *q.enablePositionIncrements + } + if q.fuzziness != "" { + query["fuzziness"] = q.fuzziness + } + if q.boost != nil { + query["boost"] = *q.boost + } + if q.fuzzyPrefixLength != nil { + query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength + } + if q.fuzzyMaxExpansions != nil { + query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions + } + if q.fuzzyRewrite != "" { + query["fuzzy_rewrite"] = q.fuzzyRewrite + } + if q.phraseSlop != nil { + query["phrase_slop"] = *q.phraseSlop + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.rewrite != "" { + query["rewrite"] = q.rewrite + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.quoteFieldSuffix != "" { + query["quote_field_suffix"] = q.quoteFieldSuffix + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.timeZone != "" { + query["time_zone"] = q.timeZone + } + if q.escape != nil { + query["escape"] = *q.escape + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go new file mode 100644 index 0000000000000000000000000000000000000000..54303fb4a04108fb70b798b57bebbd17af17b132 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_range.go @@ -0,0 +1,144 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RangeQuery matches documents with fields that have terms within a certain range. 
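// Illustrative usage sketch for the query_string query built above. Field
// boosts are rendered into the "fields" array as "name^boost" strings:
//
//   q := elastic.NewQueryStringQuery("(content:this OR name:this) AND (content:that OR name:that)").
//       Field("content").
//       FieldWithBoost("name", 5).
//       DefaultOperator("AND").
//       AnalyzeWildcard(true)
//   // Source() emits "fields":["content","name^5.000000"], "default_operator":"AND"
//   // and "analyze_wildcard":true alongside the query text.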
+// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-range-query.html +type RangeQuery struct { + name string + from interface{} + to interface{} + timeZone string + includeLower bool + includeUpper bool + boost *float64 + queryName string + format string +} + +// NewRangeQuery creates and initializes a new RangeQuery. +func NewRangeQuery(name string) *RangeQuery { + return &RangeQuery{name: name, includeLower: true, includeUpper: true} +} + +// From indicates the from part of the RangeQuery. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) From(from interface{}) *RangeQuery { + q.from = from + return q +} + +// Gt indicates a greater-than value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gt(from interface{}) *RangeQuery { + q.from = from + q.includeLower = false + return q +} + +// Gte indicates a greater-than-or-equal value for the from part. +// Use nil to indicate an unbounded from part. +func (q *RangeQuery) Gte(from interface{}) *RangeQuery { + q.from = from + q.includeLower = true + return q +} + +// To indicates the to part of the RangeQuery. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) To(to interface{}) *RangeQuery { + q.to = to + return q +} + +// Lt indicates a less-than value for the to part. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lt(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = false + return q +} + +// Lte indicates a less-than-or-equal value for the to part. +// Use nil to indicate an unbounded to part. +func (q *RangeQuery) Lte(to interface{}) *RangeQuery { + q.to = to + q.includeUpper = true + return q +} + +// IncludeLower indicates whether the lower bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery { + q.includeLower = includeLower + return q +} + +// IncludeUpper indicates whether the upper bound should be included or not. +// Defaults to true. +func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery { + q.includeUpper = includeUpper + return q +} + +// Boost sets the boost for this query. +func (q *RangeQuery) Boost(boost float64) *RangeQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *RangeQuery) QueryName(queryName string) *RangeQuery { + q.queryName = queryName + return q +} + +// TimeZone is used for date fields. In that case, we can adjust the +// from/to fields using a timezone. +func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery { + q.timeZone = timeZone + return q +} + +// Format is used for date fields. In that case, we can set the format +// to be used instead of the mapper format. +func (q *RangeQuery) Format(format string) *RangeQuery { + q.format = format + return q +} + +// Source returns JSON for the query. 
+func (q *RangeQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + rangeQ := make(map[string]interface{}) + source["range"] = rangeQ + + params := make(map[string]interface{}) + rangeQ[q.name] = params + + params["from"] = q.from + params["to"] = q.to + if q.timeZone != "" { + params["time_zone"] = q.timeZone + } + if q.format != "" { + params["format"] = q.format + } + if q.boost != nil { + params["boost"] = *q.boost + } + params["include_lower"] = q.includeLower + params["include_upper"] = q.includeUpper + + if q.queryName != "" { + rangeQ["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string.go new file mode 100644 index 0000000000000000000000000000000000000000..3f9685c41ad374457a3ce1be4c70b6ffd21726f2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_raw_string.go @@ -0,0 +1,26 @@ +// Copyright 2012-present Oliver Eilhard, John Stanford. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "encoding/json" + +// RawStringQuery can be used to treat a string representation of an ES query +// as a Query. Example usage: +// q := RawStringQuery("{\"match_all\":{}}") +// db.Search().Query(q).From(1).Size(100).Do() +type RawStringQuery string + +// NewRawStringQuery ininitializes a new RawStringQuery. +// It is the same as RawStringQuery(q). +func NewRawStringQuery(q string) RawStringQuery { + return RawStringQuery(q) +} + +// Source returns the JSON encoded body +func (q RawStringQuery) Source() (interface{}, error) { + var f interface{} + err := json.Unmarshal([]byte(q), &f) + return f, err +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go new file mode 100644 index 0000000000000000000000000000000000000000..636e4baf9a792f52ac18bfc0c1cdfa4114f80af7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_regexp.go @@ -0,0 +1,82 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// RegexpQuery allows you to use regular expression term queries. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-regexp-query.html +type RegexpQuery struct { + name string + regexp string + flags string + boost *float64 + rewrite string + queryName string + maxDeterminizedStates *int +} + +// NewRegexpQuery creates and initializes a new RegexpQuery. +func NewRegexpQuery(name string, regexp string) *RegexpQuery { + return &RegexpQuery{name: name, regexp: regexp} +} + +// Flags sets the regexp flags. +func (q *RegexpQuery) Flags(flags string) *RegexpQuery { + q.flags = flags + return q +} + +// MaxDeterminizedStates protects against complex regular expressions. +func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery { + q.maxDeterminizedStates = &maxDeterminizedStates + return q +} + +// Boost sets the boost for this query. 
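// Illustrative usage sketch for the range query on a date field. Gte/Lt set the
// bounds and the include_lower/include_upper flags accordingly:
//
//   q := elastic.NewRangeQuery("born").
//       Gte("2012-01-01").
//       Lt("2013-01-01").
//       Format("yyyy-MM-dd").
//       TimeZone("+01:00")
//   // {"range":{"born":{"from":"2012-01-01","to":"2013-01-01","include_lower":true,
//   //   "include_upper":false,"format":"yyyy-MM-dd","time_zone":"+01:00"}}}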
+func (q *RegexpQuery) Boost(boost float64) *RegexpQuery { + q.boost = &boost + return q +} + +func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON-serializable query data. +func (q *RegexpQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + query := make(map[string]interface{}) + source["regexp"] = query + + x := make(map[string]interface{}) + x["value"] = q.regexp + if q.flags != "" { + x["flags"] = q.flags + } + if q.maxDeterminizedStates != nil { + x["max_determinized_states"] = *q.maxDeterminizedStates + } + if q.boost != nil { + x["boost"] = *q.boost + } + if q.rewrite != "" { + x["rewrite"] = q.rewrite + } + if q.queryName != "" { + x["name"] = q.queryName + } + query[q.name] = x + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go new file mode 100644 index 0000000000000000000000000000000000000000..664555b3e1ee35590d3e6d4f626be6debe6bc60d --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_script.go @@ -0,0 +1,51 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// ScriptQuery allows to define scripts as filters. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-script-query.html +type ScriptQuery struct { + script *Script + queryName string +} + +// NewScriptQuery creates and initializes a new ScriptQuery. +func NewScriptQuery(script *Script) *ScriptQuery { + return &ScriptQuery{ + script: script, + } +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *ScriptQuery) Source() (interface{}, error) { + if q.script == nil { + return nil, errors.New("ScriptQuery expected a script") + } + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["script"] = params + + src, err := q.script.Source() + if err != nil { + return nil, err + } + params["script"] = src + + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go new file mode 100644 index 0000000000000000000000000000000000000000..764fa0a204cf162e68e64b5c24e813fd351ebf2e --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_simple_query_string.go @@ -0,0 +1,185 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "strings" +) + +// SimpleQueryStringQuery is a query that uses the SimpleQueryParser +// to parse its context. 
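// Illustrative usage sketch for the regexp query above; max_determinized_states
// caps the automaton size for expensive patterns:
//
//   q := elastic.NewRegexpQuery("name.first", "s.*y").
//       Flags("INTERSECTION|COMPLEMENT|EMPTY").
//       MaxDeterminizedStates(20000)
//   // {"regexp":{"name.first":{"value":"s.*y","flags":"INTERSECTION|COMPLEMENT|EMPTY",
//   //   "max_determinized_states":20000}}}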
Unlike the regular query_string query, +// the simple_query_string query will never throw an exception, +// and discards invalid parts of the query. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-simple-query-string-query.html +type SimpleQueryStringQuery struct { + queryText string + analyzer string + operator string + fields []string + fieldBoosts map[string]*float64 + minimumShouldMatch string + flags string + boost *float64 + lowercaseExpandedTerms *bool + lenient *bool + analyzeWildcard *bool + locale string + queryName string +} + +// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery. +func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery { + return &SimpleQueryStringQuery{ + queryText: text, + fields: make([]string, 0), + fieldBoosts: make(map[string]*float64), + } +} + +// Field adds a field to run the query against. +func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery { + q.fields = append(q.fields, field) + return q +} + +// Field adds a field to run the query against with a specific boost. +func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery { + q.fields = append(q.fields, field) + q.fieldBoosts[field] = &boost + return q +} + +// Boost sets the boost for this query. +func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used when +// searching for matched_filters per hit. +func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery { + q.queryName = queryName + return q +} + +// Analyzer specifies the analyzer to use for the query. +func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery { + q.analyzer = analyzer + return q +} + +// DefaultOperator specifies the default operator for the query. +func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery { + q.operator = defaultOperator + return q +} + +// Flags sets the flags for the query. +func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery { + q.flags = flags + return q +} + +// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy +// and range queries are automatically lower-cased or not. Default is true. +func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery { + q.lowercaseExpandedTerms = &lowercaseExpandedTerms + return q +} + +func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery { + q.locale = locale + return q +} + +// Lenient indicates whether the query string parser should be lenient +// when parsing field values. It defaults to the index setting and if not +// set, defaults to false. +func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery { + q.lenient = &lenient + return q +} + +// AnalyzeWildcard indicates whether to enabled analysis on wildcard and prefix queries. +func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery { + q.analyzeWildcard = &analyzeWildcard + return q +} + +func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery { + q.minimumShouldMatch = minimumShouldMatch + return q +} + +// Source returns JSON for the query. 
+func (q *SimpleQueryStringQuery) Source() (interface{}, error) { + // { + // "simple_query_string" : { + // "query" : "\"fried eggs\" +(eggplant | potato) -frittata", + // "analyzer" : "snowball", + // "fields" : ["body^5","_all"], + // "default_operator" : "and" + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["simple_query_string"] = query + + query["query"] = q.queryText + + if len(q.fields) > 0 { + var fields []string + for _, field := range q.fields { + if boost, found := q.fieldBoosts[field]; found { + if boost != nil { + fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) + } else { + fields = append(fields, field) + } + } else { + fields = append(fields, field) + } + } + query["fields"] = fields + } + + if q.flags != "" { + query["flags"] = q.flags + } + if q.analyzer != "" { + query["analyzer"] = q.analyzer + } + if q.operator != "" { + query["default_operator"] = strings.ToLower(q.operator) + } + if q.lowercaseExpandedTerms != nil { + query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms + } + if q.lenient != nil { + query["lenient"] = *q.lenient + } + if q.analyzeWildcard != nil { + query["analyze_wildcard"] = *q.analyzeWildcard + } + if q.locale != "" { + query["locale"] = q.locale + } + if q.queryName != "" { + query["_name"] = q.queryName + } + if q.minimumShouldMatch != "" { + query["minimum_should_match"] = q.minimumShouldMatch + } + if q.boost != nil { + query["boost"] = *q.boost + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go new file mode 100644 index 0000000000000000000000000000000000000000..0ebf880090660699be5d048b0dc209c870a76ed7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_slice.go @@ -0,0 +1,53 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SliceQuery allows to partition the documents into several slices. +// It is used e.g. to slice scroll operations in Elasticsearch 5.0 or later. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#sliced-scroll +// for details. +type SliceQuery struct { + field string + id *int + max *int +} + +// NewSliceQuery creates a new SliceQuery. +func NewSliceQuery() *SliceQuery { + return &SliceQuery{} +} + +// Field is the name of the field to slice against (_uid by default). +func (s *SliceQuery) Field(field string) *SliceQuery { + s.field = field + return s +} + +// Id is the id of the slice. +func (s *SliceQuery) Id(id int) *SliceQuery { + s.id = &id + return s +} + +// Max is the maximum number of slices. +func (s *SliceQuery) Max(max int) *SliceQuery { + s.max = &max + return s +} + +// Source returns the JSON body. 
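// Illustrative usage sketch for the simple_query_string query. Note that
// DefaultOperator is lower-cased on serialization:
//
//   q := elastic.NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`).
//       Analyzer("snowball").
//       Field("body").
//       FieldWithBoost("title", 5).
//       DefaultOperator("AND")
//   // Source() emits "fields":["body","title^5.000000"] and "default_operator":"and".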
+func (s *SliceQuery) Source() (interface{}, error) { + m := make(map[string]interface{}) + if s.field != "" { + m["field"] = s.field + } + if s.id != nil { + m["id"] = *s.id + } + if s.max != nil { + m["max"] = *s.max + } + return m, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go new file mode 100644 index 0000000000000000000000000000000000000000..051f6dee3ed2e789ddcad4047cf7013d93a9bf0c --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_term.go @@ -0,0 +1,58 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermQuery finds documents that contain the exact term specified +// in the inverted index. +// +// For details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-term-query.html +type TermQuery struct { + name string + value interface{} + boost *float64 + queryName string +} + +// NewTermQuery creates and initializes a new TermQuery. +func NewTermQuery(name string, value interface{}) *TermQuery { + return &TermQuery{name: name, value: value} +} + +// Boost sets the boost for this query. +func (q *TermQuery) Boost(boost float64) *TermQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermQuery) QueryName(queryName string) *TermQuery { + q.queryName = queryName + return q +} + +// Source returns JSON for the query. +func (q *TermQuery) Source() (interface{}, error) { + // {"term":{"name":"value"}} + source := make(map[string]interface{}) + tq := make(map[string]interface{}) + source["term"] = tq + + if q.boost == nil && q.queryName == "" { + tq[q.name] = q.value + } else { + subQ := make(map[string]interface{}) + subQ["value"] = q.value + if q.boost != nil { + subQ["boost"] = *q.boost + } + if q.queryName != "" { + subQ["_name"] = q.queryName + } + tq[q.name] = subQ + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go new file mode 100644 index 0000000000000000000000000000000000000000..6875ae49a701bc9104d5ca999629406226cbf9eb --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_terms.go @@ -0,0 +1,58 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermsQuery filters documents that have fields that match any +// of the provided terms (not analyzed). +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-terms-query.html +type TermsQuery struct { + name string + values []interface{} + queryName string + boost *float64 +} + +// NewTermsQuery creates and initializes a new TermsQuery. +func NewTermsQuery(name string, values ...interface{}) *TermsQuery { + q := &TermsQuery{ + name: name, + values: make([]interface{}, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +// Boost sets the boost for this query. 
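// Illustrative usage sketch for the term query. The short form is used unless
// Boost or QueryName forces the object form:
//
//   q := elastic.NewTermQuery("user", "olivere")
//   // {"term":{"user":"olivere"}}
//   q = q.Boost(2.0)
//   // {"term":{"user":{"value":"olivere","boost":2}}}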
+func (q *TermsQuery) Boost(boost float64) *TermsQuery { + q.boost = &boost + return q +} + +// QueryName sets the query name for the filter that can be used +// when searching for matched_filters per hit +func (q *TermsQuery) QueryName(queryName string) *TermsQuery { + q.queryName = queryName + return q +} + +// Creates the query source for the term query. +func (q *TermsQuery) Source() (interface{}, error) { + // {"terms":{"name":["value1","value2"]}} + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["terms"] = params + params[q.name] = q.values + if q.boost != nil { + params["boost"] = *q.boost + } + if q.queryName != "" { + params["_name"] = q.queryName + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go new file mode 100644 index 0000000000000000000000000000000000000000..70ace4541ab618d7a7741a73b83002e4b8792607 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_type.go @@ -0,0 +1,26 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TypeQuery filters documents matching the provided document / mapping type. +// +// For details, see: +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-type-query.html +type TypeQuery struct { + typ string +} + +func NewTypeQuery(typ string) *TypeQuery { + return &TypeQuery{typ: typ} +} + +// Source returns JSON for the query. +func (q *TypeQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + params := make(map[string]interface{}) + source["type"] = params + params["value"] = q.typ + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go new file mode 100644 index 0000000000000000000000000000000000000000..35f481542fe955e51623168bc34dc211c90d8b39 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_queries_wildcard.go @@ -0,0 +1,81 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// WildcardQuery matches documents that have fields matching a wildcard +// expression (not analyzed). Supported wildcards are *, which matches +// any character sequence (including the empty one), and ?, which matches +// any single character. Note this query can be slow, as it needs to iterate +// over many terms. In order to prevent extremely slow wildcard queries, +// a wildcard term should not start with one of the wildcards * or ?. +// The wildcard query maps to Lucene WildcardQuery. +// +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/query-dsl-wildcard-query.html +type WildcardQuery struct { + name string + wildcard string + boost *float64 + rewrite string + queryName string +} + +// NewWildcardQuery creates and initializes a new WildcardQuery. +func NewWildcardQuery(name, wildcard string) *WildcardQuery { + return &WildcardQuery{ + name: name, + wildcard: wildcard, + } +} + +// Boost sets the boost for this query. 
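// Illustrative usage sketch for the terms query above:
//
//   q := elastic.NewTermsQuery("user", "olivere", "sandrae").QueryName("by_user")
//   // {"terms":{"user":["olivere","sandrae"],"_name":"by_user"}}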
+func (q *WildcardQuery) Boost(boost float64) *WildcardQuery { + q.boost = &boost + return q +} + +func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery { + q.rewrite = rewrite + return q +} + +// QueryName sets the name of this query. +func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery { + q.queryName = queryName + return q +} + +// Source returns the JSON serializable body of this query. +func (q *WildcardQuery) Source() (interface{}, error) { + // { + // "wildcard" : { + // "user" : { + // "wildcard" : "ki*y", + // "boost" : 1.0 + // } + // } + + source := make(map[string]interface{}) + + query := make(map[string]interface{}) + source["wildcard"] = query + + wq := make(map[string]interface{}) + query[q.name] = wq + + wq["wildcard"] = q.wildcard + + if q.boost != nil { + wq["boost"] = *q.boost + } + if q.rewrite != "" { + wq["rewrite"] = q.rewrite + } + if q.queryName != "" { + wq["_name"] = q.queryName + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_request.go b/vendor/gopkg.in/olivere/elastic.v5/search_request.go new file mode 100644 index 0000000000000000000000000000000000000000..ad22a5a3fb965d083ecd1dfdfa08f17d84637f3a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_request.go @@ -0,0 +1,202 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "strings" + +// SearchRequest combines a search request and its +// query details (see SearchSource). +// It is used in combination with MultiSearch. +type SearchRequest struct { + searchType string // default in ES is "query_then_fetch" + indices []string + types []string + routing *string + preference *string + requestCache *bool + ignoreUnavailable *bool + allowNoIndices *bool + expandWildcards string + scroll string + source interface{} +} + +// NewSearchRequest creates a new search request. +func NewSearchRequest() *SearchRequest { + return &SearchRequest{} +} + +// SearchRequest must be one of "query_then_fetch", "query_and_fetch", +// "scan", "count", "dfs_query_then_fetch", or "dfs_query_and_fetch". +// Use one of the constants defined via SearchType. +func (r *SearchRequest) SearchType(searchType string) *SearchRequest { + r.searchType = searchType + return r +} + +func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest { + return r.SearchType("dfs_query_then_fetch") +} + +func (r *SearchRequest) SearchTypeDfsQueryAndFetch() *SearchRequest { + return r.SearchType("dfs_query_and_fetch") +} + +func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest { + return r.SearchType("query_then_fetch") +} + +func (r *SearchRequest) SearchTypeQueryAndFetch() *SearchRequest { + return r.SearchType("query_and_fetch") +} + +func (r *SearchRequest) SearchTypeScan() *SearchRequest { + return r.SearchType("scan") +} + +func (r *SearchRequest) SearchTypeCount() *SearchRequest { + return r.SearchType("count") +} + +func (r *SearchRequest) Index(indices ...string) *SearchRequest { + r.indices = append(r.indices, indices...) + return r +} + +func (r *SearchRequest) HasIndices() bool { + return len(r.indices) > 0 +} + +func (r *SearchRequest) Type(types ...string) *SearchRequest { + r.types = append(r.types, types...) 
+ return r +} + +func (r *SearchRequest) Routing(routing string) *SearchRequest { + r.routing = &routing + return r +} + +func (r *SearchRequest) Routings(routings ...string) *SearchRequest { + if routings != nil { + routings := strings.Join(routings, ",") + r.routing = &routings + } else { + r.routing = nil + } + return r +} + +func (r *SearchRequest) Preference(preference string) *SearchRequest { + r.preference = &preference + return r +} + +func (r *SearchRequest) RequestCache(requestCache bool) *SearchRequest { + r.requestCache = &requestCache + return r +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *SearchRequest) IgnoreUnavailable(ignoreUnavailable bool) *SearchRequest { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices +// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). +func (s *SearchRequest) AllowNoIndices(allowNoIndices bool) *SearchRequest { + s.allowNoIndices = &allowNoIndices + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *SearchRequest) ExpandWildcards(expandWildcards string) *SearchRequest { + s.expandWildcards = expandWildcards + return s +} + +func (r *SearchRequest) Scroll(scroll string) *SearchRequest { + r.scroll = scroll + return r +} + +func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest { + return r.Source(searchSource) +} + +func (r *SearchRequest) Source(source interface{}) *SearchRequest { + switch v := source.(type) { + case *SearchSource: + src, err := v.Source() + if err != nil { + // Do not do anything in case of an error + return r + } + r.source = src + default: + r.source = source + } + return r +} + +// header is used e.g. by MultiSearch to get information about the search header +// of one SearchRequest. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-multi-search.html +func (r *SearchRequest) header() interface{} { + h := make(map[string]interface{}) + if r.searchType != "" { + h["search_type"] = r.searchType + } + + switch len(r.indices) { + case 0: + case 1: + h["index"] = r.indices[0] + default: + h["indices"] = r.indices + } + + switch len(r.types) { + case 0: + case 1: + h["type"] = r.types[0] + default: + h["types"] = r.types + } + + if r.routing != nil && *r.routing != "" { + h["routing"] = *r.routing + } + if r.preference != nil && *r.preference != "" { + h["preference"] = *r.preference + } + if r.requestCache != nil { + h["request_cache"] = *r.requestCache + } + if r.ignoreUnavailable != nil { + h["ignore_unavailable"] = *r.ignoreUnavailable + } + if r.allowNoIndices != nil { + h["allow_no_indices"] = *r.allowNoIndices + } + if r.expandWildcards != "" { + h["expand_wildcards"] = r.expandWildcards + } + if r.scroll != "" { + h["scroll"] = r.scroll + } + + return h +} + +// body is used by MultiSearch to get information about the search body +// of one SearchRequest. 
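// Illustrative usage sketch: building requests for a multi-search call. Each
// SearchRequest contributes a header line (from header()) and a body line (from
// body()) to the _msearch payload. Assumes a *Client named client exposing a
// MultiSearch service and a context.Context named ctx:
//
//   req1 := elastic.NewSearchRequest().Index("blogs").Type("post").
//       Source(elastic.NewSearchSource().Query(elastic.NewTermQuery("user", "olivere")))
//   req2 := elastic.NewSearchRequest().Index("users").
//       Source(elastic.NewSearchSource().Query(elastic.NewPrefixQuery("name", "sa")))
//   res, err := client.MultiSearch().Add(req1, req2).Do(ctx)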
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-multi-search.html +func (r *SearchRequest) body() interface{} { + return r.source +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/search_source.go b/vendor/gopkg.in/olivere/elastic.v5/search_source.go new file mode 100644 index 0000000000000000000000000000000000000000..9d71fef25f419e1e64357cc049f3b6097b29f583 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/search_source.go @@ -0,0 +1,531 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" +) + +// SearchSource enables users to build the search source. +// It resembles the SearchSourceBuilder in Elasticsearch. +type SearchSource struct { + query Query + postQuery Query + sliceQuery Query + from int + size int + explain *bool + version *bool + sorters []Sorter + trackScores bool + searchAfterSortValues []interface{} + minScore *float64 + timeout string + terminateAfter *int + storedFieldNames []string + docvalueFields []string + scriptFields []*ScriptField + fetchSourceContext *FetchSourceContext + aggregations map[string]Aggregation + highlight *Highlight + globalSuggestText string + suggesters []Suggester + rescores []*Rescore + defaultRescoreWindowSize *int + indexBoosts map[string]float64 + stats []string + innerHits map[string]*InnerHit + profile bool +} + +// NewSearchSource initializes a new SearchSource. +func NewSearchSource() *SearchSource { + return &SearchSource{ + from: -1, + size: -1, + trackScores: false, + aggregations: make(map[string]Aggregation), + indexBoosts: make(map[string]float64), + innerHits: make(map[string]*InnerHit), + } +} + +// Query sets the query to use with this search source. +func (s *SearchSource) Query(query Query) *SearchSource { + s.query = query + return s +} + +// Profile specifies that this search source should activate the +// Profile API for queries made on it. +func (s *SearchSource) Profile(profile bool) *SearchSource { + s.profile = profile + return s +} + +// PostFilter will be executed after the query has been executed and +// only affects the search hits, not the aggregations. +// This filter is always executed as the last filtering mechanism. +func (s *SearchSource) PostFilter(postFilter Query) *SearchSource { + s.postQuery = postFilter + return s +} + +// Slice allows partitioning the documents in multiple slices. +// It is e.g. used to slice a scroll operation, supported in +// Elasticsearch 5.0 or later. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html#sliced-scroll +// for details. +func (s *SearchSource) Slice(sliceQuery Query) *SearchSource { + s.sliceQuery = sliceQuery + return s +} + +// From index to start the search from. Defaults to 0. +func (s *SearchSource) From(from int) *SearchSource { + s.from = from + return s +} + +// Size is the number of search hits to return. Defaults to 10. +func (s *SearchSource) Size(size int) *SearchSource { + s.size = size + return s +} + +// MinScore sets the minimum score below which docs will be filtered out. +func (s *SearchSource) MinScore(minScore float64) *SearchSource { + s.minScore = &minScore + return s +} + +// Explain indicates whether each search hit should be returned with +// an explanation of the hit (ranking). 
+func (s *SearchSource) Explain(explain bool) *SearchSource { + s.explain = &explain + return s +} + +// Version indicates whether each search hit should be returned with +// a version associated to it. +func (s *SearchSource) Version(version bool) *SearchSource { + s.version = &version + return s +} + +// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms". +func (s *SearchSource) Timeout(timeout string) *SearchSource { + s.timeout = timeout + return s +} + +// TimeoutInMillis controls how many milliseconds a search is allowed +// to take before it is canceled. +func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource { + s.timeout = fmt.Sprintf("%dms", timeoutInMillis) + return s +} + +// TerminateAfter allows the request to stop after the given number +// of search hits are collected. +func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource { + s.terminateAfter = &terminateAfter + return s +} + +// Sort adds a sort order. +func (s *SearchSource) Sort(field string, ascending bool) *SearchSource { + s.sorters = append(s.sorters, SortInfo{Field: field, Ascending: ascending}) + return s +} + +// SortWithInfo adds a sort order. +func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource { + s.sorters = append(s.sorters, info) + return s +} + +// SortBy adds a sort order. +func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource { + s.sorters = append(s.sorters, sorter...) + return s +} + +func (s *SearchSource) hasSort() bool { + return len(s.sorters) > 0 +} + +// TrackScores is applied when sorting and controls if scores will be +// tracked as well. Defaults to false. +func (s *SearchSource) TrackScores(trackScores bool) *SearchSource { + s.trackScores = trackScores + return s +} + +// SearchAfter allows a different form of pagination by using a live cursor, +// using the results of the previous page to help the retrieval of the next. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-search-after.html +func (s *SearchSource) SearchAfter(sortValues ...interface{}) *SearchSource { + s.searchAfterSortValues = append(s.searchAfterSortValues, sortValues...) + return s +} + +// Aggregation adds an aggreation to perform as part of the search. +func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource { + s.aggregations[name] = aggregation + return s +} + +// DefaultRescoreWindowSize sets the rescore window size for rescores +// that don't specify their window. +func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource { + s.defaultRescoreWindowSize = &defaultRescoreWindowSize + return s +} + +// Highlight adds highlighting to the search. +func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource { + s.highlight = highlight + return s +} + +// Highlighter returns the highlighter. +func (s *SearchSource) Highlighter() *Highlight { + if s.highlight == nil { + s.highlight = NewHighlight() + } + return s.highlight +} + +// GlobalSuggestText defines the global text to use with all suggesters. +// This avoids repetition. +func (s *SearchSource) GlobalSuggestText(text string) *SearchSource { + s.globalSuggestText = text + return s +} + +// Suggester adds a suggester to the search. +func (s *SearchSource) Suggester(suggester Suggester) *SearchSource { + s.suggesters = append(s.suggesters, suggester) + return s +} + +// Rescorer adds a rescorer to the search. 
+func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource { + s.rescores = append(s.rescores, rescore) + return s +} + +// ClearRescorers removes all rescorers from the search. +func (s *SearchSource) ClearRescorers() *SearchSource { + s.rescores = make([]*Rescore, 0) + return s +} + +// FetchSource indicates whether the response should contain the stored +// _source for every hit. +func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource { + if s.fetchSourceContext == nil { + s.fetchSourceContext = NewFetchSourceContext(fetchSource) + } else { + s.fetchSourceContext.SetFetchSource(fetchSource) + } + return s +} + +// FetchSourceContext indicates how the _source should be fetched. +func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource { + s.fetchSourceContext = fetchSourceContext + return s +} + +// NoStoredFields indicates that no fields should be loaded, resulting in only +// id and type to be returned per field. +func (s *SearchSource) NoStoredFields() *SearchSource { + s.storedFieldNames = nil + return s +} + +// StoredField adds a single field to load and return (note, must be stored) as +// part of the search request. If none are specified, the source of the +// document will be returned. +func (s *SearchSource) StoredField(storedFieldName string) *SearchSource { + s.storedFieldNames = append(s.storedFieldNames, storedFieldName) + return s +} + +// StoredFields sets the fields to load and return as part of the search request. +// If none are specified, the source of the document will be returned. +func (s *SearchSource) StoredFields(storedFieldNames ...string) *SearchSource { + s.storedFieldNames = append(s.storedFieldNames, storedFieldNames...) + return s +} + +// DocvalueField adds a single field to load from the field data cache +// and return as part of the search request. +func (s *SearchSource) DocvalueField(fieldDataField string) *SearchSource { + s.docvalueFields = append(s.docvalueFields, fieldDataField) + return s +} + +// DocvalueFields adds one or more fields to load from the field data cache +// and return as part of the search request. +func (s *SearchSource) DocvalueFields(docvalueFields ...string) *SearchSource { + s.docvalueFields = append(s.docvalueFields, docvalueFields...) + return s +} + +// ScriptField adds a single script field with the provided script. +func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource { + s.scriptFields = append(s.scriptFields, scriptField) + return s +} + +// ScriptFields adds one or more script fields with the provided scripts. +func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource { + s.scriptFields = append(s.scriptFields, scriptFields...) + return s +} + +// IndexBoost sets the boost that a specific index will receive when the +// query is executed against it. +func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource { + s.indexBoosts[index] = boost + return s +} + +// Stats group this request will be aggregated under. +func (s *SearchSource) Stats(statsGroup ...string) *SearchSource { + s.stats = append(s.stats, statsGroup...) + return s +} + +// InnerHit adds an inner hit to return with the result. +func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource { + s.innerHits[name] = innerHit + return s +} + +// Source returns the serializable JSON for the source builder. 
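// Illustrative usage sketch for assembling a search source with the builders
// above; ss.Source() produces the JSON request body:
//
//   ss := elastic.NewSearchSource().
//       Query(elastic.NewTermQuery("user", "olivere")).
//       PostFilter(elastic.NewRangeQuery("age").Gte(21)).
//       Sort("created", false).
//       From(0).Size(20).
//       FetchSource(true)
//   body, err := ss.Source()
//   // body contains "query", "post_filter", "sort", "from", "size" and "_source".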
+func (s *SearchSource) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if s.from != -1 { + source["from"] = s.from + } + if s.size != -1 { + source["size"] = s.size + } + if s.timeout != "" { + source["timeout"] = s.timeout + } + if s.terminateAfter != nil { + source["terminate_after"] = *s.terminateAfter + } + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } + if s.postQuery != nil { + src, err := s.postQuery.Source() + if err != nil { + return nil, err + } + source["post_filter"] = src + } + if s.sliceQuery != nil { + src, err := s.sliceQuery.Source() + if err != nil { + return nil, err + } + source["slice"] = src + } + if s.minScore != nil { + source["min_score"] = *s.minScore + } + if s.version != nil { + source["version"] = *s.version + } + if s.explain != nil { + source["explain"] = *s.explain + } + if s.profile { + source["profile"] = s.profile + } + if s.fetchSourceContext != nil { + src, err := s.fetchSourceContext.Source() + if err != nil { + return nil, err + } + source["_source"] = src + } + + if s.storedFieldNames != nil { + switch len(s.storedFieldNames) { + case 1: + source["stored_fields"] = s.storedFieldNames[0] + default: + source["stored_fields"] = s.storedFieldNames + } + } + + if len(s.docvalueFields) > 0 { + source["docvalue_fields"] = s.docvalueFields + } + + if len(s.scriptFields) > 0 { + sfmap := make(map[string]interface{}) + for _, scriptField := range s.scriptFields { + src, err := scriptField.Source() + if err != nil { + return nil, err + } + sfmap[scriptField.FieldName] = src + } + source["script_fields"] = sfmap + } + + if len(s.sorters) > 0 { + var sortarr []interface{} + for _, sorter := range s.sorters { + src, err := sorter.Source() + if err != nil { + return nil, err + } + sortarr = append(sortarr, src) + } + source["sort"] = sortarr + } + + if s.trackScores { + source["track_scores"] = s.trackScores + } + + if len(s.searchAfterSortValues) > 0 { + source["search_after"] = s.searchAfterSortValues + } + + if len(s.indexBoosts) > 0 { + source["indices_boost"] = s.indexBoosts + } + + if len(s.aggregations) > 0 { + aggsMap := make(map[string]interface{}) + for name, aggregate := range s.aggregations { + src, err := aggregate.Source() + if err != nil { + return nil, err + } + aggsMap[name] = src + } + source["aggregations"] = aggsMap + } + + if s.highlight != nil { + src, err := s.highlight.Source() + if err != nil { + return nil, err + } + source["highlight"] = src + } + + if len(s.suggesters) > 0 { + suggesters := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + suggesters[s.Name()] = src + } + if s.globalSuggestText != "" { + suggesters["text"] = s.globalSuggestText + } + source["suggest"] = suggesters + } + + if len(s.rescores) > 0 { + // Strip empty rescores from request + var rescores []*Rescore + for _, r := range s.rescores { + if !r.IsEmpty() { + rescores = append(rescores, r) + } + } + + if len(rescores) == 1 { + rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := rescores[0].Source() + if err != nil { + return nil, err + } + source["rescore"] = src + } else { + var slice []interface{} + for _, r := range rescores { + r.defaultRescoreWindowSize = s.defaultRescoreWindowSize + src, err := r.Source() + if err != nil { + return nil, err + } + slice = append(slice, src) + } + source["rescore"] = slice + } + } + + if len(s.stats) > 0 { + 
source["stats"] = s.stats + } + + if len(s.innerHits) > 0 { + // Top-level inner hits + // See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits + // "inner_hits": { + // "<inner_hits_name>": { + // "<path|type>": { + // "<path-to-nested-object-field|child-or-parent-type>": { + // <inner_hits_body>, + // [,"inner_hits" : { [<sub_inner_hits>]+ } ]? + // } + // } + // }, + // [,"<inner_hits_name_2>" : { ... } ]* + // } + m := make(map[string]interface{}) + for name, hit := range s.innerHits { + if hit.path != "" { + src, err := hit.Source() + if err != nil { + return nil, err + } + path := make(map[string]interface{}) + path[hit.path] = src + m[name] = map[string]interface{}{ + "path": path, + } + } else if hit.typ != "" { + src, err := hit.Source() + if err != nil { + return nil, err + } + typ := make(map[string]interface{}) + typ[hit.typ] = src + m[name] = map[string]interface{}{ + "type": typ, + } + } else { + // TODO the Java client throws here, because either path or typ must be specified + } + } + source["inner_hits"] = m + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/sort.go b/vendor/gopkg.in/olivere/elastic.v5/sort.go new file mode 100644 index 0000000000000000000000000000000000000000..122b69104babea69eccb927ae729ecacacc89653 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/sort.go @@ -0,0 +1,505 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// -- Sorter -- + +// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-sort.html. +type Sorter interface { + Source() (interface{}, error) +} + +// -- SortInfo -- + +// SortInfo contains information about sorting a field. +type SortInfo struct { + Sorter + Field string + Ascending bool + Missing interface{} + IgnoreUnmapped *bool + SortMode string + NestedFilter Query + NestedPath string +} + +func (info SortInfo) Source() (interface{}, error) { + prop := make(map[string]interface{}) + if info.Ascending { + prop["order"] = "asc" + } else { + prop["order"] = "desc" + } + if info.Missing != nil { + prop["missing"] = info.Missing + } + if info.IgnoreUnmapped != nil { + prop["ignore_unmapped"] = *info.IgnoreUnmapped + } + if info.SortMode != "" { + prop["mode"] = info.SortMode + } + if info.NestedFilter != nil { + src, err := info.NestedFilter.Source() + if err != nil { + return nil, err + } + prop["nested_filter"] = src + } + if info.NestedPath != "" { + prop["nested_path"] = info.NestedPath + } + source := make(map[string]interface{}) + source[info.Field] = prop + return source, nil +} + +// -- SortByDoc -- + +// SortByDoc sorts by the "_doc" field, as described in +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-scroll.html. +// +// Example: +// ss := elastic.NewSearchSource() +// ss = ss.SortBy(elastic.SortByDoc{}) +type SortByDoc struct { + Sorter +} + +// Source returns the JSON-serializable data. +func (s SortByDoc) Source() (interface{}, error) { + return "_doc", nil +} + +// -- ScoreSort -- + +// ScoreSort sorts by relevancy score. +type ScoreSort struct { + Sorter + ascending bool +} + +// NewScoreSort creates a new ScoreSort. +func NewScoreSort() *ScoreSort { + return &ScoreSort{ascending: false} // Descending by default! 
+} + +// Order defines whether sorting ascending (default) or descending. +func (s *ScoreSort) Order(ascending bool) *ScoreSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s *ScoreSort) Asc() *ScoreSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s *ScoreSort) Desc() *ScoreSort { + s.ascending = false + return s +} + +// Source returns the JSON-serializable data. +func (s *ScoreSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_score"] = x + if s.ascending { + x["order"] = "asc" + } else { + x["order"] = "desc" + } + return source, nil +} + +// -- FieldSort -- + +// FieldSort sorts by a given field. +type FieldSort struct { + Sorter + fieldName string + ascending bool + missing interface{} + ignoreUnmapped *bool + unmappedType *string + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewFieldSort creates a new FieldSort. +func NewFieldSort(fieldName string) *FieldSort { + return &FieldSort{ + fieldName: fieldName, + ascending: true, + } +} + +// FieldName specifies the name of the field to be used for sorting. +func (s *FieldSort) FieldName(fieldName string) *FieldSort { + s.fieldName = fieldName + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s *FieldSort) Order(ascending bool) *FieldSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s *FieldSort) Asc() *FieldSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s *FieldSort) Desc() *FieldSort { + s.ascending = false + return s +} + +// Missing sets the value to be used when a field is missing in a document. +// You can also use "_last" or "_first" to sort missing last or first +// respectively. +func (s *FieldSort) Missing(missing interface{}) *FieldSort { + s.missing = missing + return s +} + +// IgnoreUnmapped specifies what happens if the field does not exist in +// the index. Set it to true to ignore, or set it to false to not ignore (default). +func (s *FieldSort) IgnoreUnmapped(ignoreUnmapped bool) *FieldSort { + s.ignoreUnmapped = &ignoreUnmapped + return s +} + +// UnmappedType sets the type to use when the current field is not mapped +// in an index. +func (s *FieldSort) UnmappedType(typ string) *FieldSort { + s.unmappedType = &typ + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. +func (s *FieldSort) SortMode(sortMode string) *FieldSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s *FieldSort) NestedFilter(nestedFilter Query) *FieldSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s *FieldSort) NestedPath(nestedPath string) *FieldSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. 
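// Illustrative usage sketch (editorial addition, not part of the vendored
// upstream file): serializing a FieldSort built with the methods above. The
// function name exampleFieldSortJSON, the field "updated_at", and the missing
// value "_last" are hypothetical; assumes
// import ( "encoding/json"; elastic "gopkg.in/olivere/elastic.v5" ).
func exampleFieldSortJSON() (string, error) {
	// Sort descending on a hypothetical date field, listing documents that
	// lack the field last.
	sorter := elastic.NewFieldSort("updated_at").Desc().Missing("_last")
	// Source (defined below) yields {"updated_at":{"order":"desc","missing":"_last"}}.
	src, err := sorter.Source()
	if err != nil {
		return "", err
	}
	b, err := json.Marshal(src)
	return string(b), err
}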
+func (s *FieldSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source[s.fieldName] = x + if s.ascending { + x["order"] = "asc" + } else { + x["order"] = "desc" + } + if s.missing != nil { + x["missing"] = s.missing + } + if s.ignoreUnmapped != nil { + x["ignore_unmapped"] = *s.ignoreUnmapped + } + if s.unmappedType != nil { + x["unmapped_type"] = *s.unmappedType + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- GeoDistanceSort -- + +// GeoDistanceSort allows for sorting by geographic distance. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-sort.html#_geo_distance_sorting. +type GeoDistanceSort struct { + Sorter + fieldName string + points []*GeoPoint + geohashes []string + geoDistance *string + unit string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewGeoDistanceSort creates a new sorter for geo distances. +func NewGeoDistanceSort(fieldName string) *GeoDistanceSort { + return &GeoDistanceSort{ + fieldName: fieldName, + ascending: true, + } +} + +// FieldName specifies the name of the (geo) field to use for sorting. +func (s *GeoDistanceSort) FieldName(fieldName string) *GeoDistanceSort { + s.fieldName = fieldName + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s *GeoDistanceSort) Order(ascending bool) *GeoDistanceSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s *GeoDistanceSort) Asc() *GeoDistanceSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s *GeoDistanceSort) Desc() *GeoDistanceSort { + s.ascending = false + return s +} + +// Point specifies a point to create the range distance aggregations from. +func (s *GeoDistanceSort) Point(lat, lon float64) *GeoDistanceSort { + s.points = append(s.points, GeoPointFromLatLon(lat, lon)) + return s +} + +// Points specifies the geo point(s) to create the range distance aggregations from. +func (s *GeoDistanceSort) Points(points ...*GeoPoint) *GeoDistanceSort { + s.points = append(s.points, points...) + return s +} + +// GeoHashes specifies the geo point to create the range distance aggregations from. +func (s *GeoDistanceSort) GeoHashes(geohashes ...string) *GeoDistanceSort { + s.geohashes = append(s.geohashes, geohashes...) + return s +} + +// GeoDistance represents how to compute the distance. +// It can be sloppy_arc (default), arc, or plane. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-request-sort.html#_geo_distance_sorting. +func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort { + s.geoDistance = &geoDistance + return s +} + +// Unit specifies the distance unit to use. It defaults to km. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/common-options.html#distance-units +// for details. +func (s *GeoDistanceSort) Unit(unit string) *GeoDistanceSort { + s.unit = unit + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min, max, sum, and avg. 
+func (s *GeoDistanceSort) SortMode(sortMode string) *GeoDistanceSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s *GeoDistanceSort) NestedFilter(nestedFilter Query) *GeoDistanceSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s *GeoDistanceSort) NestedPath(nestedPath string) *GeoDistanceSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. +func (s *GeoDistanceSort) Source() (interface{}, error) { + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_geo_distance"] = x + + // Points + var ptarr []interface{} + for _, pt := range s.points { + ptarr = append(ptarr, pt.Source()) + } + for _, geohash := range s.geohashes { + ptarr = append(ptarr, geohash) + } + x[s.fieldName] = ptarr + + if s.unit != "" { + x["unit"] = s.unit + } + if s.geoDistance != nil { + x["distance_type"] = *s.geoDistance + } + + if s.ascending { + x["order"] = "asc" + } else { + x["order"] = "desc" + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} + +// -- ScriptSort -- + +// ScriptSort sorts by a custom script. See +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/modules-scripting.html#modules-scripting +// for details about scripting. +type ScriptSort struct { + Sorter + script *Script + typ string + ascending bool + sortMode *string + nestedFilter Query + nestedPath *string +} + +// NewScriptSort creates and initializes a new ScriptSort. +// You must provide a script and a type, e.g. "string" or "number". +func NewScriptSort(script *Script, typ string) *ScriptSort { + return &ScriptSort{ + script: script, + typ: typ, + ascending: true, + } +} + +// Type sets the script type, which can be either "string" or "number". +func (s *ScriptSort) Type(typ string) *ScriptSort { + s.typ = typ + return s +} + +// Order defines whether sorting ascending (default) or descending. +func (s *ScriptSort) Order(ascending bool) *ScriptSort { + s.ascending = ascending + return s +} + +// Asc sets ascending sort order. +func (s *ScriptSort) Asc() *ScriptSort { + s.ascending = true + return s +} + +// Desc sets descending sort order. +func (s *ScriptSort) Desc() *ScriptSort { + s.ascending = false + return s +} + +// SortMode specifies what values to pick in case a document contains +// multiple values for the targeted sort field. Possible values are: +// min or max. +func (s *ScriptSort) SortMode(sortMode string) *ScriptSort { + s.sortMode = &sortMode + return s +} + +// NestedFilter sets a filter that nested objects should match with +// in order to be taken into account for sorting. +func (s *ScriptSort) NestedFilter(nestedFilter Query) *ScriptSort { + s.nestedFilter = nestedFilter + return s +} + +// NestedPath is used if sorting occurs on a field that is inside a +// nested object. +func (s *ScriptSort) NestedPath(nestedPath string) *ScriptSort { + s.nestedPath = &nestedPath + return s +} + +// Source returns the JSON-serializable data. 
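// Illustrative usage sketch (editorial addition, not part of the vendored
// upstream file): building a geo-distance sort with the builders above. The
// field "pin.location" and the coordinates are hypothetical; assumes
// import elastic "gopkg.in/olivere/elastic.v5".
func exampleGeoDistanceSort() (interface{}, error) {
	sorter := elastic.NewGeoDistanceSort("pin.location").
		Point(40.7128, -74.0060). // hypothetical reference point (lat, lon)
		Unit("km").
		GeoDistance("plane").
		Asc()
	// Produces a {"_geo_distance": {...}} sort clause as serialized above.
	return sorter.Source()
}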
+func (s *ScriptSort) Source() (interface{}, error) { + if s.script == nil { + return nil, errors.New("ScriptSort expected a script") + } + source := make(map[string]interface{}) + x := make(map[string]interface{}) + source["_script"] = x + + src, err := s.script.Source() + if err != nil { + return nil, err + } + x["script"] = src + + x["type"] = s.typ + + if s.ascending { + x["order"] = "asc" + } else { + x["order"] = "desc" + } + if s.sortMode != nil { + x["mode"] = *s.sortMode + } + if s.nestedFilter != nil { + src, err := s.nestedFilter.Source() + if err != nil { + return nil, err + } + x["nested_filter"] = src + } + if s.nestedPath != nil { + x["nested_path"] = *s.nestedPath + } + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest.go b/vendor/gopkg.in/olivere/elastic.v5/suggest.go new file mode 100644 index 0000000000000000000000000000000000000000..b45cf35458769da888986f79425d3a53ec36d7b0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggest.go @@ -0,0 +1,159 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// SuggestService returns suggestions for text. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters.html. +type SuggestService struct { + client *Client + pretty bool + routing string + preference string + index []string + suggesters []Suggester +} + +// NewSuggestService creates a new instance of SuggestService. +func NewSuggestService(client *Client) *SuggestService { + builder := &SuggestService{ + client: client, + } + return builder +} + +// Index adds one or more indices to use for the suggestion request. +func (s *SuggestService) Index(index ...string) *SuggestService { + s.index = append(s.index, index...) + return s +} + +// Pretty asks Elasticsearch to return indented JSON. +func (s *SuggestService) Pretty(pretty bool) *SuggestService { + s.pretty = pretty + return s +} + +// Routing specifies the routing value. +func (s *SuggestService) Routing(routing string) *SuggestService { + s.routing = routing + return s +} + +// Preference specifies the node or shard the operation should be +// performed on (default: random). +func (s *SuggestService) Preference(preference string) *SuggestService { + s.preference = preference + return s +} + +// Suggester adds a suggester to the request. +func (s *SuggestService) Suggester(suggester Suggester) *SuggestService { + s.suggesters = append(s.suggesters, suggester) + return s +} + +// buildURL builds the URL for the operation. +func (s *SuggestService) buildURL() (string, url.Values, error) { + var err error + var path string + + if len(s.index) > 0 { + path, err = uritemplates.Expand("/{index}/_suggest", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } else { + path = "/_suggest" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", fmt.Sprintf("%v", s.pretty)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + return path, params, nil +} + +// Do executes the request. 
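// Illustrative usage sketch (editorial addition, not part of the vendored
// upstream file): issuing a suggest request through SuggestService. It relies
// on the Do method below and on CompletionSuggester, which is defined later in
// this change. The index "music", the suggester name "song-suggest", the field
// "suggest_field", and the already configured *elastic.Client are all
// hypothetical; assumes
// import ( "golang.org/x/net/context"; elastic "gopkg.in/olivere/elastic.v5" ).
func exampleSuggest(ctx context.Context, client *elastic.Client) (elastic.SuggestResult, error) {
	suggester := elastic.NewCompletionSuggester("song-suggest").
		Text("nir").
		Field("suggest_field")
	return elastic.NewSuggestService(client).
		Index("music").
		Suggester(suggester).
		Do(ctx)
}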
+func (s *SuggestService) Do(ctx context.Context) (SuggestResult, error) { + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Set body + body := make(map[string]interface{}) + for _, s := range s.suggesters { + src, err := s.Source(false) + if err != nil { + return nil, err + } + body[s.Name()] = src + } + + // Get response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // There is a _shard object that cannot be deserialized. + // So we use json.RawMessage instead. + var suggestions map[string]*json.RawMessage + if err := s.client.decoder.Decode(res.Body, &suggestions); err != nil { + return nil, err + } + + ret := make(SuggestResult) + for name, result := range suggestions { + if name != "_shards" { + var sug []Suggestion + if err := s.client.decoder.Decode(*result, &sug); err != nil { + return nil, err + } + ret[name] = sug + } + } + + return ret, nil +} + +// SuggestResult is the outcome of SuggestService.Do. +type SuggestResult map[string][]Suggestion + +// Suggestion is a single suggester outcome. +type Suggestion struct { + Text string `json:"text"` + Offset int `json:"offset"` + Length int `json:"length"` + Options []suggestionOption `json:"options"` +} + +type suggestionOption struct { + Text string `json:"text"` + Score float64 `json:"score"` + Freq int `json:"freq"` + Payload interface{} `json:"payload"` + CollateMatch bool `json:"collate_match"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go b/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go new file mode 100644 index 0000000000000000000000000000000000000000..8e15b4ec2427c350dec1bc6f3998935b7b3b0380 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggest_field.go @@ -0,0 +1,90 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "encoding/json" + "errors" +) + +// SuggestField can be used by the caller to specify a suggest field +// at index time. For a detailed example, see e.g. +// https://www.elastic.co/blog/you-complete-me. +type SuggestField struct { + inputs []string + weight int + contextQueries []SuggesterContextQuery +} + +func NewSuggestField(input ...string) *SuggestField { + return &SuggestField{ + inputs: input, + weight: -1, + } +} + +func (f *SuggestField) Input(input ...string) *SuggestField { + if f.inputs == nil { + f.inputs = make([]string, 0) + } + f.inputs = append(f.inputs, input...) + return f +} + +func (f *SuggestField) Weight(weight int) *SuggestField { + f.weight = weight + return f +} + +func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField { + f.contextQueries = append(f.contextQueries, queries...) + return f +} + +// MarshalJSON encodes SuggestField into JSON. 
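// Illustrative usage sketch (editorial addition, not part of the vendored
// upstream file): marshalling a SuggestField; json.Marshal invokes the
// MarshalJSON method below. The inputs and weight are hypothetical; assumes
// import ( "encoding/json"; elastic "gopkg.in/olivere/elastic.v5" ).
func exampleSuggestField() ([]byte, error) {
	field := elastic.NewSuggestField("Nirvana", "Nirwana").Weight(10)
	// Expected output: {"input":["Nirvana","Nirwana"],"weight":10}
	return json.Marshal(field)
}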
+func (f *SuggestField) MarshalJSON() ([]byte, error) { + source := make(map[string]interface{}) + + if f.inputs != nil { + switch len(f.inputs) { + case 1: + source["input"] = f.inputs[0] + default: + source["input"] = f.inputs + } + } + + if f.weight >= 0 { + source["weight"] = f.weight + } + + switch len(f.contextQueries) { + case 0: + case 1: + src, err := f.contextQueries[0].Source() + if err != nil { + return nil, err + } + source["context"] = src + default: + ctxq := make(map[string]interface{}) + for _, query := range f.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + m, ok := src.(map[string]interface{}) + if !ok { + return nil, errors.New("SuggesterContextQuery must be of type map[string]interface{}") + } + for k, v := range m { + ctxq[k] = v + } + } + source["context"] = ctxq + } + + return json.Marshal(source) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester.go b/vendor/gopkg.in/olivere/elastic.v5/suggester.go new file mode 100644 index 0000000000000000000000000000000000000000..f7dc48f90a48816a29742548029a4c38f38bf87a --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester.go @@ -0,0 +1,15 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// Represents the generic suggester interface. +// A suggester's only purpose is to return the +// source of the query as a JSON-serializable +// object. Returning a map[string]interface{} +// will do. +type Suggester interface { + Name() string + Source(includeName bool) (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go new file mode 100644 index 0000000000000000000000000000000000000000..df6086e631cb6d0379f77c739db2ad89dd495ffc --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion.go @@ -0,0 +1,138 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import "errors" + +// CompletionSuggester is a fast suggester for e.g. type-ahead completion. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-completion.html +// for more details. +type CompletionSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery +} + +// Creates a new completion suggester. 
+func NewCompletionSuggester(name string) *CompletionSuggester { + return &CompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *CompletionSuggester) Name() string { + return q.name +} + +func (q *CompletionSuggester) Text(text string) *CompletionSuggester { + q.text = text + return q +} + +func (q *CompletionSuggester) Field(field string) *CompletionSuggester { + q.field = field + return q +} + +func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *CompletionSuggester) Size(size int) *CompletionSuggester { + q.size = &size + return q +} + +func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// completionSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the completion element. +type completionSuggesterRequest struct { + Text string `json:"text"` + Completion interface{} `json:"completion"` +} + +// Creates the source for the completion suggester. +func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) { + cs := &completionSuggesterRequest{} + + if q.text != "" { + cs.Text = q.text + } + + suggester := make(map[string]interface{}) + cs.Completion = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make(map[string]interface{}) + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + // Merge the dictionary into ctxq + m, ok := src.(map[string]interface{}) + if !ok { + return nil, errors.New("elastic: context query is not a map") + } + for k, v := range m { + ctxq[k] = v + } + } + suggester["context"] = ctxq + } + + // TODO(oe) Add completion-suggester specific parameters here + + if !includeName { + return cs, nil + } + + source := make(map[string]interface{}) + source[q.name] = cs + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go new file mode 100644 index 0000000000000000000000000000000000000000..e2c06a25ffc90a8c637ca64111eaaf37714c1987 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_completion_fuzzy.go @@ -0,0 +1,179 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// FuzzyFuzzyCompletionSuggester is a FuzzyCompletionSuggester that allows fuzzy +// completion. 
+// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-completion.html +// for details, and +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-completion.html#fuzzy +// for details about the fuzzy completion suggester. +type FuzzyCompletionSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + fuzziness interface{} + fuzzyTranspositions *bool + fuzzyMinLength *int + fuzzyPrefixLength *int + unicodeAware *bool +} + +// Fuzziness defines the fuzziness which is used in FuzzyCompletionSuggester. +type Fuzziness struct { +} + +// Creates a new completion suggester. +func NewFuzzyCompletionSuggester(name string) *FuzzyCompletionSuggester { + return &FuzzyCompletionSuggester{ + name: name, + contextQueries: make([]SuggesterContextQuery, 0), + } +} + +func (q *FuzzyCompletionSuggester) Name() string { + return q.name +} + +func (q *FuzzyCompletionSuggester) Text(text string) *FuzzyCompletionSuggester { + q.text = text + return q +} + +func (q *FuzzyCompletionSuggester) Field(field string) *FuzzyCompletionSuggester { + q.field = field + return q +} + +func (q *FuzzyCompletionSuggester) Analyzer(analyzer string) *FuzzyCompletionSuggester { + q.analyzer = analyzer + return q +} + +func (q *FuzzyCompletionSuggester) Size(size int) *FuzzyCompletionSuggester { + q.size = &size + return q +} + +func (q *FuzzyCompletionSuggester) ShardSize(shardSize int) *FuzzyCompletionSuggester { + q.shardSize = &shardSize + return q +} + +func (q *FuzzyCompletionSuggester) ContextQuery(query SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *FuzzyCompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *FuzzyCompletionSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +// Fuzziness defines the strategy used to describe what "fuzzy" actually +// means for the suggester, e.g. 1, 2, "0", "1..2", ">4", or "AUTO". +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/common-options.html#fuzziness +// for a detailed description. +func (q *FuzzyCompletionSuggester) Fuzziness(fuzziness interface{}) *FuzzyCompletionSuggester { + q.fuzziness = fuzziness + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyTranspositions(fuzzyTranspositions bool) *FuzzyCompletionSuggester { + q.fuzzyTranspositions = &fuzzyTranspositions + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyMinLength(minLength int) *FuzzyCompletionSuggester { + q.fuzzyMinLength = &minLength + return q +} + +func (q *FuzzyCompletionSuggester) FuzzyPrefixLength(prefixLength int) *FuzzyCompletionSuggester { + q.fuzzyPrefixLength = &prefixLength + return q +} + +func (q *FuzzyCompletionSuggester) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggester { + q.unicodeAware = &unicodeAware + return q +} + +// Creates the source for the completion suggester. 
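// Illustrative usage sketch (editorial addition, not part of the vendored
// upstream file): configuring a fuzzy completion suggester with the builders
// above. The suggester name, text, and field are hypothetical; assumes
// import elastic "gopkg.in/olivere/elastic.v5".
func exampleFuzzyCompletion() (interface{}, error) {
	s := elastic.NewFuzzyCompletionSuggester("song-suggest").
		Text("nirv").
		Field("suggest_field").
		Fuzziness(2).
		UnicodeAware(true)
	// Passing includeName=true nests the request body under the suggester name.
	return s.Source(true)
}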
+func (q *FuzzyCompletionSuggester) Source(includeName bool) (interface{}, error) { + cs := &completionSuggesterRequest{} + + if q.text != "" { + cs.Text = q.text + } + + suggester := make(map[string]interface{}) + cs.Completion = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + var ctxq []interface{} + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Fuzzy Completion Suggester fields + fuzzy := make(map[string]interface{}) + suggester["fuzzy"] = fuzzy + if q.fuzziness != nil { + fuzzy["fuzziness"] = q.fuzziness + } + if q.fuzzyTranspositions != nil { + fuzzy["transpositions"] = *q.fuzzyTranspositions + } + if q.fuzzyMinLength != nil { + fuzzy["min_length"] = *q.fuzzyMinLength + } + if q.fuzzyPrefixLength != nil { + fuzzy["prefix_length"] = *q.fuzzyPrefixLength + } + if q.unicodeAware != nil { + fuzzy["unicode_aware"] = *q.unicodeAware + } + + if !includeName { + return cs, nil + } + + source := make(map[string]interface{}) + source[q.name] = cs + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go new file mode 100644 index 0000000000000000000000000000000000000000..caf477669cfb318b1dc5045424c0c5f72e3a1315 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context.go @@ -0,0 +1,11 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// SuggesterContextQuery is used to define context information within +// a suggestion request. +type SuggesterContextQuery interface { + Source() (interface{}, error) +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go new file mode 100644 index 0000000000000000000000000000000000000000..c224ae04a08f77958c7e6728a0191239de5623b0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_category.go @@ -0,0 +1,99 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterCategoryMapping -- + +// SuggesterCategoryMapping provides a mapping for a category context in a suggester. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_category_mapping. +type SuggesterCategoryMapping struct { + name string + fieldName string + defaultValues []string +} + +// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping. +func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping { + return &SuggesterCategoryMapping{ + name: name, + defaultValues: make([]string, 0), + } +} + +func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping { + q.defaultValues = append(q.defaultValues, values...) 
+ return q +} + +func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "category" + + switch len(q.defaultValues) { + case 0: + x["default"] = q.defaultValues + case 1: + x["default"] = q.defaultValues[0] + default: + x["default"] = q.defaultValues + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterCategoryQuery -- + +// SuggesterCategoryQuery provides querying a category context in a suggester. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_category_query. +type SuggesterCategoryQuery struct { + name string + values []string +} + +// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery. +func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery { + q := &SuggesterCategoryQuery{ + name: name, + values: make([]string, 0), + } + if len(values) > 0 { + q.values = append(q.values, values...) + } + return q +} + +func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery { + q.values = append(q.values, values...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterCategoryQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + switch len(q.values) { + case 0: + source[q.name] = q.values + case 1: + source[q.name] = q.values[0] + default: + source[q.name] = q.values + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go new file mode 100644 index 0000000000000000000000000000000000000000..6815bfe730853e1f1cb72e6962cd78537a689c95 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_context_geo.go @@ -0,0 +1,130 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// -- SuggesterGeoMapping -- + +// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_geo_location_mapping. +type SuggesterGeoMapping struct { + name string + defaultLocations []*GeoPoint + precision []string + neighbors *bool + fieldName string +} + +// NewSuggesterGeoMapping creates a new SuggesterGeoMapping. +func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { + return &SuggesterGeoMapping{ + name: name, + } +} + +func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping { + q.defaultLocations = append(q.defaultLocations, locations...) + return q +} + +func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping { + q.precision = append(q.precision, precision...) 
+ return q +} + +func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping { + q.neighbors = &neighbors + return q +} + +func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping { + q.fieldName = fieldName + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterGeoMapping) Source() (interface{}, error) { + source := make(map[string]interface{}) + + x := make(map[string]interface{}) + source[q.name] = x + + x["type"] = "geo" + + if len(q.precision) > 0 { + x["precision"] = q.precision + } + if q.neighbors != nil { + x["neighbors"] = *q.neighbors + } + + switch len(q.defaultLocations) { + case 0: + case 1: + x["default"] = q.defaultLocations[0].Source() + default: + var arr []interface{} + for _, p := range q.defaultLocations { + arr = append(arr, p.Source()) + } + x["default"] = arr + } + + if q.fieldName != "" { + x["path"] = q.fieldName + } + return source, nil +} + +// -- SuggesterGeoQuery -- + +// SuggesterGeoQuery provides querying a geolocation context in a suggester. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/suggester-context.html#_geo_location_query +type SuggesterGeoQuery struct { + name string + location *GeoPoint + precision []string +} + +// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. +func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery { + return &SuggesterGeoQuery{ + name: name, + location: location, + precision: make([]string, 0), + } +} + +func (q *SuggesterGeoQuery) Precision(precision ...string) *SuggesterGeoQuery { + q.precision = append(q.precision, precision...) + return q +} + +// Source returns a map that will be used to serialize the context query as JSON. +func (q *SuggesterGeoQuery) Source() (interface{}, error) { + source := make(map[string]interface{}) + + if len(q.precision) == 0 { + if q.location != nil { + source[q.name] = q.location.Source() + } + } else { + x := make(map[string]interface{}) + source[q.name] = x + + if q.location != nil { + x["value"] = q.location.Source() + } + + switch len(q.precision) { + case 0: + case 1: + x["precision"] = q.precision[0] + default: + x["precision"] = q.precision + } + } + + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go new file mode 100644 index 0000000000000000000000000000000000000000..f75e1ddc1b83f97f04d551b657899eca731c2665 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_phrase.go @@ -0,0 +1,546 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// PhraseSuggester provides an API to access word alternatives +// on a per token basis within a certain string distance. +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html. 
+type PhraseSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to a phrase suggester + maxErrors *float64 + separator *string + realWordErrorLikelihood *float64 + confidence *float64 + generators map[string][]CandidateGenerator + gramSize *int + smoothingModel SmoothingModel + forceUnigrams *bool + tokenLimit *int + preTag, postTag *string + collateQuery *string + collatePreference *string + collateParams map[string]interface{} + collatePrune *bool +} + +// NewPhraseSuggester creates a new PhraseSuggester. +func NewPhraseSuggester(name string) *PhraseSuggester { + return &PhraseSuggester{ + name: name, + collateParams: make(map[string]interface{}), + } +} + +func (q *PhraseSuggester) Name() string { + return q.name +} + +func (q *PhraseSuggester) Text(text string) *PhraseSuggester { + q.text = text + return q +} + +func (q *PhraseSuggester) Field(field string) *PhraseSuggester { + q.field = field + return q +} + +func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester { + q.analyzer = analyzer + return q +} + +func (q *PhraseSuggester) Size(size int) *PhraseSuggester { + q.size = &size + return q +} + +func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester { + q.shardSize = &shardSize + return q +} + +func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester { + if gramSize >= 1 { + q.gramSize = &gramSize + } + return q +} + +func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester { + q.maxErrors = &maxErrors + return q +} + +func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester { + q.separator = &separator + return q +} + +func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester { + q.realWordErrorLikelihood = &realWordErrorLikelihood + return q +} + +func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester { + q.confidence = &confidence + return q +} + +func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester { + if q.generators == nil { + q.generators = make(map[string][]CandidateGenerator) + } + typ := generator.Type() + if _, found := q.generators[typ]; !found { + q.generators[typ] = make([]CandidateGenerator, 0) + } + q.generators[typ] = append(q.generators[typ], generator) + return q +} + +func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester { + for _, g := range generators { + q = q.CandidateGenerator(g) + } + return q +} + +func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester { + q.generators = nil + return q +} + +func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester { + q.forceUnigrams = &forceUnigrams + return q +} + +func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester { + q.smoothingModel = smoothingModel + return q +} + +func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester { + q.tokenLimit = &tokenLimit + return q +} + +func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester { + q.preTag = &preTag + q.postTag = 
&postTag + return q +} + +func (q *PhraseSuggester) CollateQuery(collateQuery string) *PhraseSuggester { + q.collateQuery = &collateQuery + return q +} + +func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester { + q.collatePreference = &collatePreference + return q +} + +func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester { + q.collateParams = collateParams + return q +} + +func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester { + q.collatePrune = &collatePrune + return q +} + +// phraseSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. +// We got into trouble when using plain maps because the text element +// needs to go before the simple_phrase element. +type phraseSuggesterRequest struct { + Text string `json:"text"` + Phrase interface{} `json:"phrase"` +} + +// Source generates the source for the phrase suggester. +func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) { + ps := &phraseSuggesterRequest{} + + if q.text != "" { + ps.Text = q.text + } + + suggester := make(map[string]interface{}) + ps.Phrase = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + var ctxq []interface{} + for _, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq = append(ctxq, src) + } + suggester["context"] = ctxq + } + + // Phase-specified parameters + if q.realWordErrorLikelihood != nil { + suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood + } + if q.confidence != nil { + suggester["confidence"] = *q.confidence + } + if q.separator != nil { + suggester["separator"] = *q.separator + } + if q.maxErrors != nil { + suggester["max_errors"] = *q.maxErrors + } + if q.gramSize != nil { + suggester["gram_size"] = *q.gramSize + } + if q.forceUnigrams != nil { + suggester["force_unigrams"] = *q.forceUnigrams + } + if q.tokenLimit != nil { + suggester["token_limit"] = *q.tokenLimit + } + if q.generators != nil && len(q.generators) > 0 { + for typ, generators := range q.generators { + var arr []interface{} + for _, g := range generators { + src, err := g.Source() + if err != nil { + return nil, err + } + arr = append(arr, src) + } + suggester[typ] = arr + } + } + if q.smoothingModel != nil { + src, err := q.smoothingModel.Source() + if err != nil { + return nil, err + } + x := make(map[string]interface{}) + x[q.smoothingModel.Type()] = src + suggester["smoothing"] = x + } + if q.preTag != nil { + hl := make(map[string]string) + hl["pre_tag"] = *q.preTag + if q.postTag != nil { + hl["post_tag"] = *q.postTag + } + suggester["highlight"] = hl + } + if q.collateQuery != nil { + collate := make(map[string]interface{}) + suggester["collate"] = collate + if q.collateQuery != nil { + collate["query"] = *q.collateQuery + } + if q.collatePreference != nil { + collate["preference"] = *q.collatePreference + } + if len(q.collateParams) > 0 { + collate["params"] = q.collateParams + } + if q.collatePrune != nil { + collate["prune"] = *q.collatePrune + } + } + + if !includeName { + return ps, nil + } 
+ + source := make(map[string]interface{}) + source[q.name] = ps + return source, nil +} + +// -- Smoothing models -- + +type SmoothingModel interface { + Type() string + Source() (interface{}, error) +} + +// StupidBackoffSmoothingModel implements a stupid backoff smoothing model. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. +type StupidBackoffSmoothingModel struct { + discount float64 +} + +func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel { + return &StupidBackoffSmoothingModel{ + discount: discount, + } +} + +func (sm *StupidBackoffSmoothingModel) Type() string { + return "stupid_backoff" +} + +func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["discount"] = sm.discount + return source, nil +} + +// -- + +// LaplaceSmoothingModel implements a laplace smoothing model. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. +type LaplaceSmoothingModel struct { + alpha float64 +} + +func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel { + return &LaplaceSmoothingModel{ + alpha: alpha, + } +} + +func (sm *LaplaceSmoothingModel) Type() string { + return "laplace" +} + +func (sm *LaplaceSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["alpha"] = sm.alpha + return source, nil +} + +// -- + +// LinearInterpolationSmoothingModel implements a linear interpolation +// smoothing model. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. +type LinearInterpolationSmoothingModel struct { + trigramLamda float64 + bigramLambda float64 + unigramLambda float64 +} + +func NewLinearInterpolationSmoothingModel(trigramLamda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel { + return &LinearInterpolationSmoothingModel{ + trigramLamda: trigramLamda, + bigramLambda: bigramLambda, + unigramLambda: unigramLambda, + } +} + +func (sm *LinearInterpolationSmoothingModel) Type() string { + return "linear_interpolation" +} + +func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) { + source := make(map[string]interface{}) + source["trigram_lambda"] = sm.trigramLamda + source["bigram_lambda"] = sm.bigramLambda + source["unigram_lambda"] = sm.unigramLambda + return source, nil +} + +// -- CandidateGenerator -- + +type CandidateGenerator interface { + Type() string + Source() (interface{}, error) +} + +// DirectCandidateGenerator implements a direct candidate generator. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-phrase.html#_smoothing_models +// for details about smoothing models. 
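// Illustrative usage sketch (editorial addition, not part of the vendored
// upstream file): combining a PhraseSuggester with a Laplace smoothing model
// and a direct candidate generator (whose builders follow below). The
// suggester name, text, field, and highlight tags are hypothetical; assumes
// import elastic "gopkg.in/olivere/elastic.v5".
func examplePhraseSuggester() (interface{}, error) {
	s := elastic.NewPhraseSuggester("simple-phrase").
		Text("noble prize").
		Field("title.trigram").
		MaxErrors(2).
		SmoothingModel(elastic.NewLaplaceSmoothingModel(0.7)).
		CandidateGenerator(elastic.NewDirectCandidateGenerator("title.trigram").SuggestMode("always")).
		Highlight("<em>", "</em>")
	return s.Source(true)
}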
+type DirectCandidateGenerator struct { + field string + preFilter *string + postFilter *string + suggestMode *string + accuracy *float64 + size *int + sort *string + stringDistance *string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator { + return &DirectCandidateGenerator{ + field: field, + } +} + +func (g *DirectCandidateGenerator) Type() string { + return "direct_generator" +} + +func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator { + g.field = field + return g +} + +func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator { + g.preFilter = &preFilter + return g +} + +func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator { + g.postFilter = &postFilter + return g +} + +func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator { + g.suggestMode = &suggestMode + return g +} + +func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator { + g.accuracy = &accuracy + return g +} + +func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator { + g.size = &size + return g +} + +func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator { + g.sort = &sort + return g +} + +func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator { + g.stringDistance = &stringDistance + return g +} + +func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator { + g.maxEdits = &maxEdits + return g +} + +func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator { + g.maxInspections = &maxInspections + return g +} + +func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator { + g.maxTermFreq = &maxTermFreq + return g +} + +func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator { + g.prefixLength = &prefixLength + return g +} + +func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator { + g.minWordLength = &minWordLength + return g +} + +func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator { + g.minDocFreq = &minDocFreq + return g +} + +func (g *DirectCandidateGenerator) Source() (interface{}, error) { + source := make(map[string]interface{}) + if g.field != "" { + source["field"] = g.field + } + if g.suggestMode != nil { + source["suggest_mode"] = *g.suggestMode + } + if g.accuracy != nil { + source["accuracy"] = *g.accuracy + } + if g.size != nil { + source["size"] = *g.size + } + if g.sort != nil { + source["sort"] = *g.sort + } + if g.stringDistance != nil { + source["string_distance"] = *g.stringDistance + } + if g.maxEdits != nil { + source["max_edits"] = *g.maxEdits + } + if g.maxInspections != nil { + source["max_inspections"] = *g.maxInspections + } + if g.maxTermFreq != nil { + source["max_term_freq"] = *g.maxTermFreq + } + if g.prefixLength != nil { + source["prefix_length"] = *g.prefixLength + } + if g.minWordLength != nil { + source["min_word_length"] = *g.minWordLength + } + if g.minDocFreq != nil { + source["min_doc_freq"] = *g.minDocFreq + } + if g.preFilter != nil { + source["pre_filter"] = *g.preFilter + } + if g.postFilter != nil { + source["post_filter"] = *g.postFilter + } + return source, nil +} diff 
--git a/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go b/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go new file mode 100644 index 0000000000000000000000000000000000000000..6dcbbc6460d2eea04f5bc9c020501c9d846e57bf --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/suggester_term.go @@ -0,0 +1,233 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +// TermSuggester suggests terms based on edit distance. +// For more details, see +// https://www.elastic.co/guide/en/elasticsearch/reference/5.2/search-suggesters-term.html. +type TermSuggester struct { + Suggester + name string + text string + field string + analyzer string + size *int + shardSize *int + contextQueries []SuggesterContextQuery + + // fields specific to term suggester + suggestMode string + accuracy *float64 + sort string + stringDistance string + maxEdits *int + maxInspections *int + maxTermFreq *float64 + prefixLength *int + minWordLength *int + minDocFreq *float64 +} + +// NewTermSuggester creates a new TermSuggester. +func NewTermSuggester(name string) *TermSuggester { + return &TermSuggester{ + name: name, + } +} + +func (q *TermSuggester) Name() string { + return q.name +} + +func (q *TermSuggester) Text(text string) *TermSuggester { + q.text = text + return q +} + +func (q *TermSuggester) Field(field string) *TermSuggester { + q.field = field + return q +} + +func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester { + q.analyzer = analyzer + return q +} + +func (q *TermSuggester) Size(size int) *TermSuggester { + q.size = &size + return q +} + +func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester { + q.shardSize = &shardSize + return q +} + +func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, query) + return q +} + +func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester { + q.contextQueries = append(q.contextQueries, queries...) + return q +} + +func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester { + q.suggestMode = suggestMode + return q +} + +func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester { + q.accuracy = &accuracy + return q +} + +func (q *TermSuggester) Sort(sort string) *TermSuggester { + q.sort = sort + return q +} + +func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester { + q.stringDistance = stringDistance + return q +} + +func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester { + q.maxEdits = &maxEdits + return q +} + +func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester { + q.maxInspections = &maxInspections + return q +} + +func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester { + q.maxTermFreq = &maxTermFreq + return q +} + +func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester { + q.prefixLength = &prefixLength + return q +} + +func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester { + q.minWordLength = &minWordLength + return q +} + +func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester { + q.minDocFreq = &minDocFreq + return q +} + +// termSuggesterRequest is necessary because the order in which +// the JSON elements are routed to Elasticsearch is relevant. 
+// We got into trouble when using plain maps because the text element +// needs to go before the term element. +type termSuggesterRequest struct { + Text string `json:"text"` + Term interface{} `json:"term"` +} + +// Source generates the source for the term suggester. +func (q *TermSuggester) Source(includeName bool) (interface{}, error) { + // "suggest" : { + // "my-suggest-1" : { + // "text" : "the amsterdma meetpu", + // "term" : { + // "field" : "body" + // } + // }, + // "my-suggest-2" : { + // "text" : "the rottredam meetpu", + // "term" : { + // "field" : "title", + // } + // } + // } + ts := &termSuggesterRequest{} + if q.text != "" { + ts.Text = q.text + } + + suggester := make(map[string]interface{}) + ts.Term = suggester + + if q.analyzer != "" { + suggester["analyzer"] = q.analyzer + } + if q.field != "" { + suggester["field"] = q.field + } + if q.size != nil { + suggester["size"] = *q.size + } + if q.shardSize != nil { + suggester["shard_size"] = *q.shardSize + } + switch len(q.contextQueries) { + case 0: + case 1: + src, err := q.contextQueries[0].Source() + if err != nil { + return nil, err + } + suggester["context"] = src + default: + ctxq := make([]interface{}, len(q.contextQueries)) + for i, query := range q.contextQueries { + src, err := query.Source() + if err != nil { + return nil, err + } + ctxq[i] = src + } + suggester["context"] = ctxq + } + + // Specific to term suggester + if q.suggestMode != "" { + suggester["suggest_mode"] = q.suggestMode + } + if q.accuracy != nil { + suggester["accuracy"] = *q.accuracy + } + if q.sort != "" { + suggester["sort"] = q.sort + } + if q.stringDistance != "" { + suggester["string_distance"] = q.stringDistance + } + if q.maxEdits != nil { + suggester["max_edits"] = *q.maxEdits + } + if q.maxInspections != nil { + suggester["max_inspections"] = *q.maxInspections + } + if q.maxTermFreq != nil { + suggester["max_term_freq"] = *q.maxTermFreq + } + if q.prefixLength != nil { + suggester["prefix_len"] = *q.prefixLength + } + if q.minWordLength != nil { + suggester["min_word_len"] = *q.minWordLength + } + if q.minDocFreq != nil { + suggester["min_doc_freq"] = *q.minDocFreq + } + + if !includeName { + return ts, nil + } + + source := make(map[string]interface{}) + source[q.name] = ts + return source, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go new file mode 100644 index 0000000000000000000000000000000000000000..0c2e00a01f2e7bebe5296ecd22490ebb7a966704 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_cancel.go @@ -0,0 +1,146 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// TasksCancelService can cancel long-running tasks. +// It is supported as of Elasticsearch 2.3.0. +// +// See http://www.elastic.co/guide/en/elasticsearch/reference/5.2/tasks-cancel.html +// for details. +type TasksCancelService struct { + client *Client + pretty bool + taskId *int64 + actions []string + nodeId []string + parentNode string + parentTask *int64 +} + +// NewTasksCancelService creates a new TasksCancelService. 
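// Illustrative usage sketch (editorial addition, not part of the vendored
// upstream file): cancelling a single task via TasksCancelService, using the
// constructor and methods defined below. The task id comes from the caller;
// assumes import ( "golang.org/x/net/context"; elastic "gopkg.in/olivere/elastic.v5" )
// and an already configured *elastic.Client.
func exampleCancelTask(ctx context.Context, client *elastic.Client, id int64) (*elastic.TasksListResponse, error) {
	return elastic.NewTasksCancelService(client).
		TaskId(id).
		Do(ctx)
}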
+func NewTasksCancelService(client *Client) *TasksCancelService { + return &TasksCancelService{ + client: client, + actions: make([]string, 0), + nodeId: make([]string, 0), + } +} + +// TaskId specifies the task to cancel. Set to -1 to cancel all tasks. +func (s *TasksCancelService) TaskId(taskId int64) *TasksCancelService { + s.taskId = &taskId + return s +} + +// Actions is a list of actions that should be cancelled. Leave empty to cancel all. +func (s *TasksCancelService) Actions(actions []string) *TasksCancelService { + s.actions = actions + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *TasksCancelService) NodeId(nodeId []string) *TasksCancelService { + s.nodeId = nodeId + return s +} + +// ParentNode specifies to cancel tasks with specified parent node. +func (s *TasksCancelService) ParentNode(parentNode string) *TasksCancelService { + s.parentNode = parentNode + return s +} + +// ParentTask specifies to cancel tasks with specified parent task id. +// Set to -1 to cancel all. +func (s *TasksCancelService) ParentTask(parentTask int64) *TasksCancelService { + s.parentTask = &parentTask + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TasksCancelService) Pretty(pretty bool) *TasksCancelService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. +func (s *TasksCancelService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if s.taskId != nil { + path, err = uritemplates.Expand("/_tasks/{task_id}/_cancel", map[string]string{ + "task_id": fmt.Sprintf("%d", *s.taskId), + }) + } else { + path = "/_tasks/_cancel" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.actions) > 0 { + params.Set("actions", strings.Join(s.actions, ",")) + } + if len(s.nodeId) > 0 { + params.Set("node_id", strings.Join(s.nodeId, ",")) + } + if s.parentNode != "" { + params.Set("parent_node", s.parentNode) + } + if s.parentTask != nil { + params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *TasksCancelService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *TasksCancelService) Do(ctx context.Context) (*TasksListResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(TasksListResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go b/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go new file mode 100644 index 0000000000000000000000000000000000000000..898ac2a554d6a6cdf5561f690291a1dd0a3d9dd7 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/tasks_list.go @@ -0,0 +1,215 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. 
+// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// TasksListService retrieves the list of currently executing tasks +// on one ore more nodes in the cluster. It is part of the Task Management API +// documented at http://www.elastic.co/guide/en/elasticsearch/reference/5.2/tasks-list.html. +// +// It is supported as of Elasticsearch 2.3.0. +type TasksListService struct { + client *Client + pretty bool + taskId []int64 + actions []string + detailed *bool + nodeId []string + parentNode string + parentTask *int64 + waitForCompletion *bool +} + +// NewTasksListService creates a new TasksListService. +func NewTasksListService(client *Client) *TasksListService { + return &TasksListService{ + client: client, + taskId: make([]int64, 0), + actions: make([]string, 0), + nodeId: make([]string, 0), + } +} + +// TaskId indicates to returns the task(s) with specified id(s). +func (s *TasksListService) TaskId(taskId ...int64) *TasksListService { + s.taskId = append(s.taskId, taskId...) + return s +} + +// Actions is a list of actions that should be returned. Leave empty to return all. +func (s *TasksListService) Actions(actions ...string) *TasksListService { + s.actions = append(s.actions, actions...) + return s +} + +// Detailed indicates whether to return detailed task information (default: false). +func (s *TasksListService) Detailed(detailed bool) *TasksListService { + s.detailed = &detailed + return s +} + +// NodeId is a list of node IDs or names to limit the returned information; +// use `_local` to return information from the node you're connecting to, +// leave empty to get information from all nodes. +func (s *TasksListService) NodeId(nodeId ...string) *TasksListService { + s.nodeId = append(s.nodeId, nodeId...) + return s +} + +// ParentNode returns tasks with specified parent node. +func (s *TasksListService) ParentNode(parentNode string) *TasksListService { + s.parentNode = parentNode + return s +} + +// ParentTask returns tasks with specified parent task id. Set to -1 to return all. +func (s *TasksListService) ParentTask(parentTask int64) *TasksListService { + s.parentTask = &parentTask + return s +} + +// WaitForCompletion indicates whether to wait for the matching tasks +// to complete (default: false). +func (s *TasksListService) WaitForCompletion(waitForCompletion bool) *TasksListService { + s.waitForCompletion = &waitForCompletion + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TasksListService) Pretty(pretty bool) *TasksListService { + s.pretty = pretty + return s +} + +// buildURL builds the URL for the operation. 
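+//
+// Depending on the options set on the service it expands to either
+// "/_tasks/{task_id}" or "/_tasks", plus query-string parameters, e.g.
+// (hypothetical values):
+//
+//	GET /_tasks?actions=*search&detailed=true&wait_for_completion=false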
+func (s *TasksListService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.taskId) > 0 { + var tasks []string + for _, taskId := range s.taskId { + tasks = append(tasks, fmt.Sprintf("%d", taskId)) + } + path, err = uritemplates.Expand("/_tasks/{task_id}", map[string]string{ + "task_id": strings.Join(tasks, ","), + }) + } else { + path = "/_tasks" + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.actions) > 0 { + params.Set("actions", strings.Join(s.actions, ",")) + } + if s.detailed != nil { + params.Set("detailed", fmt.Sprintf("%v", *s.detailed)) + } + if len(s.nodeId) > 0 { + params.Set("node_id", strings.Join(s.nodeId, ",")) + } + if s.parentNode != "" { + params.Set("parent_node", s.parentNode) + } + if s.parentTask != nil { + params.Set("parent_task", fmt.Sprintf("%v", *s.parentTask)) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *TasksListService) Validate() error { + return nil +} + +// Do executes the operation. +func (s *TasksListService) Do(ctx context.Context) (*TasksListResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, nil) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(TasksListResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// TasksListResponse is the response of TasksListService.Do. +type TasksListResponse struct { + TaskFailures []*TaskOperationFailure `json:"task_failures"` + NodeFailures []*FailedNodeException `json:"node_failures"` + // Nodes returns the tasks per node. The key is the node id. + Nodes map[string]*DiscoveryNode `json:"nodes"` +} + +type TaskOperationFailure struct { + TaskId int64 `json:"task_id"` + NodeId string `json:"node_id"` + Status string `json:"status"` + Reason *ErrorDetails `json:"reason"` +} + +type FailedNodeException struct { + *ErrorDetails + NodeId string `json:"node_id"` +} + +type DiscoveryNode struct { + Name string `json:"name"` + TransportAddress string `json:"transport_address"` + Host string `json:"host"` + IP string `json:"ip"` + Attributes map[string]interface{} `json:"attributes"` + // Tasks returns the tasks by its id (as a string). 
+ Tasks map[string]*TaskInfo `json:"tasks"` +} + +type TaskInfo struct { + Node string `json:"node"` + Id int64 `json:"id"` // the task id + Type string `json:"type"` + Action string `json:"action"` + Status interface{} `json:"status"` + Description interface{} `json:"description"` + StartTime string `json:"start_time"` + StartTimeInMillis int64 `json:"start_time_in_millis"` + RunningTime string `json:"running_time"` + RunningTimeInNanos int64 `json:"running_time_in_nanos"` + ParentTaskId string `json:"parent_task_id"` // like "YxJnVYjwSBm_AUbzddTajQ:12356" +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/termvectors.go b/vendor/gopkg.in/olivere/elastic.v5/termvectors.go new file mode 100644 index 0000000000000000000000000000000000000000..627ee11db3a071964b51e343d111369995172687 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/termvectors.go @@ -0,0 +1,460 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// TermvectorsService returns information and statistics on terms in the +// fields of a particular document. The document could be stored in the +// index or artificially provided by the user. +// +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-termvectors.html +// for documentation. +type TermvectorsService struct { + client *Client + pretty bool + id string + index string + typ string + dfs *bool + doc interface{} + fieldStatistics *bool + fields []string + filter *TermvectorsFilterSettings + perFieldAnalyzer map[string]string + offsets *bool + parent string + payloads *bool + positions *bool + preference string + realtime *bool + routing string + termStatistics *bool + version interface{} + versionType string + bodyJson interface{} + bodyString string +} + +// NewTermvectorsService creates a new TermvectorsService. +func NewTermvectorsService(client *Client) *TermvectorsService { + return &TermvectorsService{ + client: client, + } +} + +// Index in which the document resides. +func (s *TermvectorsService) Index(index string) *TermvectorsService { + s.index = index + return s +} + +// Type of the document. +func (s *TermvectorsService) Type(typ string) *TermvectorsService { + s.typ = typ + return s +} + +// Id of the document. +func (s *TermvectorsService) Id(id string) *TermvectorsService { + s.id = id + return s +} + +// Dfs specifies if distributed frequencies should be returned instead +// shard frequencies. +func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService { + s.dfs = &dfs + return s +} + +// Doc is the document to analyze. +func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService { + s.doc = doc + return s +} + +// FieldStatistics specifies if document count, sum of document frequencies +// and sum of total term frequencies should be returned. +func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService { + s.fieldStatistics = &fieldStatistics + return s +} + +// Fields a list of fields to return. +func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService { + if s.fields == nil { + s.fields = make([]string, 0) + } + s.fields = append(s.fields, fields...) + return s +} + +// Filter adds terms filter settings. 
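+//
+// A minimal sketch combining the filter with the service (illustrative
+// only; client, ctx and the index/type/field names are example values):
+//
+//	filter := NewTermvectorsFilterSettings().MinTermFreq(2).MaxNumTerms(25)
+//	res, err := NewTermvectorsService(client).
+//		Index("twitter").Type("tweet").Id("1").
+//		Fields("message").
+//		Filter(filter).
+//		Do(ctx)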
+func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService { + s.filter = filter + return s +} + +// PerFieldAnalyzer allows to specify a different analyzer than the one +// at the field. +func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService { + s.perFieldAnalyzer = perFieldAnalyzer + return s +} + +// Offsets specifies if term offsets should be returned. +func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService { + s.offsets = &offsets + return s +} + +// Parent id of documents. +func (s *TermvectorsService) Parent(parent string) *TermvectorsService { + s.parent = parent + return s +} + +// Payloads specifies if term payloads should be returned. +func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService { + s.payloads = &payloads + return s +} + +// Positions specifies if term positions should be returned. +func (s *TermvectorsService) Positions(positions bool) *TermvectorsService { + s.positions = &positions + return s +} + +// Preference specify the node or shard the operation +// should be performed on (default: random). +func (s *TermvectorsService) Preference(preference string) *TermvectorsService { + s.preference = preference + return s +} + +// Realtime specifies if request is real-time as opposed to +// near-real-time (default: true). +func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService { + s.realtime = &realtime + return s +} + +// Routing is a specific routing value. +func (s *TermvectorsService) Routing(routing string) *TermvectorsService { + s.routing = routing + return s +} + +// TermStatistics specifies if total term frequency and document frequency +// should be returned. +func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService { + s.termStatistics = &termStatistics + return s +} + +// Version an explicit version number for concurrency control. +func (s *TermvectorsService) Version(version interface{}) *TermvectorsService { + s.version = version + return s +} + +// VersionType specifies a version type ("internal", "external", "external_gte", or "force"). +func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService { + s.versionType = versionType + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService { + s.pretty = pretty + return s +} + +// BodyJson defines the body parameters. See documentation. +func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService { + s.bodyJson = body + return s +} + +// BodyString defines the body parameters as a string. See documentation. +func (s *TermvectorsService) BodyString(body string) *TermvectorsService { + s.bodyString = body + return s +} + +// buildURL builds the URL for the operation. 
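+//
+// The path is "/{index}/{type}/{id}/_termvectors" when an id is given and
+// "/{index}/{type}/_termvectors" otherwise (the document is then expected
+// in the request body via Doc, BodyJson or BodyString).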
+func (s *TermvectorsService) buildURL() (string, url.Values, error) { + var pathParam = map[string]string{ + "index": s.index, + "type": s.typ, + } + var path string + var err error + + // Build URL + if s.id != "" { + pathParam["id"] = s.id + path, err = uritemplates.Expand("/{index}/{type}/{id}/_termvectors", pathParam) + } else { + path, err = uritemplates.Expand("/{index}/{type}/_termvectors", pathParam) + } + + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if s.dfs != nil { + params.Set("dfs", fmt.Sprintf("%v", *s.dfs)) + } + if s.fieldStatistics != nil { + params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics)) + } + if len(s.fields) > 0 { + params.Set("fields", strings.Join(s.fields, ",")) + } + if s.offsets != nil { + params.Set("offsets", fmt.Sprintf("%v", *s.offsets)) + } + if s.parent != "" { + params.Set("parent", s.parent) + } + if s.payloads != nil { + params.Set("payloads", fmt.Sprintf("%v", *s.payloads)) + } + if s.positions != nil { + params.Set("positions", fmt.Sprintf("%v", *s.positions)) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.realtime != nil { + params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) + } + if s.routing != "" { + params.Set("routing", s.routing) + } + if s.termStatistics != nil { + params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", s.version)) + } + if s.versionType != "" { + params.Set("version_type", s.versionType) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *TermvectorsService) Validate() error { + var invalid []string + if s.index == "" { + invalid = append(invalid, "Index") + } + if s.typ == "" { + invalid = append(invalid, "Type") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// Do executes the operation. +func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + var body interface{} + if s.bodyJson != nil { + body = s.bodyJson + } else if s.bodyString != "" { + body = s.bodyString + } else { + data := make(map[string]interface{}) + if s.doc != nil { + data["doc"] = s.doc + } + if len(s.perFieldAnalyzer) > 0 { + data["per_field_analyzer"] = s.perFieldAnalyzer + } + if s.filter != nil { + src, err := s.filter.Source() + if err != nil { + return nil, err + } + data["filter"] = src + } + if len(data) > 0 { + body = data + } + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "GET", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response + ret := new(TermvectorsResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// -- Filter settings -- + +// TermvectorsFilterSettings adds additional filters to a Termsvector request. +// It allows to filter terms based on their tf-idf scores. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-termvectors.html#_terms_filtering +// for more information. 
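+//
+// Source() serializes the configured settings into a plain JSON object
+// that is sent under the "filter" key of the request body, e.g.
+// (values are examples only):
+//
+//	"filter": { "min_term_freq": 2, "max_num_terms": 25 }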
+type TermvectorsFilterSettings struct { + maxNumTerms *int64 + minTermFreq *int64 + maxTermFreq *int64 + minDocFreq *int64 + maxDocFreq *int64 + minWordLength *int64 + maxWordLength *int64 +} + +// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct. +func NewTermvectorsFilterSettings() *TermvectorsFilterSettings { + return &TermvectorsFilterSettings{} +} + +// MaxNumTerms specifies the maximum number of terms the must be returned per field. +func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings { + fs.maxNumTerms = &value + return fs +} + +// MinTermFreq ignores words with less than this frequency in the source doc. +func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings { + fs.minTermFreq = &value + return fs +} + +// MaxTermFreq ignores words with more than this frequency in the source doc. +func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings { + fs.maxTermFreq = &value + return fs +} + +// MinDocFreq ignores terms which do not occur in at least this many docs. +func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings { + fs.minDocFreq = &value + return fs +} + +// MaxDocFreq ignores terms which occur in more than this many docs. +func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings { + fs.maxDocFreq = &value + return fs +} + +// MinWordLength specifies the minimum word length below which words will be ignored. +func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings { + fs.minWordLength = &value + return fs +} + +// MaxWordLength specifies the maximum word length above which words will be ignored. +func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings { + fs.maxWordLength = &value + return fs +} + +// Source returns JSON for the query. +func (fs *TermvectorsFilterSettings) Source() (interface{}, error) { + source := make(map[string]interface{}) + if fs.maxNumTerms != nil { + source["max_num_terms"] = *fs.maxNumTerms + } + if fs.minTermFreq != nil { + source["min_term_freq"] = *fs.minTermFreq + } + if fs.maxTermFreq != nil { + source["max_term_freq"] = *fs.maxTermFreq + } + if fs.minDocFreq != nil { + source["min_doc_freq"] = *fs.minDocFreq + } + if fs.maxDocFreq != nil { + source["max_doc_freq"] = *fs.maxDocFreq + } + if fs.minWordLength != nil { + source["min_word_length"] = *fs.minWordLength + } + if fs.maxWordLength != nil { + source["max_word_length"] = *fs.maxWordLength + } + return source, nil +} + +// -- Response types -- + +type TokenInfo struct { + StartOffset int64 `json:"start_offset"` + EndOffset int64 `json:"end_offset"` + Position int64 `json:"position"` + Payload string `json:"payload"` +} + +type TermsInfo struct { + DocFreq int64 `json:"doc_freq"` + Score float64 `json:"score"` + TermFreq int64 `json:"term_freq"` + Ttf int64 `json:"ttf"` + Tokens []TokenInfo `json:"tokens"` +} + +type FieldStatistics struct { + DocCount int64 `json:"doc_count"` + SumDocFreq int64 `json:"sum_doc_freq"` + SumTtf int64 `json:"sum_ttf"` +} + +type TermVectorsFieldInfo struct { + FieldStatistics FieldStatistics `json:"field_statistics"` + Terms map[string]TermsInfo `json:"terms"` +} + +// TermvectorsResponse is the response of TermvectorsService.Do. 
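+//
+// A minimal sketch of reading per-field term statistics from a response
+// (res is assumed to be a *TermvectorsResponse returned by Do):
+//
+//	for field, info := range res.TermVectors {
+//		for term, stats := range info.Terms {
+//			fmt.Printf("%s/%s: term_freq=%d doc_freq=%d\n",
+//				field, term, stats.TermFreq, stats.DocFreq)
+//		}
+//	}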
+type TermvectorsResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id,omitempty"` + Version int `json:"_version"` + Found bool `json:"found"` + Took int64 `json:"took"` + TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/update.go b/vendor/gopkg.in/olivere/elastic.v5/update.go new file mode 100644 index 0000000000000000000000000000000000000000..a15d30bb9e3f99c4aef0ed1a381f15c947e2c72f --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/update.go @@ -0,0 +1,293 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// UpdateService updates a document in Elasticsearch. +// See https://www.elastic.co/guide/en/elasticsearch/reference/5.2/docs-update.html +// for details. +type UpdateService struct { + client *Client + index string + typ string + id string + routing string + parent string + script *Script + fields []string + version *int64 + versionType string + retryOnConflict *int + refresh string + waitForActiveShards string + upsert interface{} + scriptedUpsert *bool + docAsUpsert *bool + detectNoop *bool + doc interface{} + timeout string + pretty bool +} + +// NewUpdateService creates the service to update documents in Elasticsearch. +func NewUpdateService(client *Client) *UpdateService { + builder := &UpdateService{ + client: client, + fields: make([]string, 0), + } + return builder +} + +// Index is the name of the Elasticsearch index (required). +func (b *UpdateService) Index(name string) *UpdateService { + b.index = name + return b +} + +// Type is the type of the document (required). +func (b *UpdateService) Type(typ string) *UpdateService { + b.typ = typ + return b +} + +// Id is the identifier of the document to update (required). +func (b *UpdateService) Id(id string) *UpdateService { + b.id = id + return b +} + +// Routing specifies a specific routing value. +func (b *UpdateService) Routing(routing string) *UpdateService { + b.routing = routing + return b +} + +// Parent sets the id of the parent document. +func (b *UpdateService) Parent(parent string) *UpdateService { + b.parent = parent + return b +} + +// Script is the script definition. +func (b *UpdateService) Script(script *Script) *UpdateService { + b.script = script + return b +} + +// RetryOnConflict specifies how many times the operation should be retried +// when a conflict occurs (default: 0). +func (b *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService { + b.retryOnConflict = &retryOnConflict + return b +} + +// Fields is a list of fields to return in the response. +func (b *UpdateService) Fields(fields ...string) *UpdateService { + b.fields = make([]string, 0, len(fields)) + b.fields = append(b.fields, fields...) + return b +} + +// Version defines the explicit version number for concurrency control. +func (b *UpdateService) Version(version int64) *UpdateService { + b.version = &version + return b +} + +// VersionType is one of "internal" or "force". +func (b *UpdateService) VersionType(versionType string) *UpdateService { + b.versionType = versionType + return b +} + +// Refresh the index after performing the update. 
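+// For Elasticsearch 5.x the accepted values are typically "true", "false"
+// and "wait_for"; see the official documentation of the ?refresh parameter.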
+func (b *UpdateService) Refresh(refresh string) *UpdateService { + b.refresh = refresh + return b +} + +// WaitForActiveShards sets the number of shard copies that must be active before +// proceeding with the update operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than +// or equal to the total number of copies for the shard (number of replicas + 1). +func (b *UpdateService) WaitForActiveShards(waitForActiveShards string) *UpdateService { + b.waitForActiveShards = waitForActiveShards + return b +} + +// Doc allows for updating a partial document. +func (b *UpdateService) Doc(doc interface{}) *UpdateService { + b.doc = doc + return b +} + +// Upsert can be used to index the document when it doesn't exist yet. +// Use this e.g. to initialize a document with a default value. +func (b *UpdateService) Upsert(doc interface{}) *UpdateService { + b.upsert = doc + return b +} + +// DocAsUpsert can be used to insert the document if it doesn't already exist. +func (b *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService { + b.docAsUpsert = &docAsUpsert + return b +} + +// DetectNoop will instruct Elasticsearch to check if changes will occur +// when updating via Doc. It there aren't any changes, the request will +// turn into a no-op. +func (b *UpdateService) DetectNoop(detectNoop bool) *UpdateService { + b.detectNoop = &detectNoop + return b +} + +// ScriptedUpsert should be set to true if the referenced script +// (defined in Script or ScriptId) should be called to perform an insert. +// The default is false. +func (b *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService { + b.scriptedUpsert = &scriptedUpsert + return b +} + +// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms". +func (b *UpdateService) Timeout(timeout string) *UpdateService { + b.timeout = timeout + return b +} + +// Pretty instructs to return human readable, prettified JSON. +func (b *UpdateService) Pretty(pretty bool) *UpdateService { + b.pretty = pretty + return b +} + +// url returns the URL part of the document request. +func (b *UpdateService) url() (string, url.Values, error) { + // Build url + path := "/{index}/{type}/{id}/_update" + path, err := uritemplates.Expand(path, map[string]string{ + "index": b.index, + "type": b.typ, + "id": b.id, + }) + if err != nil { + return "", url.Values{}, err + } + + // Parameters + params := make(url.Values) + if b.pretty { + params.Set("pretty", "true") + } + if b.routing != "" { + params.Set("routing", b.routing) + } + if b.parent != "" { + params.Set("parent", b.parent) + } + if b.timeout != "" { + params.Set("timeout", b.timeout) + } + if b.refresh != "" { + params.Set("refresh", b.refresh) + } + if b.waitForActiveShards != "" { + params.Set("wait_for_active_shards", b.waitForActiveShards) + } + if len(b.fields) > 0 { + params.Set("fields", strings.Join(b.fields, ",")) + } + if b.version != nil { + params.Set("version", fmt.Sprintf("%d", *b.version)) + } + if b.versionType != "" { + params.Set("version_type", b.versionType) + } + if b.retryOnConflict != nil { + params.Set("retry_on_conflict", fmt.Sprintf("%v", *b.retryOnConflict)) + } + + return path, params, nil +} + +// body returns the body part of the document request. 
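+//
+// For a partial-document update with doc-as-upsert enabled the generated
+// request body looks like (field values are examples only):
+//
+//	{ "doc": { "retweets": 0 }, "doc_as_upsert": true }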
+func (b *UpdateService) body() (interface{}, error) { + source := make(map[string]interface{}) + + if b.script != nil { + src, err := b.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + + if b.scriptedUpsert != nil { + source["scripted_upsert"] = *b.scriptedUpsert + } + + if b.upsert != nil { + source["upsert"] = b.upsert + } + + if b.doc != nil { + source["doc"] = b.doc + } + if b.docAsUpsert != nil { + source["doc_as_upsert"] = *b.docAsUpsert + } + if b.detectNoop != nil { + source["detect_noop"] = *b.detectNoop + } + + return source, nil +} + +// Do executes the update operation. +func (b *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) { + path, params, err := b.url() + if err != nil { + return nil, err + } + + // Get body of the request + body, err := b.body() + if err != nil { + return nil, err + } + + // Get response + res, err := b.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return result + ret := new(UpdateResponse) + if err := b.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} + +// UpdateResponse is the result of updating a document in Elasticsearch. +type UpdateResponse struct { + Index string `json:"_index"` + Type string `json:"_type"` + Id string `json:"_id"` + Version int `json:"_version"` + Created bool `json:"created"` + GetResult *GetResult `json:"get"` +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go b/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go new file mode 100644 index 0000000000000000000000000000000000000000..a4e7bf25b8180074cb3459d3c52810e2fe631f42 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/update_by_query.go @@ -0,0 +1,651 @@ +// Copyright 2012-present Oliver Eilhard. All rights reserved. +// Use of this source code is governed by a MIT-license. +// See http://olivere.mit-license.org/license.txt for details. + +package elastic + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/context" + + "gopkg.in/olivere/elastic.v5/uritemplates" +) + +// UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html. +type UpdateByQueryService struct { + client *Client + pretty bool + index []string + typ []string + script *Script + query Query + body interface{} + xSource []string + xSourceExclude []string + xSourceInclude []string + allowNoIndices *bool + analyzeWildcard *bool + analyzer string + conflicts string + defaultOperator string + docvalueFields []string + df string + expandWildcards string + explain *bool + fielddataFields []string + from *int + ignoreUnavailable *bool + lenient *bool + lowercaseExpandedTerms *bool + pipeline string + preference string + q string + refresh string + requestCache *bool + requestsPerSecond *int + routing []string + scroll string + scrollSize *int + searchTimeout string + searchType string + size *int + sort []string + stats []string + storedFields []string + suggestField string + suggestMode string + suggestSize *int + suggestText string + terminateAfter *int + timeout string + trackScores *bool + version *bool + versionType *bool + waitForActiveShards string + waitForCompletion *bool +} + +// NewUpdateByQueryService creates a new UpdateByQueryService. 
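+//
+// A minimal usage sketch (illustrative only; client, ctx and the index
+// name are assumptions, and the query/script constructors are defined
+// elsewhere in this package):
+//
+//	res, err := NewUpdateByQueryService(client).
+//		Index("twitter").
+//		Query(NewTermQuery("user", "olivere")).
+//		Script(NewScript("ctx._source.retweets++")).
+//		Do(ctx)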
+func NewUpdateByQueryService(client *Client) *UpdateByQueryService { + return &UpdateByQueryService{ + client: client, + } +} + +// Index is a list of index names to search; use `_all` or empty string to +// perform the operation on all indices. +func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService { + s.index = append(s.index, index...) + return s +} + +// Type is a list of document types to search; leave empty to perform +// the operation on all types. +func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService { + s.typ = append(s.typ, typ...) + return s +} + +// Pretty indicates that the JSON response be indented and human readable. +func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService { + s.pretty = pretty + return s +} + +// Script sets an update script. +func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService { + s.script = script + return s +} + +// Body specifies the body of the request. It overrides data being specified via +// SearchService or Script. +func (s *UpdateByQueryService) Body(body string) *UpdateByQueryService { + s.body = body + return s +} + +// XSource is true or false to return the _source field or not, +// or a list of fields to return. +func (s *UpdateByQueryService) XSource(xSource ...string) *UpdateByQueryService { + s.xSource = append(s.xSource, xSource...) + return s +} + +// XSourceExclude represents a list of fields to exclude from the returned _source field. +func (s *UpdateByQueryService) XSourceExclude(xSourceExclude ...string) *UpdateByQueryService { + s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) + return s +} + +// XSourceInclude represents a list of fields to extract and return from the _source field. +func (s *UpdateByQueryService) XSourceInclude(xSourceInclude ...string) *UpdateByQueryService { + s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) + return s +} + +// AllowNoIndices indicates whether to ignore if a wildcard indices expression +// resolves into no concrete indices. (This includes `_all` string or when +// no indices have been specified). +func (s *UpdateByQueryService) AllowNoIndices(allowNoIndices bool) *UpdateByQueryService { + s.allowNoIndices = &allowNoIndices + return s +} + +// AnalyzeWildcard specifies whether wildcard and prefix queries should be +// analyzed (default: false). +func (s *UpdateByQueryService) AnalyzeWildcard(analyzeWildcard bool) *UpdateByQueryService { + s.analyzeWildcard = &analyzeWildcard + return s +} + +// Analyzer specifies the analyzer to use for the query string. +func (s *UpdateByQueryService) Analyzer(analyzer string) *UpdateByQueryService { + s.analyzer = analyzer + return s +} + +// Conflicts indicates what to do when the process detects version conflicts. +// Possible values are "proceed" and "abort". +func (s *UpdateByQueryService) Conflicts(conflicts string) *UpdateByQueryService { + s.conflicts = conflicts + return s +} + +// AbortOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("abort"). +func (s *UpdateByQueryService) AbortOnVersionConflict() *UpdateByQueryService { + s.conflicts = "abort" + return s +} + +// ProceedOnVersionConflict aborts the request on version conflicts. +// It is an alias to setting Conflicts("proceed"). +func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService { + s.conflicts = "proceed" + return s +} + +// DefaultOperator is the default operator for query string query (AND or OR). 
+func (s *UpdateByQueryService) DefaultOperator(defaultOperator string) *UpdateByQueryService { + s.defaultOperator = defaultOperator + return s +} + +// DF specifies the field to use as default where no field prefix is given in the query string. +func (s *UpdateByQueryService) DF(df string) *UpdateByQueryService { + s.df = df + return s +} + +// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. +func (s *UpdateByQueryService) DocvalueFields(docvalueFields ...string) *UpdateByQueryService { + s.docvalueFields = docvalueFields + return s +} + +// ExpandWildcards indicates whether to expand wildcard expression to +// concrete indices that are open, closed or both. +func (s *UpdateByQueryService) ExpandWildcards(expandWildcards string) *UpdateByQueryService { + s.expandWildcards = expandWildcards + return s +} + +// Explain specifies whether to return detailed information about score +// computation as part of a hit. +func (s *UpdateByQueryService) Explain(explain bool) *UpdateByQueryService { + s.explain = &explain + return s +} + +// FielddataFields is a list of fields to return as the field data +// representation of a field for each hit. +func (s *UpdateByQueryService) FielddataFields(fielddataFields ...string) *UpdateByQueryService { + s.fielddataFields = append(s.fielddataFields, fielddataFields...) + return s +} + +// From is the starting offset (default: 0). +func (s *UpdateByQueryService) From(from int) *UpdateByQueryService { + s.from = &from + return s +} + +// IgnoreUnavailable indicates whether specified concrete indices should be +// ignored when unavailable (missing or closed). +func (s *UpdateByQueryService) IgnoreUnavailable(ignoreUnavailable bool) *UpdateByQueryService { + s.ignoreUnavailable = &ignoreUnavailable + return s +} + +// Lenient specifies whether format-based query failures +// (such as providing text to a numeric field) should be ignored. +func (s *UpdateByQueryService) Lenient(lenient bool) *UpdateByQueryService { + s.lenient = &lenient + return s +} + +// LowercaseExpandedTerms specifies whether query terms should be lowercased. +func (s *UpdateByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *UpdateByQueryService { + s.lowercaseExpandedTerms = &lowercaseExpandedTerms + return s +} + +// Pipeline specifies the ingest pipeline to set on index requests made by this action (default: none). +func (s *UpdateByQueryService) Pipeline(pipeline string) *UpdateByQueryService { + s.pipeline = pipeline + return s +} + +// Preference specifies the node or shard the operation should be performed on +// (default: random). +func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryService { + s.preference = preference + return s +} + +// Q specifies the query in the Lucene query string syntax. +func (s *UpdateByQueryService) Q(q string) *UpdateByQueryService { + s.q = q + return s +} + +// Query sets a query definition using the Query DSL. +func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService { + s.query = query + return s +} + +// Refresh indicates whether the effected indexes should be refreshed. +func (s *UpdateByQueryService) Refresh(refresh string) *UpdateByQueryService { + s.refresh = refresh + return s +} + +// RequestCache specifies if request cache should be used for this request +// or not, defaults to index level setting. 
+func (s *UpdateByQueryService) RequestCache(requestCache bool) *UpdateByQueryService { + s.requestCache = &requestCache + return s +} + +// RequestsPerSecond sets the throttle on this request in sub-requests per second. +// -1 means set no throttle as does "unlimited" which is the only non-float this accepts. +func (s *UpdateByQueryService) RequestsPerSecond(requestsPerSecond int) *UpdateByQueryService { + s.requestsPerSecond = &requestsPerSecond + return s +} + +// Routing is a list of specific routing values. +func (s *UpdateByQueryService) Routing(routing ...string) *UpdateByQueryService { + s.routing = append(s.routing, routing...) + return s +} + +// Scroll specifies how long a consistent view of the index should be maintained +// for scrolled search. +func (s *UpdateByQueryService) Scroll(scroll string) *UpdateByQueryService { + s.scroll = scroll + return s +} + +// ScrollSize is the size on the scroll request powering the update_by_query. +func (s *UpdateByQueryService) ScrollSize(scrollSize int) *UpdateByQueryService { + s.scrollSize = &scrollSize + return s +} + +// SearchTimeout defines an explicit timeout for each search request. +// Defaults to no timeout. +func (s *UpdateByQueryService) SearchTimeout(searchTimeout string) *UpdateByQueryService { + s.searchTimeout = searchTimeout + return s +} + +// SearchType is the search operation type. Possible values are +// "query_then_fetch" and "dfs_query_then_fetch". +func (s *UpdateByQueryService) SearchType(searchType string) *UpdateByQueryService { + s.searchType = searchType + return s +} + +// Size represents the number of hits to return (default: 10). +func (s *UpdateByQueryService) Size(size int) *UpdateByQueryService { + s.size = &size + return s +} + +// Sort is a list of <field>:<direction> pairs. +func (s *UpdateByQueryService) Sort(sort ...string) *UpdateByQueryService { + s.sort = append(s.sort, sort...) + return s +} + +// SortByField adds a sort order. +func (s *UpdateByQueryService) SortByField(field string, ascending bool) *UpdateByQueryService { + if ascending { + s.sort = append(s.sort, fmt.Sprintf("%s:asc", field)) + } else { + s.sort = append(s.sort, fmt.Sprintf("%s:desc", field)) + } + return s +} + +// Stats specifies specific tag(s) of the request for logging and statistical purposes. +func (s *UpdateByQueryService) Stats(stats ...string) *UpdateByQueryService { + s.stats = append(s.stats, stats...) + return s +} + +// StoredFields specifies the list of stored fields to return as part of a hit. +func (s *UpdateByQueryService) StoredFields(storedFields ...string) *UpdateByQueryService { + s.storedFields = storedFields + return s +} + +// SuggestField specifies which field to use for suggestions. +func (s *UpdateByQueryService) SuggestField(suggestField string) *UpdateByQueryService { + s.suggestField = suggestField + return s +} + +// SuggestMode specifies the suggest mode. Possible values are +// "missing", "popular", and "always". +func (s *UpdateByQueryService) SuggestMode(suggestMode string) *UpdateByQueryService { + s.suggestMode = suggestMode + return s +} + +// SuggestSize specifies how many suggestions to return in response. +func (s *UpdateByQueryService) SuggestSize(suggestSize int) *UpdateByQueryService { + s.suggestSize = &suggestSize + return s +} + +// SuggestText specifies the source text for which the suggestions should be returned. 
+func (s *UpdateByQueryService) SuggestText(suggestText string) *UpdateByQueryService { + s.suggestText = suggestText + return s +} + +// TerminateAfter indicates the maximum number of documents to collect +// for each shard, upon reaching which the query execution will terminate early. +func (s *UpdateByQueryService) TerminateAfter(terminateAfter int) *UpdateByQueryService { + s.terminateAfter = &terminateAfter + return s +} + +// Timeout is the time each individual bulk request should wait for shards +// that are unavailable. +func (s *UpdateByQueryService) Timeout(timeout string) *UpdateByQueryService { + s.timeout = timeout + return s +} + +// TimeoutInMillis sets the timeout in milliseconds. +func (s *UpdateByQueryService) TimeoutInMillis(timeoutInMillis int) *UpdateByQueryService { + s.timeout = fmt.Sprintf("%dms", timeoutInMillis) + return s +} + +// TrackScores indicates whether to calculate and return scores even if +// they are not used for sorting. +func (s *UpdateByQueryService) TrackScores(trackScores bool) *UpdateByQueryService { + s.trackScores = &trackScores + return s +} + +// Version specifies whether to return document version as part of a hit. +func (s *UpdateByQueryService) Version(version bool) *UpdateByQueryService { + s.version = &version + return s +} + +// VersionType indicates if the document increment the version number (internal) +// on hit or not (reindex). +func (s *UpdateByQueryService) VersionType(versionType bool) *UpdateByQueryService { + s.versionType = &versionType + return s +} + +// WaitForActiveShards sets the number of shard copies that must be active before proceeding +// with the update by query operation. Defaults to 1, meaning the primary shard only. +// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal +// to the total number of copies for the shard (number of replicas + 1). +func (s *UpdateByQueryService) WaitForActiveShards(waitForActiveShards string) *UpdateByQueryService { + s.waitForActiveShards = waitForActiveShards + return s +} + +// WaitForCompletion indicates if the request should block until the reindex is complete. +func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService { + s.waitForCompletion = &waitForCompletion + return s +} + +// buildURL builds the URL for the operation. 
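+//
+// The path expands to "/{index}/_update_by_query", or
+// "/{index}/{type}/_update_by_query" when document types are given,
+// followed by the query-string options set on the service, e.g.
+// (hypothetical values):
+//
+//	POST /twitter/_update_by_query?conflicts=proceed&refresh=true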
+func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { + // Build URL + var err error + var path string + if len(s.typ) > 0 { + path, err = uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + "type": strings.Join(s.typ, ","), + }) + } else { + path, err = uritemplates.Expand("/{index}/_update_by_query", map[string]string{ + "index": strings.Join(s.index, ","), + }) + } + if err != nil { + return "", url.Values{}, err + } + + // Add query string parameters + params := url.Values{} + if s.pretty { + params.Set("pretty", "1") + } + if len(s.xSource) > 0 { + params.Set("_source", strings.Join(s.xSource, ",")) + } + if len(s.xSourceExclude) > 0 { + params.Set("_source_exclude", strings.Join(s.xSourceExclude, ",")) + } + if len(s.xSourceInclude) > 0 { + params.Set("_source_include", strings.Join(s.xSourceInclude, ",")) + } + if s.allowNoIndices != nil { + params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) + } + if s.analyzer != "" { + params.Set("analyzer", s.analyzer) + } + if s.analyzeWildcard != nil { + params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) + } + if s.conflicts != "" { + params.Set("conflicts", s.conflicts) + } + if s.defaultOperator != "" { + params.Set("default_operator", s.defaultOperator) + } + if s.df != "" { + params.Set("df", s.df) + } + if s.expandWildcards != "" { + params.Set("expand_wildcards", s.expandWildcards) + } + if s.explain != nil { + params.Set("explain", fmt.Sprintf("%v", *s.explain)) + } + if len(s.storedFields) > 0 { + params.Set("stored_fields", strings.Join(s.storedFields, ",")) + } + if len(s.docvalueFields) > 0 { + params.Set("docvalue_fields", strings.Join(s.docvalueFields, ",")) + } + if len(s.fielddataFields) > 0 { + params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) + } + if s.from != nil { + params.Set("from", fmt.Sprintf("%d", *s.from)) + } + if s.ignoreUnavailable != nil { + params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) + } + if s.lenient != nil { + params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) + } + if s.lowercaseExpandedTerms != nil { + params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) + } + if s.pipeline != "" { + params.Set("pipeline", s.pipeline) + } + if s.preference != "" { + params.Set("preference", s.preference) + } + if s.q != "" { + params.Set("q", s.q) + } + if s.refresh != "" { + params.Set("refresh", s.refresh) + } + if s.requestCache != nil { + params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) + } + if len(s.routing) > 0 { + params.Set("routing", strings.Join(s.routing, ",")) + } + if s.scroll != "" { + params.Set("scroll", s.scroll) + } + if s.scrollSize != nil { + params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) + } + if s.searchTimeout != "" { + params.Set("search_timeout", s.searchTimeout) + } + if s.searchType != "" { + params.Set("search_type", s.searchType) + } + if s.size != nil { + params.Set("size", fmt.Sprintf("%d", *s.size)) + } + if len(s.sort) > 0 { + params.Set("sort", strings.Join(s.sort, ",")) + } + if len(s.stats) > 0 { + params.Set("stats", strings.Join(s.stats, ",")) + } + if s.suggestField != "" { + params.Set("suggest_field", s.suggestField) + } + if s.suggestMode != "" { + params.Set("suggest_mode", s.suggestMode) + } + if s.suggestSize != nil { + params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) + } + if s.suggestText != "" { + params.Set("suggest_text", s.suggestText) 
+ } + if s.terminateAfter != nil { + params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) + } + if s.timeout != "" { + params.Set("timeout", s.timeout) + } + if s.trackScores != nil { + params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores)) + } + if s.version != nil { + params.Set("version", fmt.Sprintf("%v", *s.version)) + } + if s.versionType != nil { + params.Set("version_type", fmt.Sprintf("%v", *s.versionType)) + } + if s.waitForActiveShards != "" { + params.Set("wait_for_active_shards", s.waitForActiveShards) + } + if s.waitForCompletion != nil { + params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) + } + if s.requestsPerSecond != nil { + params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) + } + return path, params, nil +} + +// Validate checks if the operation is valid. +func (s *UpdateByQueryService) Validate() error { + var invalid []string + if len(s.index) == 0 { + invalid = append(invalid, "Index") + } + if len(invalid) > 0 { + return fmt.Errorf("missing required fields: %v", invalid) + } + return nil +} + +// getBody returns the body part of the document request. +func (s *UpdateByQueryService) getBody() (interface{}, error) { + if s.body != nil { + return s.body, nil + } + source := make(map[string]interface{}) + if s.script != nil { + src, err := s.script.Source() + if err != nil { + return nil, err + } + source["script"] = src + } + if s.query != nil { + src, err := s.query.Source() + if err != nil { + return nil, err + } + source["query"] = src + } + return source, nil +} + +// Do executes the operation. +func (s *UpdateByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { + // Check pre-conditions + if err := s.Validate(); err != nil { + return nil, err + } + + // Get URL for request + path, params, err := s.buildURL() + if err != nil { + return nil, err + } + + // Setup HTTP request body + body, err := s.getBody() + if err != nil { + return nil, err + } + + // Get HTTP response + res, err := s.client.PerformRequest(ctx, "POST", path, params, body) + if err != nil { + return nil, err + } + + // Return operation response (BulkIndexByScrollResponse is defined in DeleteByQuery) + ret := new(BulkIndexByScrollResponse) + if err := s.client.decoder.Decode(res.Body, ret); err != nil { + return nil, err + } + return ret, nil +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/uritemplates/LICENSE b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..de9c88cb65cb3bac239115d3a7c015843842d5e2 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/LICENSE @@ -0,0 +1,18 @@ +Copyright (c) 2013 Joshua Tacoma + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go new file mode 100644 index 0000000000000000000000000000000000000000..8a84813fe52e3930f07f599b8b228c80201671e0 --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/uritemplates.go @@ -0,0 +1,359 @@ +// Copyright 2013 Joshua Tacoma. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uritemplates is a level 4 implementation of RFC 6570 (URI +// Template, http://tools.ietf.org/html/rfc6570). +// +// To use uritemplates, parse a template string and expand it with a value +// map: +// +// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") +// values := make(map[string]interface{}) +// values["user"] = "jtacoma" +// values["repo"] = "uritemplates" +// expanded, _ := template.ExpandString(values) +// fmt.Printf(expanded) +// +package uritemplates + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") + reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") + validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") + hex = []byte("0123456789ABCDEF") +) + +func pctEncode(src []byte) []byte { + dst := make([]byte, len(src)*3) + for i, b := range src { + buf := dst[i*3 : i*3+3] + buf[0] = 0x25 + buf[1] = hex[b/16] + buf[2] = hex[b%16] + } + return dst +} + +func escape(s string, allowReserved bool) (escaped string) { + if allowReserved { + escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) + } else { + escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) + } + return escaped +} + +// A UriTemplate is a parsed representation of a URI template. +type UriTemplate struct { + raw string + parts []templatePart +} + +// Parse parses a URI template string into a UriTemplate object. +func Parse(rawtemplate string) (template *UriTemplate, err error) { + template = new(UriTemplate) + template.raw = rawtemplate + split := strings.Split(rawtemplate, "{") + template.parts = make([]templatePart, len(split)*2-1) + for i, s := range split { + if i == 0 { + if strings.Contains(s, "}") { + err = errors.New("unexpected }") + break + } + template.parts[i].raw = s + } else { + subsplit := strings.Split(s, "}") + if len(subsplit) != 2 { + err = errors.New("malformed template") + break + } + expression := subsplit[0] + template.parts[i*2-1], err = parseExpression(expression) + if err != nil { + break + } + template.parts[i*2].raw = subsplit[1] + } + } + if err != nil { + template = nil + } + return template, err +} + +type templatePart struct { + raw string + terms []templateTerm + first string + sep string + named bool + ifemp string + allowReserved bool +} + +type templateTerm struct { + name string + explode bool + truncate int +} + +func parseExpression(expression string) (result templatePart, err error) { + switch expression[0] { + case '+': + result.sep = "," + result.allowReserved = true + expression = expression[1:] + case '.': + result.first = "." + result.sep = "." 
+ expression = expression[1:] + case '/': + result.first = "/" + result.sep = "/" + expression = expression[1:] + case ';': + result.first = ";" + result.sep = ";" + result.named = true + expression = expression[1:] + case '?': + result.first = "?" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '&': + result.first = "&" + result.sep = "&" + result.named = true + result.ifemp = "=" + expression = expression[1:] + case '#': + result.first = "#" + result.sep = "," + result.allowReserved = true + expression = expression[1:] + default: + result.sep = "," + } + rawterms := strings.Split(expression, ",") + result.terms = make([]templateTerm, len(rawterms)) + for i, raw := range rawterms { + result.terms[i], err = parseTerm(raw) + if err != nil { + break + } + } + return result, err +} + +func parseTerm(term string) (result templateTerm, err error) { + if strings.HasSuffix(term, "*") { + result.explode = true + term = term[:len(term)-1] + } + split := strings.Split(term, ":") + if len(split) == 1 { + result.name = term + } else if len(split) == 2 { + result.name = split[0] + var parsed int64 + parsed, err = strconv.ParseInt(split[1], 10, 0) + result.truncate = int(parsed) + } else { + err = errors.New("multiple colons in same term") + } + if !validname.MatchString(result.name) { + err = errors.New("not a valid name: " + result.name) + } + if result.explode && result.truncate > 0 { + err = errors.New("both explode and prefix modifers on same term") + } + return result, err +} + +// Expand expands a URI template with a set of values to produce a string. +func (self *UriTemplate) Expand(value interface{}) (string, error) { + values, ismap := value.(map[string]interface{}) + if !ismap { + if m, ismap := struct2map(value); !ismap { + return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") + } else { + return self.Expand(m) + } + } + var buf bytes.Buffer + for _, p := range self.parts { + err := p.expand(&buf, values) + if err != nil { + return "", err + } + } + return buf.String(), nil +} + +func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { + if len(self.raw) > 0 { + buf.WriteString(self.raw) + return nil + } + var zeroLen = buf.Len() + buf.WriteString(self.first) + var firstLen = buf.Len() + for _, term := range self.terms { + value, exists := values[term.name] + if !exists { + continue + } + if buf.Len() != firstLen { + buf.WriteString(self.sep) + } + switch v := value.(type) { + case string: + self.expandString(buf, term, v) + case []interface{}: + self.expandArray(buf, term, v) + case map[string]interface{}: + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, v) + default: + if m, ismap := struct2map(value); ismap { + if term.truncate > 0 { + return errors.New("cannot truncate a map expansion") + } + self.expandMap(buf, term, m) + } else { + str := fmt.Sprintf("%v", value) + self.expandString(buf, term, str) + } + } + } + if buf.Len() == firstLen { + original := buf.Bytes()[:zeroLen] + buf.Reset() + buf.Write(original) + } + return nil +} + +func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { + if self.named { + buf.WriteString(name) + if empty { + buf.WriteString(self.ifemp) + } else { + buf.WriteString("=") + } + } +} + +func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + 
self.expandName(buf, t.name, len(s) == 0) + buf.WriteString(escape(s, self.allowReserved)) +} + +func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { + if len(a) == 0 { + return + } else if !t.explode { + self.expandName(buf, t.name, false) + } + for i, value := range a { + if t.explode && i > 0 { + buf.WriteString(self.sep) + } else if i > 0 { + buf.WriteString(",") + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if len(s) > t.truncate && t.truncate > 0 { + s = s[:t.truncate] + } + if self.named && t.explode { + self.expandName(buf, t.name, len(s) == 0) + } + buf.WriteString(escape(s, self.allowReserved)) + } +} + +func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { + if len(m) == 0 { + return + } + if !t.explode { + self.expandName(buf, t.name, len(m) == 0) + } + var firstLen = buf.Len() + for k, value := range m { + if firstLen != buf.Len() { + if t.explode { + buf.WriteString(self.sep) + } else { + buf.WriteString(",") + } + } + var s string + switch v := value.(type) { + case string: + s = v + default: + s = fmt.Sprintf("%v", v) + } + if t.explode { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune('=') + buf.WriteString(escape(s, self.allowReserved)) + } else { + buf.WriteString(escape(k, self.allowReserved)) + buf.WriteRune(',') + buf.WriteString(escape(s, self.allowReserved)) + } + } +} + +func struct2map(v interface{}) (map[string]interface{}, bool) { + value := reflect.ValueOf(v) + switch value.Type().Kind() { + case reflect.Ptr: + return struct2map(value.Elem().Interface()) + case reflect.Struct: + m := make(map[string]interface{}) + for i := 0; i < value.NumField(); i++ { + tag := value.Type().Field(i).Tag + var name string + if strings.Contains(string(tag), ":") { + name = tag.Get("uri") + } else { + name = strings.TrimSpace(string(tag)) + } + if len(name) == 0 { + name = value.Type().Field(i).Name + } + m[name] = value.Field(i).Interface() + } + return m, true + } + return nil, false +} diff --git a/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils.go b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils.go new file mode 100644 index 0000000000000000000000000000000000000000..399ef4623698be7b00eaddbb84608355e9ae0bed --- /dev/null +++ b/vendor/gopkg.in/olivere/elastic.v5/uritemplates/utils.go @@ -0,0 +1,13 @@ +package uritemplates + +func Expand(path string, expansions map[string]string) (string, error) { + template, err := Parse(path) + if err != nil { + return "", err + } + values := make(map[string]interface{}) + for k, v := range expansions { + values[k] = v + } + return template.Expand(values) +} diff --git a/vendor/gopkg.in/src-d/go-billy.v2/LICENSE b/vendor/gopkg.in/src-d/go-billy.v2/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..853832aed69ee0116bda3d6500b7f1bd1eabf741 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-billy.v2/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2016 Sourced Technologies S.L. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/gopkg.in/src-d/go-billy.v2/Makefile b/vendor/gopkg.in/src-d/go-billy.v2/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..19e743378c268a7a51957c103a8c9b5f9d753ccb --- /dev/null +++ b/vendor/gopkg.in/src-d/go-billy.v2/Makefile @@ -0,0 +1,25 @@ +# General +WORKDIR = $(PWD) + +# Go parameters +GOCMD = go +GOTEST = $(GOCMD) test -v + +# Coverage +COVERAGE_REPORT = coverage.txt +COVERAGE_PROFILE = profile.out +COVERAGE_MODE = atomic + +test-coverage: + cd $(WORKDIR); \ + echo "" > $(COVERAGE_REPORT); \ + for dir in `find . -name "*.go" | grep -o '.*/' | sort | uniq`; do \ + $(GOTEST) $$dir -coverprofile=$(COVERAGE_PROFILE) -covermode=$(COVERAGE_MODE); \ + if [ $$? != 0 ]; then \ + exit 2; \ + fi; \ + if [ -f $(COVERAGE_PROFILE) ]; then \ + cat $(COVERAGE_PROFILE) >> $(COVERAGE_REPORT); \ + rm $(COVERAGE_PROFILE); \ + fi; \ + done; \ diff --git a/vendor/gopkg.in/src-d/go-billy.v2/README.md b/vendor/gopkg.in/src-d/go-billy.v2/README.md new file mode 100644 index 0000000000000000000000000000000000000000..14bdf34b8b427447da856eb37bbc38552ab3546c --- /dev/null +++ b/vendor/gopkg.in/src-d/go-billy.v2/README.md @@ -0,0 +1,62 @@ +# go-billy [](https://godoc.org/gopkg.in/src-d/go-billy.v2) [](https://travis-ci.org/src-d/go-billy) [](https://codebeat.co/projects/github-com-src-d-go-billy) + +An interface to abstract several storages. + +This library was extracted from +[src-d/go-git](https://github.com/src-d/go-git). + +## Installation + +```go +go get -u gopkg.in/src-d/go-billy.v2/... +``` + +## Why billy? + +The library billy deals with storage systems and Billy is the name of a well-known, IKEA +bookcase. That's it. + +## Usage + +Billy exposes filesystems using the +[`Filesystem` interface](https://godoc.org/github.com/src-d/go-billy#Filesystem). +Each filesystem implementation gives you a `New` method, whose arguments depend on +the implementation itself, that returns a new `Filesystem`. + +The following example caches in memory all readable files in a directory from any +billy's filesystem implementation. 
+ +```go +func LoadToMemory(fs billy.Filesystem, path string) (*memory.Memory, error) { + memory := memory.New() + + files, err := fs.ReadDir("/") + if err != nil { + return nil, err + } + + for _, file := range files { + if !file.IsDir() { + orig, err := fs.Open(file.Name()) + if err != nil { + continue + } + + dest, err := memory.Create(file.Name()) + if err != nil { + continue + } + + if _, err = io.Copy(dest, orig); err != nil { + return nil, err + } + } + } + + return memory, nil +} +``` + +## License + +MIT, see [LICENSE](LICENSE) diff --git a/vendor/gopkg.in/src-d/go-billy.v2/fs.go b/vendor/gopkg.in/src-d/go-billy.v2/fs.go new file mode 100644 index 0000000000000000000000000000000000000000..77cf25f3445d38b9e28fdba4f7f506b67f17df33 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-billy.v2/fs.go @@ -0,0 +1,146 @@ +package billy + +import ( + "errors" + "io" + "os" +) + +var ( + ErrClosed = errors.New("file: Writing on closed file.") + ErrReadOnly = errors.New("this is a read-only filesystem") + ErrNotSupported = errors.New("feature not supported") +) + +// Filesystem abstract the operations in a storage-agnostic interface. +// It allows you to: +// * Create files. +// * Open existing files. +// * Get info about files. +// * List files in a directory. +// * Get a temporal file. +// * Rename files. +// * Remove files. +// * Create directories. +// * Join parts of path. +// * Obtain a filesystem starting on a subdirectory in the current filesystem. +// * Get the base path for the filesystem. +// Each method implementation varies from implementation to implementation. Refer to +// the specific documentation for more info. +type Filesystem interface { + Create(filename string) (File, error) + Open(filename string) (File, error) + OpenFile(filename string, flag int, perm os.FileMode) (File, error) + Stat(filename string) (FileInfo, error) + ReadDir(path string) ([]FileInfo, error) + TempFile(dir, prefix string) (File, error) + Rename(from, to string) error + Remove(filename string) error + MkdirAll(filename string, perm os.FileMode) error + Join(elem ...string) string + Dir(path string) Filesystem + Base() string +} + +// File implements io.Closer, io.Reader, io.Seeker, and io.Writer> +// Provides method to obtain the file name and the state of the file (open or closed). +type File interface { + Filename() string + IsClosed() bool + io.Writer + io.Reader + io.Seeker + io.Closer +} + +type FileInfo os.FileInfo + +type BaseFile struct { + BaseFilename string + Closed bool +} + +func (f *BaseFile) Filename() string { + return f.BaseFilename +} + +func (f *BaseFile) IsClosed() bool { + return f.Closed +} + +type removerAll interface { + RemoveAll(string) error +} + +// RemoveAll removes path and any children it contains. +// It removes everything it can but returns the first error +// it encounters. If the path does not exist, RemoveAll +// returns nil (no error). +func RemoveAll(fs Filesystem, path string) error { + r, ok := fs.(removerAll) + if ok { + return r.RemoveAll(path) + } + + return removeAll(fs, path) +} + +func removeAll(fs Filesystem, path string) error { + // This implementation is adapted from os.RemoveAll. + + // Simple case: if Remove works, we're done. + err := fs.Remove(path) + if err == nil || os.IsNotExist(err) { + return nil + } + + // Otherwise, is this a directory we need to recurse into? + dir, serr := fs.Stat(path) + if serr != nil { + if os.IsNotExist(serr) { + return nil + } + + return serr + } + + if !dir.IsDir() { + // Not a directory; return the error from Remove. 
+ return err + } + + // Directory. + fis, err := fs.ReadDir(path) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + + return err + } + + // Remove contents & return first error. + err = nil + for _, fi := range fis { + cpath := fs.Join(path, fi.Name()) + err1 := removeAll(fs, cpath) + if err == nil { + err = err1 + } + } + + // Remove directory. + err1 := fs.Remove(path) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + + if err == nil { + err = err1 + } + + return err + +} diff --git a/vendor/gopkg.in/src-d/go-billy.v2/osfs/os.go b/vendor/gopkg.in/src-d/go-billy.v2/osfs/os.go new file mode 100644 index 0000000000000000000000000000000000000000..0475450c15357cd224e1534283a8d311fea83007 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-billy.v2/osfs/os.go @@ -0,0 +1,205 @@ +// Package os provides a billy filesystem for the OS. +package osfs // import "gopkg.in/src-d/go-billy.v2/osfs" + +import ( + "io/ioutil" + "os" + "path" + "path/filepath" + + "gopkg.in/src-d/go-billy.v2" +) + +const ( + defaultDirectoryMode = 0755 + defaultCreateMode = 0666 +) + +// OS is a filesystem based on the os filesystem +type OS struct { + base string +} + +// New returns a new OS filesystem +func New(baseDir string) *OS { + return &OS{ + base: baseDir, + } +} + +// Create creates a file and opens it with standard permissions +// and modes O_RDWR, O_CREATE and O_TRUNC. +func (fs *OS) Create(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) +} + +// OpenFile is equivalent to standard os.OpenFile. +// If flag os.O_CREATE is set, all parent directories will be created. +func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { + fullpath := path.Join(fs.base, filename) + + if flag&os.O_CREATE != 0 { + if err := fs.createDir(fullpath); err != nil { + return nil, err + } + } + + f, err := os.OpenFile(fullpath, flag, perm) + if err != nil { + return nil, err + } + + filename, err = filepath.Rel(fs.base, fullpath) + if err != nil { + return nil, err + } + + return newOSFile(filename, f), nil +} + +func (fs *OS) createDir(fullpath string) error { + dir := filepath.Dir(fullpath) + if dir != "." { + if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { + return err + } + } + + return nil +} + +// ReadDir returns the filesystem info for all the archives under the specified +// path. +func (ofs *OS) ReadDir(path string) ([]billy.FileInfo, error) { + fullpath := ofs.Join(ofs.base, path) + + l, err := ioutil.ReadDir(fullpath) + if err != nil { + return nil, err + } + + var s = make([]billy.FileInfo, len(l)) + for i, f := range l { + s[i] = f + } + + return s, nil +} + +// Rename moves a file in disk from _from_ to _to_. +func (fs *OS) Rename(from, to string) error { + from = fs.Join(fs.base, from) + to = fs.Join(fs.base, to) + + if err := fs.createDir(to); err != nil { + return err + } + + return os.Rename(from, to) +} + +// MkdirAll creates a directory. +func (fs *OS) MkdirAll(path string, perm os.FileMode) error { + fullpath := fs.Join(fs.base, path) + return os.MkdirAll(fullpath, defaultDirectoryMode) +} + +// Open opens a file in read-only mode. +func (fs *OS) Open(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDONLY, 0) +} + +// Stat returns the FileInfo structure describing file. 
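+// The filename is joined with the filesystem's base directory before calling os.Stat.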
+func (fs *OS) Stat(filename string) (billy.FileInfo, error) { + fullpath := fs.Join(fs.base, filename) + return os.Stat(fullpath) +} + +// Remove deletes a file in disk. +func (fs *OS) Remove(filename string) error { + fullpath := fs.Join(fs.base, filename) + return os.Remove(fullpath) +} + +// TempFile creates a new temporal file. +func (fs *OS) TempFile(dir, prefix string) (billy.File, error) { + fullpath := fs.Join(fs.base, dir) + if err := fs.createDir(fullpath + string(os.PathSeparator)); err != nil { + return nil, err + } + + f, err := ioutil.TempFile(fullpath, prefix) + if err != nil { + return nil, err + } + + s, err := f.Stat() + if err != nil { + return nil, err + } + + filename, err := filepath.Rel(fs.base, fs.Join(fullpath, s.Name())) + if err != nil { + return nil, err + } + + return newOSFile(filename, f), nil +} + +// Join joins the specified elements using the filesystem separator. +func (fs *OS) Join(elem ...string) string { + return filepath.Join(elem...) +} + +// Dir returns a new Filesystem from the same type of fs using as baseDir the +// given path +func (fs *OS) Dir(path string) billy.Filesystem { + return New(fs.Join(fs.base, path)) +} + +// Base returns the base path of the filesytem +func (fs *OS) Base() string { + return fs.base +} + +// RemoveAll removes a file or directory recursively. Removes everything it can, +// but returns the first error. +func (fs *OS) RemoveAll(path string) error { + fullpath := fs.Join(fs.base, path) + return os.RemoveAll(fullpath) +} + +// osFile represents a file in the os filesystem +type osFile struct { + billy.BaseFile + file *os.File +} + +func newOSFile(filename string, file *os.File) billy.File { + return &osFile{ + BaseFile: billy.BaseFile{BaseFilename: filename}, + file: file, + } +} + +func (f *osFile) Read(p []byte) (int, error) { + return f.file.Read(p) +} + +func (f *osFile) Seek(offset int64, whence int) (int64, error) { + return f.file.Seek(offset, whence) +} + +func (f *osFile) Write(p []byte) (int, error) { + return f.file.Write(p) +} + +func (f *osFile) Close() error { + f.BaseFile.Closed = true + + return f.file.Close() +} + +func (f *osFile) ReadAt(p []byte, off int64) (int, error) { + return f.file.ReadAt(p, off) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/LICENSE b/vendor/gopkg.in/src-d/go-git.v4/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6a4a6aa1f8304d34440c6944ad7a523548177197 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2015 Sourced Technologies S.L. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/gopkg.in/src-d/go-git.v4/Makefile b/vendor/gopkg.in/src-d/go-git.v4/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d576778f4d295e5b010366ebba8748089d3451ba --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/Makefile @@ -0,0 +1,52 @@ +# General +WORKDIR = $(PWD) + +# Go parameters +GOCMD = go +GOTEST = $(GOCMD) test -v + +# Git config +GIT_VERSION ?= +GIT_DIST_PATH ?= $(PWD)/.git-dist +GIT_REPOSITORY = http://github.com/git/git.git + +# Coverage +COVERAGE_REPORT = coverage.txt +COVERAGE_PROFILE = profile.out +COVERAGE_MODE = atomic + +ifneq ($(origin CI), undefined) + WORKDIR := $(GOPATH)/src/gopkg.in/src-d/go-git.v4 +endif + +build-git: + @if [ -f $(GIT_DIST_PATH)/git ]; then \ + echo "nothing to do, using cache $(GIT_DIST_PATH)"; \ + else \ + git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \ + cd $(GIT_DIST_PATH); \ + make configure; \ + ./configure; \ + make all; \ + fi + +test: + @cd $(WORKDIR); \ + $(GOTEST) ./... + +test-coverage: + @cd $(WORKDIR); \ + echo "" > $(COVERAGE_REPORT); \ + for dir in `find . -name "*.go" | grep -o '.*/' | sort | uniq`; do \ + $(GOTEST) $$dir -coverprofile=$(COVERAGE_PROFILE) -covermode=$(COVERAGE_MODE); \ + if [ $$? != 0 ]; then \ + exit 2; \ + fi; \ + if [ -f $(COVERAGE_PROFILE) ]; then \ + cat $(COVERAGE_PROFILE) >> $(COVERAGE_REPORT); \ + rm $(COVERAGE_PROFILE); \ + fi; \ + done; \ + +clean: + rm -rf $(GIT_DIST_PATH) \ No newline at end of file diff --git a/vendor/gopkg.in/src-d/go-git.v4/README.md b/vendor/gopkg.in/src-d/go-git.v4/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cd6450f365f60124ab2077e28ad1917fa7c3ab53 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/README.md @@ -0,0 +1,122 @@ +# go-git [](https://godoc.org/github.com/src-d/go-git) [](https://travis-ci.org/src-d/go-git) [](https://codecov.io/github/src-d/go-git) [](https://codebeat.co/projects/github-com-src-d-go-git) + +A highly extensible git implementation in **pure Go**. + +*go-git* aims to reach the completeness of [libgit2](https://libgit2.github.com/) or [jgit](http://www.eclipse.org/jgit/), nowadays covers the **majority** of the plumbing **read operations** and **some** of the main **write operations**, but lacks the main porcelain operations such as merges. + +It is **highly extensible**, we have been following the open/close principle in its design to facilitate extensions, mainly focusing the efforts on the persistence of the objects. + +### ... is this production ready? + +The master branch represents the `v4` of the library, it is currently under active development and is planned to be released in early 2017. + +If you are looking for a production ready version, please take a look to the [`v3`](https://github.com/src-d/go-git/tree/v3) which is being used in production at [source{d}](http://sourced.tech) since August 2015 to analyze all GitHub public repositories (i.e. 16M repositories). + +We recommend the use of `v4` to develop new projects since it includes much new functionality and provides a more *idiomatic git* API + +Installation +------------ + +The recommended way to install *go-git* is: + +``` +go get -u gopkg.in/src-d/go-git.v4/... 
+``` + + +Examples +-------- + +> Please note that the functions `CheckIfError` and `Info` used in the examples are from the [examples package](https://github.com/src-d/go-git/blob/master/_examples/common.go#L17) just to be used in the examples. + + +### Basic example + +A basic example that mimics the standard `git clone` command + +```go +// Clone the given repository to the given directory +Info("git clone https://github.com/src-d/go-git") + +_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{ + URL: "https://github.com/src-d/go-git", + Progress: os.Stdout, +}) + +CheckIfError(err) +``` + +Outputs: +``` +Counting objects: 4924, done. +Compressing objects: 100% (1333/1333), done. +Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533 +``` + +### In-memory example + +Cloning a repository into memory and printing the history of HEAD, just like `git log` does + + +```go +// Clones the given repository in memory, creating the remote, the local +// branches and fetching the objects, exactly as: +Info("git clone https://github.com/src-d/go-siva") + +r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{ + URL: "https://github.com/src-d/go-siva", +}) + +CheckIfError(err) + +// Gets the HEAD history from HEAD, just like does: +Info("git log") + +// ... retrieves the branch pointed by HEAD +ref, err := r.Head() +CheckIfError(err) + +// ... retrieves the commit object +commit, err := r.Commit(ref.Hash()) +CheckIfError(err) + +// ... retrieves the commit history +history, err := commit.History() +CheckIfError(err) + +// ... just iterates over the commits, printing it +for _, c := range history { + fmt.Println(c) +} +``` + +Outputs: +``` +commit ded8054fd0c3994453e9c8aacaf48d118d42991e +Author: Santiago M. Mola <santi@mola.io> +Date: Sat Nov 12 21:18:41 2016 +0100 + + index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9) + +commit df707095626f384ce2dc1a83b30f9a21d69b9dfc +Author: Santiago M. Mola <santi@mola.io> +Date: Fri Nov 11 13:23:22 2016 +0100 + + readwriter: fix bug when writing index. (#10) + + When using ReadWriter on an existing siva file, absolute offset for + index entries was not being calculated correctly. +... +``` + +You can find this [example](_examples/log/main.go) and many other at the [examples](_examples) folder + +Contribute +---------- + +If you are interested on contributing to go-git, open an [issue](https://github.com/src-d/go-git/issues) explaining which missing functionality you want to work in, and we will guide you through the implementation. + +License +------- + +MIT, see [LICENSE](LICENSE) diff --git a/vendor/gopkg.in/src-d/go-git.v4/blame.go b/vendor/gopkg.in/src-d/go-git.v4/blame.go new file mode 100644 index 0000000000000000000000000000000000000000..2f0843d29abe09408ac4e5c14f31e415fba91471 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/blame.go @@ -0,0 +1,287 @@ +package git + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "unicode/utf8" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/object" + "gopkg.in/src-d/go-git.v4/utils/diff" +) + +// BlameResult represents the result of a Blame operation. +type BlameResult struct { + // Path is the path of the File that we're blaming. + Path string + // Rev (Revision) is the hash of the specified Commit used to generate this result. + Rev plumbing.Hash + // Lines contains every line with its authorship. 
+ Lines []*Line +} + +// Blame returns a BlameResult with the information about the last author of +// each line from file `path` at commit `c`. +func Blame(c *object.Commit, path string) (*BlameResult, error) { + // The file to blame is identified by the input arguments: + // commit and path. commit is a Commit object obtained from a Repository. Path + // represents a path to a specific file contained into the repository. + // + // Blaming a file is a two step process: + // + // 1. Create a linear history of the commits affecting a file. We use + // revlist.New for that. + // + // 2. Then build a graph with a node for every line in every file in + // the history of the file. + // + // Each node is assigned a commit: Start by the nodes in the first + // commit. Assign that commit as the creator of all its lines. + // + // Then jump to the nodes in the next commit, and calculate the diff + // between the two files. Newly created lines get + // assigned the new commit as its origin. Modified lines also get + // this new commit. Untouched lines retain the old commit. + // + // All this work is done in the assignOrigin function which holds all + // the internal relevant data in a "blame" struct, that is not + // exported. + // + // TODO: ways to improve the efficiency of this function: + // 1. Improve revlist + // 2. Improve how to traverse the history (example a backward traversal will + // be much more efficient) + // + // TODO: ways to improve the function in general: + // 1. Add memoization between revlist and assign. + // 2. It is using much more memory than needed, see the TODOs below. + + b := new(blame) + b.fRev = c + b.path = path + + // get all the file revisions + if err := b.fillRevs(); err != nil { + return nil, err + } + + // calculate the line tracking graph and fill in + // file contents in data. + if err := b.fillGraphAndData(); err != nil { + return nil, err + } + + file, err := b.fRev.File(b.path) + if err != nil { + return nil, err + } + finalLines, err := file.Lines() + if err != nil { + return nil, err + } + + // Each node (line) holds the commit where it was introduced or + // last modified. To achieve that we use the FORWARD algorithm + // described in Zimmermann, et al. "Mining Version Archives for + // Co-changed Lines", in proceedings of the Mining Software + // Repositories workshop, Shanghai, May 22-23, 2006. + lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) + if err != nil { + return nil, err + } + + return &BlameResult{ + Path: path, + Rev: c.Hash, + Lines: lines, + }, nil +} + +// Line values represent the contents and author of a line in BlamedResult values. +type Line struct { + // Author is the email address of the last author that modified the line. + Author string + // Text is the original text of the line. + Text string +} + +func newLine(author, text string) *Line { + return &Line{ + Author: author, + Text: text, + } +} + +func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { + if len(contents) != len(commits) { + return nil, errors.New("contents and commits have different length") + } + result := make([]*Line, 0, len(contents)) + for i := range contents { + l := newLine(commits[i].Author.Email, contents[i]) + result = append(result, l) + } + return result, nil +} + +// this struct is internally used by the blame function to hold its +// inputs, outputs and state. 
+type blame struct { + // the path of the file to blame + path string + // the commit of the final revision of the file to blame + fRev *object.Commit + // the chain of revisions affecting the the file to blame + revs []*object.Commit + // the contents of the file across all its revisions + data []string + // the graph of the lines in the file across all the revisions + graph [][]*object.Commit +} + +// calculte the history of a file "path", starting from commit "from", sorted by commit date. +func (b *blame) fillRevs() error { + var err error + + b.revs, err = References(b.fRev, b.path) + if err != nil { + return err + } + return nil +} + +// build graph of a file from its revision history +func (b *blame) fillGraphAndData() error { + //TODO: not all commits are needed, only the current rev and the prev + b.graph = make([][]*object.Commit, len(b.revs)) + b.data = make([]string, len(b.revs)) // file contents in all the revisions + // for every revision of the file, starting with the first + // one... + for i, rev := range b.revs { + // get the contents of the file + file, err := rev.File(b.path) + if err != nil { + return nil + } + b.data[i], err = file.Contents() + if err != nil { + return err + } + nLines := countLines(b.data[i]) + // create a node for each line + b.graph[i] = make([]*object.Commit, nLines) + // assign a commit to each node + // if this is the first revision, then the node is assigned to + // this first commit. + if i == 0 { + for j := 0; j < nLines; j++ { + b.graph[i][j] = (*object.Commit)(b.revs[i]) + } + } else { + // if this is not the first commit, then assign to the old + // commit or to the new one, depending on what the diff + // says. + b.assignOrigin(i, i-1) + } + } + return nil +} + +// sliceGraph returns a slice of commits (one per line) for a particular +// revision of a file (0=first revision). +func (b *blame) sliceGraph(i int) []*object.Commit { + fVs := b.graph[i] + result := make([]*object.Commit, 0, len(fVs)) + for _, v := range fVs { + c := object.Commit(*v) + result = append(result, &c) + } + return result +} + +// Assigns origin to vertexes in current (c) rev from data in its previous (p) +// revision +func (b *blame) assignOrigin(c, p int) { + // assign origin based on diff info + hunks := diff.Do(b.data[p], b.data[c]) + sl := -1 // source line + dl := -1 // destination line + for h := range hunks { + hLines := countLines(hunks[h].Text) + for hl := 0; hl < hLines; hl++ { + switch { + case hunks[h].Type == 0: + sl++ + dl++ + b.graph[c][dl] = b.graph[p][sl] + case hunks[h].Type == 1: + dl++ + b.graph[c][dl] = (*object.Commit)(b.revs[c]) + case hunks[h].Type == -1: + sl++ + default: + panic("unreachable") + } + } + } +} + +// GoString prints the results of a Blame using git-blame's style. +func (b *blame) GoString() string { + var buf bytes.Buffer + + file, err := b.fRev.File(b.path) + if err != nil { + panic("PrettyPrint: internal error in repo.Data") + } + contents, err := file.Contents() + if err != nil { + panic("PrettyPrint: internal error in repo.Data") + } + + lines := strings.Split(contents, "\n") + // max line number length + mlnl := len(fmt.Sprintf("%s", strconv.Itoa(len(lines)))) + // max author length + mal := b.maxAuthorLength() + format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", + mal, mlnl) + + fVs := b.graph[len(b.graph)-1] + for ln, v := range fVs { + fmt.Fprintf(&buf, format, v.Hash.String()[:8], + prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) + } + return buf.String() +} + +// utility function to pretty print the author. 
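+// The output has the form "<author name> YYYY-MM-DD", using the commit author date.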
+func prettyPrintAuthor(c *object.Commit) string { + return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02")) +} + +// utility function to calculate the number of runes needed +// to print the longest author name in the blame of a file. +func (b *blame) maxAuthorLength() int { + memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1) + fVs := b.graph[len(b.graph)-1] + m := 0 + for ln := range fVs { + if _, ok := memo[fVs[ln].Hash]; ok { + continue + } + memo[fVs[ln].Hash] = struct{}{} + m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln]))) + } + return m +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/common.go b/vendor/gopkg.in/src-d/go-git.v4/common.go new file mode 100644 index 0000000000000000000000000000000000000000..f837a2654c174180a4455c0f705e02168576594d --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/common.go @@ -0,0 +1,22 @@ +package git + +import "strings" + +const defaultDotGitPath = ".git" + +// countLines returns the number of lines in a string à la git, this is +// The newline character is assumed to be '\n'. The empty string +// contains 0 lines. If the last line of the string doesn't end with a +// newline, it will still be considered a line. +func countLines(s string) int { + if s == "" { + return 0 + } + + nEOL := strings.Count(s, "\n") + if strings.HasSuffix(s, "\n") { + return nEOL + } + + return nEOL + 1 +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/config/config.go b/vendor/gopkg.in/src-d/go-git.v4/config/config.go new file mode 100644 index 0000000000000000000000000000000000000000..e8c99a96932c5b3232792970b07debe2b2e3ff9b --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/config/config.go @@ -0,0 +1,255 @@ +// Package config contains the abstraction of multiple config files +package config + +import ( + "bytes" + "errors" + "fmt" + + format "gopkg.in/src-d/go-git.v4/plumbing/format/config" +) + +const ( + // DefaultFetchRefSpec is the default refspec used for fetch. + DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*" + // DefaultPushRefSpec is the default refspec used for push. + DefaultPushRefSpec = "refs/heads/*:refs/heads/*" +) + +// ConfigStorer generic storage of Config object +type ConfigStorer interface { + Config() (*Config, error) + SetConfig(*Config) error +} + +var ( + ErrInvalid = errors.New("config invalid remote") + ErrRemoteConfigNotFound = errors.New("remote config not found") + ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL") + ErrRemoteConfigEmptyName = errors.New("remote config: empty name") +) + +// Config contains the repository configuration +// ftp://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES +type Config struct { + Core struct { + // IsBare if true this repository is assumed to be bare and has no + // working directory associated with it. + IsBare bool + // Worktree is the path to the root of the working tree. + Worktree string + } + // Remotes list of repository remotes, the key of the map is the name + // of the remote, should equal to RemoteConfig.Name. + Remotes map[string]*RemoteConfig + // Submodules list of repository submodules, the key of the map is the name + // of the submodule, should equal to Submodule.Name. + Submodules map[string]*Submodule + + // contains the raw information of a config file, the main goal is preserve + // the parsed information from the original format, to avoid missing + // unsupported features. 
+ raw *format.Config +} + +// NewConfig returns a new empty Config. +func NewConfig() *Config { + return &Config{ + Remotes: make(map[string]*RemoteConfig, 0), + Submodules: make(map[string]*Submodule, 0), + raw: format.New(), + } +} + +// Validate validates the fields and sets the default values. +func (c *Config) Validate() error { + for name, r := range c.Remotes { + if r.Name != name { + return ErrInvalid + } + + if err := r.Validate(); err != nil { + return err + } + } + + return nil +} + +const ( + remoteSection = "remote" + submoduleSection = "submodule" + coreSection = "core" + fetchKey = "fetch" + urlKey = "url" + bareKey = "bare" + worktreeKey = "worktree" +) + +// Unmarshal parses a git-config file and stores it. +func (c *Config) Unmarshal(b []byte) error { + r := bytes.NewBuffer(b) + d := format.NewDecoder(r) + + c.raw = format.New() + if err := d.Decode(c.raw); err != nil { + return err + } + + c.unmarshalCore() + c.unmarshalSubmodules() + return c.unmarshalRemotes() +} + +func (c *Config) unmarshalCore() { + s := c.raw.Section(coreSection) + if s.Options.Get(bareKey) == "true" { + c.Core.IsBare = true + } + + c.Core.Worktree = s.Options.Get(worktreeKey) +} + +func (c *Config) unmarshalRemotes() error { + s := c.raw.Section(remoteSection) + for _, sub := range s.Subsections { + r := &RemoteConfig{} + if err := r.unmarshal(sub); err != nil { + return err + } + + c.Remotes[r.Name] = r + } + + return nil +} + +func (c *Config) unmarshalSubmodules() { + s := c.raw.Section(submoduleSection) + for _, sub := range s.Subsections { + m := &Submodule{} + m.unmarshal(sub) + + c.Submodules[m.Name] = m + } +} + +// Marshal returns Config encoded as a git-config file. +func (c *Config) Marshal() ([]byte, error) { + c.marshalCore() + c.marshalRemotes() + c.marshalSubmodules() + + buf := bytes.NewBuffer(nil) + if err := format.NewEncoder(buf).Encode(c.raw); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *Config) marshalCore() { + s := c.raw.Section(coreSection) + s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) + + if c.Core.Worktree != "" { + s.SetOption(worktreeKey, c.Core.Worktree) + } +} + +func (c *Config) marshalRemotes() { + s := c.raw.Section(remoteSection) + s.Subsections = make(format.Subsections, len(c.Remotes)) + + var i int + for _, r := range c.Remotes { + s.Subsections[i] = r.marshal() + i++ + } +} + +func (c *Config) marshalSubmodules() { + s := c.raw.Section(submoduleSection) + s.Subsections = make(format.Subsections, len(c.Submodules)) + + var i int + for _, r := range c.Submodules { + section := r.marshal() + // the submodule section at config is a subset of the .gitmodule file + // we should remove the non-valid options for the config file. + section.RemoveOption(pathKey) + s.Subsections[i] = section + i++ + } +} + +// RemoteConfig contains the configuration for a given remote repository. +type RemoteConfig struct { + // Name of the remote + Name string + // URL the URL of a remote repository + URL string + // Fetch the default set of "refspec" for fetch operation + Fetch []RefSpec + + // raw representation of the subsection, filled by marshal or unmarshal are + // called + raw *format.Subsection +} + +// Validate validates the fields and sets the default values. 
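+// When no fetch refspec is configured, DefaultFetchRefSpec is expanded with the remote name and used as the default.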
+func (c *RemoteConfig) Validate() error { + if c.Name == "" { + return ErrRemoteConfigEmptyName + } + + if c.URL == "" { + return ErrRemoteConfigEmptyURL + } + + for _, r := range c.Fetch { + if err := r.Validate(); err != nil { + return err + } + } + + if len(c.Fetch) == 0 { + c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))} + } + + return nil +} + +func (c *RemoteConfig) unmarshal(s *format.Subsection) error { + c.raw = s + + fetch := []RefSpec{} + for _, f := range c.raw.Options.GetAll(fetchKey) { + rs := RefSpec(f) + if err := rs.Validate(); err != nil { + return err + } + + fetch = append(fetch, rs) + } + + c.Name = c.raw.Name + c.URL = c.raw.Option(urlKey) + c.Fetch = fetch + + return nil +} + +func (c *RemoteConfig) marshal() *format.Subsection { + if c.raw == nil { + c.raw = &format.Subsection{} + } + + c.raw.Name = c.Name + c.raw.SetOption(urlKey, c.URL) + for _, rs := range c.Fetch { + c.raw.SetOption(fetchKey, rs.String()) + } + + return c.raw +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/config/modules.go b/vendor/gopkg.in/src-d/go-git.v4/config/modules.go new file mode 100644 index 0000000000000000000000000000000000000000..24ef304a0237b30bfd02de1d24510b64ff5a6955 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/config/modules.go @@ -0,0 +1,135 @@ +package config + +import ( + "bytes" + "errors" + + format "gopkg.in/src-d/go-git.v4/plumbing/format/config" +) + +var ( + ErrModuleEmptyURL = errors.New("module config: empty URL") + ErrModuleEmptyPath = errors.New("module config: empty path") +) + +// Modules defines the submodules properties, represents a .gitmodules file +// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html +type Modules struct { + // Submodules is a map of submodules being the key the name of the submodule. + Submodules map[string]*Submodule + + raw *format.Config +} + +// NewModules returns a new empty Modules +func NewModules() *Modules { + return &Modules{ + Submodules: make(map[string]*Submodule, 0), + raw: format.New(), + } +} + +const ( + pathKey = "path" + branchKey = "branch" +) + +// Unmarshal parses a git-config file and stores it. +func (m *Modules) Unmarshal(b []byte) error { + r := bytes.NewBuffer(b) + d := format.NewDecoder(r) + + m.raw = format.New() + if err := d.Decode(m.raw); err != nil { + return err + } + + s := m.raw.Section(submoduleSection) + for _, sub := range s.Subsections { + mod := &Submodule{} + mod.unmarshal(sub) + + m.Submodules[mod.Path] = mod + } + + return nil +} + +// Marshal returns Modules encoded as a git-config file. +func (m *Modules) Marshal() ([]byte, error) { + s := m.raw.Section(submoduleSection) + s.Subsections = make(format.Subsections, len(m.Submodules)) + + var i int + for _, r := range m.Submodules { + s.Subsections[i] = r.marshal() + i++ + } + + buf := bytes.NewBuffer(nil) + if err := format.NewEncoder(buf).Encode(m.raw); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// Submodule defines a submodule. +type Submodule struct { + // Name module name + Name string + // Path defines the path, relative to the top-level directory of the Git + // working tree. + Path string + // URL defines a URL from which the submodule repository can be cloned. + URL string + // Branch is a remote branch name for tracking updates in the upstream + // submodule. Optional value. + Branch string + + // raw representation of the subsection, filled by marshal or unmarshal are + // called. 
+ raw *format.Subsection +} + +// Validate validates the fields and sets the default values. +func (m *Submodule) Validate() error { + if m.Path == "" { + return ErrModuleEmptyPath + } + + if m.URL == "" { + return ErrModuleEmptyURL + } + + return nil +} + +func (m *Submodule) unmarshal(s *format.Subsection) { + m.raw = s + + m.Name = m.raw.Name + m.Path = m.raw.Option(pathKey) + m.URL = m.raw.Option(urlKey) + m.Branch = m.raw.Option(branchKey) +} + +func (m *Submodule) marshal() *format.Subsection { + if m.raw == nil { + m.raw = &format.Subsection{} + } + + m.raw.Name = m.Name + if m.raw.Name == "" { + m.raw.Name = m.Path + } + + m.raw.SetOption(pathKey, m.Path) + m.raw.SetOption(urlKey, m.URL) + + if m.Branch != "" { + m.raw.SetOption(branchKey, m.Branch) + } + + return m.raw +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/config/refspec.go b/vendor/gopkg.in/src-d/go-git.v4/config/refspec.go new file mode 100644 index 0000000000000000000000000000000000000000..7e4106ad64f475a2adb309ea90780c03dc8c59eb --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/config/refspec.go @@ -0,0 +1,145 @@ +package config + +import ( + "errors" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +const ( + refSpecWildcard = "*" + refSpecForce = "+" + refSpecSeparator = ":" +) + +var ( + ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong") + ErrRefSpecMalformedWildcard = errors.New("malformed refspec, missmatched number of wildcards") +) + +// RefSpec is a mapping from local branches to remote references +// The format of the refspec is an optional +, followed by <src>:<dst>, where +// <src> is the pattern for references on the remote side and <dst> is where +// those references will be written locally. The + tells Git to update the +// reference even if it isn’t a fast-forward. +// eg.: "+refs/heads/*:refs/remotes/origin/*" +// +// https://git-scm.com/book/es/v2/Git-Internals-The-Refspec +type RefSpec string + +// Validate validates the RefSpec +func (s RefSpec) Validate() error { + spec := string(s) + if strings.Count(spec, refSpecSeparator) != 1 { + return ErrRefSpecMalformedSeparator + } + + sep := strings.Index(spec, refSpecSeparator) + if sep == len(spec)-1 { + return ErrRefSpecMalformedSeparator + } + + ws := strings.Count(spec[0:sep], refSpecWildcard) + wd := strings.Count(spec[sep+1:], refSpecWildcard) + if ws == wd && ws < 2 && wd < 2 { + return nil + } + + return ErrRefSpecMalformedWildcard +} + +// IsForceUpdate returns if update is allowed in non fast-forward merges. +func (s RefSpec) IsForceUpdate() bool { + if s[0] == refSpecForce[0] { + return true + } + + return false +} + +// IsDelete returns true if the refspec indicates a delete (empty src). +func (s RefSpec) IsDelete() bool { + if s[0] == refSpecSeparator[0] { + return true + } + + return false +} + +// Src return the src side. +func (s RefSpec) Src() string { + spec := string(s) + start := strings.Index(spec, refSpecForce) + 1 + end := strings.Index(spec, refSpecSeparator) + + return spec[start:end] +} + +// Match match the given plumbing.ReferenceName against the source. +func (s RefSpec) Match(n plumbing.ReferenceName) bool { + if !s.IsWildcard() { + return s.matchExact(n) + } + + return s.matchGlob(n) +} + +// IsWildcard returns true if the RefSpec contains a wildcard. 
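+// For example, "+refs/heads/*:refs/remotes/origin/*" is a wildcard RefSpec.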
+func (s RefSpec) IsWildcard() bool { + return strings.Index(string(s), refSpecWildcard) != -1 +} + +func (s RefSpec) matchExact(n plumbing.ReferenceName) bool { + return s.Src() == n.String() +} + +func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool { + src := s.Src() + name := n.String() + wildcard := strings.Index(src, refSpecWildcard) + + var prefix, suffix string + prefix = src[0:wildcard] + if len(src) < wildcard { + suffix = src[wildcard+1 : len(suffix)] + } + + return len(name) > len(prefix)+len(suffix) && + strings.HasPrefix(name, prefix) && + strings.HasSuffix(name, suffix) +} + +// Dst returns the destination for the given remote reference. +func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName { + spec := string(s) + start := strings.Index(spec, refSpecSeparator) + 1 + dst := spec[start:] + src := s.Src() + + if !s.IsWildcard() { + return plumbing.ReferenceName(dst) + } + + name := n.String() + ws := strings.Index(src, refSpecWildcard) + wd := strings.Index(dst, refSpecWildcard) + match := name[ws : len(name)-(len(src)-(ws+1))] + + return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:]) +} + +func (s RefSpec) String() string { + return string(s) +} + +// MatchAny returns true if any of the RefSpec match with the given ReferenceName. +func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool { + for _, r := range l { + if r.Match(n) { + return true + } + } + + return false +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/doc.go b/vendor/gopkg.in/src-d/go-git.v4/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..60f2261e415ad54a97c0278b4a1616c736e27ecf --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/doc.go @@ -0,0 +1,10 @@ +// A highly extensible git implementation in pure Go. +// +// go-git aims to reach the completeness of libgit2 or jgit, nowadays covers the +// majority of the plumbing read operations and some of the main write +// operations, but lacks the main porcelain operations such as merges. +// +// It is highly extensible, we have been following the open/close principle in +// its design to facilitate extensions, mainly focusing the efforts on the +// persistence of the objects. +package git // import "gopkg.in/src-d/go-git.v4" diff --git a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go new file mode 100644 index 0000000000000000000000000000000000000000..b45a6d8d0291bbe38a4a8ef564bf4612258a0878 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go @@ -0,0 +1,622 @@ +// Package revision extracts git revision from string +// More informations about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html +package revision + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "time" +) + +// ErrInvalidRevision is emitted if string doesn't match valid revision +type ErrInvalidRevision struct { + s string +} + +func (e *ErrInvalidRevision) Error() string { + return "Revision invalid : " + e.s +} + +// Revisioner represents a revision component. 
+// A revision is made of multiple revision components +// obtained after parsing a revision string, +// for instance revision "master~" will be converted in +// two revision components Ref and TildePath +type Revisioner interface { +} + +// Ref represents a reference name : HEAD, master +type Ref string + +// TildePath represents ~, ~{n} +type TildePath struct { + Depth int +} + +// CaretPath represents ^, ^{n} +type CaretPath struct { + Depth int +} + +// CaretReg represents ^{/foo bar} +type CaretReg struct { + Regexp *regexp.Regexp + Negate bool +} + +// CaretType represents ^{commit} +type CaretType struct { + ObjectType string +} + +// AtReflog represents @{n} +type AtReflog struct { + Depth int +} + +// AtCheckout represents @{-n} +type AtCheckout struct { + Depth int +} + +// AtUpstream represents @{upstream}, @{u} +type AtUpstream struct { + BranchName string +} + +// AtPush represents @{push} +type AtPush struct { + BranchName string +} + +// AtDate represents @{"2006-01-02T15:04:05Z"} +type AtDate struct { + Date time.Time +} + +// ColonReg represents :/foo bar +type ColonReg struct { + Regexp *regexp.Regexp + Negate bool +} + +// ColonPath represents :./<path> :<path> +type ColonPath struct { + Path string +} + +// ColonStagePath represents :<n>:/<path> +type ColonStagePath struct { + Path string + Stage int +} + +// Parser represents a parser +// use to tokenize and transform to revisioner chunks +// a given string +type Parser struct { + s *scanner + currentParsedChar struct { + tok token + lit string + } + unreadLastChar bool +} + +// NewParserFromString returns a new instance of parser from a string. +func NewParserFromString(s string) *Parser { + return NewParser(bytes.NewBufferString(s)) +} + +// NewParser returns a new instance of parser. +func NewParser(r io.Reader) *Parser { + return &Parser{s: newScanner(r)} +} + +// scan returns the next token from the underlying scanner +// or the last scanned token if an unscan was requested +func (p *Parser) scan() (token, string, error) { + if p.unreadLastChar { + p.unreadLastChar = false + return p.currentParsedChar.tok, p.currentParsedChar.lit, nil + } + + tok, lit, err := p.s.scan() + + p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit + + return tok, lit, err +} + +// unscan pushes the previously read token back onto the buffer. 
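+// The next call to scan returns the buffered token again instead of reading from the scanner.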
+func (p *Parser) unscan() { p.unreadLastChar = true } + +// Parse explode a revision string into revisioner chunks +func (p *Parser) Parse() ([]Revisioner, error) { + var rev Revisioner + var revs []Revisioner + var tok token + var err error + + for { + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case at: + rev, err = p.parseAt() + case tilde: + rev, err = p.parseTilde() + case caret: + rev, err = p.parseCaret() + case colon: + rev, err = p.parseColon() + case eof: + err = p.validateFullRevision(&revs) + + if err != nil { + return []Revisioner{}, err + } + + return revs, nil + default: + p.unscan() + rev, err = p.parseRef() + } + + if err != nil { + return []Revisioner{}, err + } + + revs = append(revs, rev) + } +} + +// validateFullRevision ensures all revisioner chunks make a valid revision +func (p *Parser) validateFullRevision(chunks *[]Revisioner) error { + var hasReference bool + + for i, chunk := range *chunks { + switch chunk.(type) { + case Ref: + if i == 0 { + hasReference = true + } else { + return &ErrInvalidRevision{`reference must be defined once at the beginning`} + } + case AtDate: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<ISO-8601 date>}, @{<ISO-8601 date>}`} + case AtReflog: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{<n>}, @{<n>}`} + case AtCheckout: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-<n>}`} + case AtUpstream: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{upstream}, @{upstream}, <refname>@{u}, @{u}`} + case AtPush: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : <refname>@{push}, @{push}`} + case TildePath, CaretPath, CaretReg: + if !hasReference { + return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`} + } + case ColonReg: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : :/<regexp>`} + case ColonPath: + if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : <revision>:<path>`} + case ColonStagePath: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : :<n>:<path>`} + } + } + + return nil +} + +// parseAt extract @ statements +func (p *Parser) parseAt() (Revisioner, error) { + var tok, nextTok token + var lit, nextLit string + var err error + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + if tok != obrace { + p.unscan() + + return Ref("HEAD"), nil + } + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, nextLit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace: + return AtUpstream{}, nil + case tok == word && lit == "push" && nextTok == cbrace: + return AtPush{}, nil + case tok == number && nextTok == cbrace: + n, _ := strconv.Atoi(lit) + + return AtReflog{n}, nil + case tok == minus && nextTok 
== number: + n, _ := strconv.Atoi(nextLit) + + t, _, err := p.scan() + + if err != nil { + return nil, err + } + + if t != cbrace { + return nil, &ErrInvalidRevision{fmt.Sprintf(`missing "}" in @{-n} structure`)} + } + + return AtCheckout{n}, nil + default: + p.unscan() + + date := lit + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == cbrace: + t, err := time.Parse("2006-01-02T15:04:05Z", date) + + if err != nil { + return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)} + } + + return AtDate{t}, nil + default: + date += lit + } + } + } +} + +// parseTilde extract ~ statements +func (p *Parser) parseTilde() (Revisioner, error) { + var tok token + var lit string + var err error + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == number: + n, _ := strconv.Atoi(lit) + + return TildePath{n}, nil + default: + p.unscan() + return TildePath{1}, nil + } +} + +// parseCaret extract ^ statements +func (p *Parser) parseCaret() (Revisioner, error) { + var tok token + var lit string + var err error + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == obrace: + r, err := p.parseCaretBraces() + + if err != nil { + return nil, err + } + + return r, nil + case tok == number: + n, _ := strconv.Atoi(lit) + + if n > 2 { + return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)} + } + + return CaretPath{n}, nil + default: + p.unscan() + return CaretPath{1}, nil + } +} + +// parseCaretBraces extract ^{<data>} statements +func (p *Parser) parseCaretBraces() (Revisioner, error) { + var tok, nextTok token + var lit, _ string + start := true + var re string + var negate bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"): + return CaretType{lit}, nil + case re == "" && tok == cbrace: + return CaretType{"tag"}, nil + case re == "" && tok == emark && nextTok == emark: + re += lit + case re == "" && tok == emark && nextTok == minus: + negate = true + case re == "" && tok == emark: + return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`)} + case re == "" && tok == slash: + p.unscan() + case tok != slash && start: + return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} + case tok != cbrace: + p.unscan() + re += lit + case tok == cbrace: + p.unscan() + + reg, err := regexp.Compile(re) + + if err != nil { + return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} + } + + return CaretReg{reg, negate}, nil + } + + start = false + } +} + +// parseColon extract : statements +func (p *Parser) parseColon() (Revisioner, error) { + var tok token + var err error + + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case slash: + return p.parseColonSlash() + default: + p.unscan() + return p.parseColonDefault() + } +} + +// parseColonSlash extract :/<data> statements +func (p *Parser) parseColonSlash() (Revisioner, error) { + var tok, nextTok token + var lit string + var re string + var negate bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == emark && nextTok == emark: + re += lit + case re == "" && tok == emark && nextTok == minus: + negate = true + case re == "" && tok == emark: + return nil, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component sequences starting with "/!" others than those defined are reserved`)} + case tok == eof: + p.unscan() + reg, err := regexp.Compile(re) + + if err != nil { + return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} + } + + return ColonReg{reg, negate}, nil + default: + p.unscan() + re += lit + } + } +} + +// parseColonDefault extract :<data> statements +func (p *Parser) parseColonDefault() (Revisioner, error) { + var tok token + var lit string + var path string + var stage int + var err error + var n = -1 + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err := p.scan() + + if err != nil { + return nil, err + } + + if tok == number && nextTok == colon { + n, _ = strconv.Atoi(lit) + } + + switch n { + case 0, 1, 2, 3: + stage = n + default: + path += lit + p.unscan() + } + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == eof && n == -1: + return ColonPath{path}, nil + case tok == eof: + return ColonStagePath{path, stage}, nil + default: + path += lit + } + } +} + +// parseRef extract reference name +func (p *Parser) parseRef() (Revisioner, error) { + var tok, prevTok token + var lit, buf string + var endOfRef bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case eof, at, colon, tilde, caret: + endOfRef = true + } + + err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef) + + if err != nil { + return "", err + } + + if endOfRef { + p.unscan() + return Ref(buf), nil + } + + buf += lit + prevTok = tok + } +} + +// checkRefFormat ensure reference name follow rules defined here : +// https://git-scm.com/docs/git-check-ref-format +func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error { + switch token { + case aslash, space, control, qmark, asterisk, obracket: + return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)} + } + + switch { + case (token == dot || token == slash) && buffer 
== "": + return &ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)} + case previousToken == slash && endOfRef: + return &ErrInvalidRevision{`must not end with "/"`} + case previousToken == dot && endOfRef: + return &ErrInvalidRevision{`must not end with "."`} + case token == dot && previousToken == slash: + return &ErrInvalidRevision{`must not contains "/."`} + case previousToken == dot && token == dot: + return &ErrInvalidRevision{`must not contains ".."`} + case previousToken == slash && token == slash: + return &ErrInvalidRevision{`must not contains consecutively "/"`} + case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock": + return &ErrInvalidRevision{"cannot end with .lock"} + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go new file mode 100644 index 0000000000000000000000000000000000000000..fb5f333f7f8a9144cbcbfa6e8342ed5dc412626b --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go @@ -0,0 +1,117 @@ +package revision + +import ( + "bufio" + "io" + "unicode" +) + +// runeCategoryValidator takes a rune as input and +// validates it belongs to a rune category +type runeCategoryValidator func(r rune) bool + +// tokenizeExpression aggegates a series of runes matching check predicate into a single +// string and provides given tokenType as token type +func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) { + var data []rune + data = append(data, ch) + + for { + c, _, err := r.ReadRune() + + if c == zeroRune { + break + } + + if err != nil { + return tokenError, "", err + } + + if check(c) { + data = append(data, c) + } else { + err := r.UnreadRune() + + if err != nil { + return tokenError, "", err + } + + return tokenType, string(data), nil + } + } + + return tokenType, string(data), nil +} + +var zeroRune = rune(0) + +// scanner represents a lexical scanner. +type scanner struct { + r *bufio.Reader +} + +// newScanner returns a new instance of scanner. 
+func newScanner(r io.Reader) *scanner { + return &scanner{r: bufio.NewReader(r)} +} + +// Scan extracts tokens and their strings counterpart +// from the reader +func (s *scanner) scan() (token, string, error) { + ch, _, err := s.r.ReadRune() + + if err != nil && err != io.EOF { + return tokenError, "", err + } + + switch ch { + case zeroRune: + return eof, "", nil + case ':': + return colon, string(ch), nil + case '~': + return tilde, string(ch), nil + case '^': + return caret, string(ch), nil + case '.': + return dot, string(ch), nil + case '/': + return slash, string(ch), nil + case '{': + return obrace, string(ch), nil + case '}': + return cbrace, string(ch), nil + case '-': + return minus, string(ch), nil + case '@': + return at, string(ch), nil + case '\\': + return aslash, string(ch), nil + case '?': + return qmark, string(ch), nil + case '*': + return asterisk, string(ch), nil + case '[': + return obracket, string(ch), nil + case '!': + return emark, string(ch), nil + } + + if unicode.IsSpace(ch) { + return space, string(ch), nil + } + + if unicode.IsControl(ch) { + return control, string(ch), nil + } + + if unicode.IsLetter(ch) { + return tokenizeExpression(ch, word, unicode.IsLetter, s.r) + } + + if unicode.IsNumber(ch) { + return tokenizeExpression(ch, number, unicode.IsNumber, s.r) + } + + return tokenError, string(ch), nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go new file mode 100644 index 0000000000000000000000000000000000000000..abc4048869376c5c0ca3ae7629ca2a1004b1c732 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go @@ -0,0 +1,28 @@ +package revision + +// token represents a entity extracted from string parsing +type token int + +const ( + eof token = iota + + aslash + asterisk + at + caret + cbrace + colon + control + dot + emark + minus + number + obrace + obracket + qmark + slash + space + tilde + tokenError + word +) diff --git a/vendor/gopkg.in/src-d/go-git.v4/options.go b/vendor/gopkg.in/src-d/go-git.v4/options.go new file mode 100644 index 0000000000000000000000000000000000000000..1047d7eed557fa11ebbbe83f46f08b13ad3d6f0a --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/options.go @@ -0,0 +1,179 @@ +package git + +import ( + "errors" + + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband" + "gopkg.in/src-d/go-git.v4/plumbing/transport" +) + +// SubmoduleRescursivity defines how depth will affect any submodule recursive +// operation. +type SubmoduleRescursivity uint + +const ( + // DefaultRemoteName name of the default Remote, just like git command. + DefaultRemoteName = "origin" + + // NoRecurseSubmodules disables the recursion for a submodule operation. + NoRecurseSubmodules SubmoduleRescursivity = 0 + // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation. + DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 +) + +var ( + ErrMissingURL = errors.New("URL field is required") +) + +// CloneOptions describes how a clone should be performed. +type CloneOptions struct { + // The (possibly remote) repository URL to clone from. + URL string + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Name of the remote to be added, by default `origin`. + RemoteName string + // Remote branch to clone. + ReferenceName plumbing.ReferenceName + // Fetch only ReferenceName if true. 
+ SingleBranch bool + // Limit fetching to the specified number of commits. + Depth int + // RecurseSubmodules after the clone is created, initialize all submodules + // within, using their default settings. This option is ignored if the + // cloned repository does not have a worktree. + RecurseSubmodules SubmoduleRescursivity + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress +} + +// Validate validates the fields and sets the default values. +func (o *CloneOptions) Validate() error { + if o.URL == "" { + return ErrMissingURL + } + + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.ReferenceName == "" { + o.ReferenceName = plumbing.HEAD + } + + return nil +} + +// PullOptions describes how a pull should be performed. +type PullOptions struct { + // Name of the remote to be pulled. If empty, uses the default. + RemoteName string + // Remote branch to clone. If empty, uses HEAD. + ReferenceName plumbing.ReferenceName + // Fetch only ReferenceName if true. + SingleBranch bool + // Limit fetching to the specified number of commits. + Depth int + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // RecurseSubmodules controls if new commits of all populated submodules + // should be fetched too. + RecurseSubmodules SubmoduleRescursivity + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress +} + +// Validate validates the fields and sets the default values. +func (o *PullOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.ReferenceName == "" { + o.ReferenceName = plumbing.HEAD + } + + return nil +} + +// FetchOptions describes how a fetch should be performed +type FetchOptions struct { + // Name of the remote to fetch from. Defaults to origin. + RemoteName string + RefSpecs []config.RefSpec + // Depth limit fetching to the specified number of commits from the tip of + // each remote branch history. + Depth int + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress +} + +// Validate validates the fields and sets the default values. +func (o *FetchOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + for _, r := range o.RefSpecs { + if err := r.Validate(); err != nil { + return err + } + } + + return nil +} + +// PushOptions describes how a push should be performed. +type PushOptions struct { + // RemoteName is the name of the remote to be pushed to. + RemoteName string + // RefSpecs specify what destination ref to update with what source + // object. A refspec with empty src can be used to delete a reference. + RefSpecs []config.RefSpec + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod +} + +// Validate validates the fields and sets the default values. 
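// A minimal usage sketch, assuming the vendored gopkg.in/src-d/go-git.v4 import
// path resolves (for example from inside this repository) and using an arbitrary
// placeholder URL: Validate fills in the documented defaults, so callers only
// set what differs.
package main

import (
	"fmt"

	git "gopkg.in/src-d/go-git.v4"
)

func main() {
	opts := &git.CloneOptions{URL: "https://example.com/repo.git"}
	if err := opts.Validate(); err != nil {
		panic(err)
	}
	// RemoteName defaults to "origin" and ReferenceName to HEAD.
	fmt.Println(opts.RemoteName, opts.ReferenceName)

	// Validate on an empty URL reports ErrMissingURL instead of defaulting.
	fmt.Println((&git.CloneOptions{}).Validate())
}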
+func (o *PushOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if len(o.RefSpecs) == 0 { + o.RefSpecs = []config.RefSpec{ + config.RefSpec(config.DefaultPushRefSpec), + } + } + + for _, r := range o.RefSpecs { + if err := r.Validate(); err != nil { + return err + } + } + + return nil +} + +// SubmoduleUpdateOptions describes how a submodule update should be performed. +type SubmoduleUpdateOptions struct { + // Init, if true initializes the submodules recorded in the index. + Init bool + // NoFetch tell to the update command to not fetch new objects from the + // remote site. + NoFetch bool + // RecurseSubmodules the update is performed not only in the submodules of + // the current repository but also in any nested submodules inside those + // submodules (and so on). Until the SubmoduleRescursivity is reached. + RecurseSubmodules SubmoduleRescursivity +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go new file mode 100644 index 0000000000000000000000000000000000000000..7b90c55f5b8658d0224e592c7a6c9d1c5090b1c3 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go @@ -0,0 +1,18 @@ +package cache + +import "gopkg.in/src-d/go-git.v4/plumbing" + +const ( + Byte FileSize = 1 << (iota * 10) + KiByte + MiByte + GiByte +) + +type FileSize int64 + +type Object interface { + Add(o plumbing.EncodedObject) + Get(k plumbing.Hash) plumbing.EncodedObject + Clear() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object.go new file mode 100644 index 0000000000000000000000000000000000000000..44238ce11e4d297347f20e57f8e6aad9287305dc --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object.go @@ -0,0 +1,68 @@ +package cache + +import "gopkg.in/src-d/go-git.v4/plumbing" + +const ( + initialQueueSize = 20 + MaxSize = 10 * MiByte +) + +type ObjectFIFO struct { + objects map[plumbing.Hash]plumbing.EncodedObject + order *queue + + maxSize FileSize + actualSize FileSize +} + +// NewObjectFIFO returns an Object cache that keeps the newest objects that fit +// into the specific memory size +func NewObjectFIFO(size FileSize) *ObjectFIFO { + return &ObjectFIFO{ + objects: make(map[plumbing.Hash]plumbing.EncodedObject), + order: newQueue(initialQueueSize), + maxSize: size, + } +} + +// Add adds a new object to the cache. If the object size is greater than the +// cache size, the object is not added. +func (c *ObjectFIFO) Add(o plumbing.EncodedObject) { + // if the size of the object is bigger or equal than the cache size, + // skip it + if FileSize(o.Size()) >= c.maxSize { + return + } + + // if the object is into the cache, do not add it again + if _, ok := c.objects[o.Hash()]; ok { + return + } + + // delete the oldest object if cache is full + if c.actualSize >= c.maxSize { + h := c.order.Pop() + o := c.objects[h] + if o != nil { + c.actualSize -= FileSize(o.Size()) + delete(c.objects, h) + } + } + + c.objects[o.Hash()] = o + c.order.Push(o.Hash()) + c.actualSize += FileSize(o.Size()) +} + +// Get returns an object by his hash. 
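// A small usage sketch for the FIFO object cache, assuming the vendored import
// paths resolve; plumbing.MemoryObject is assumed here only as a convenient
// EncodedObject implementation and is not part of this file. The cache is
// bounded by a FileSize budget, Add silently skips objects that would not fit,
// and Get returns nil on a miss.
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/cache"
)

func main() {
	c := cache.NewObjectFIFO(cache.MaxSize) // 10 MiB budget

	obj := &plumbing.MemoryObject{}
	obj.SetType(plumbing.BlobObject)
	obj.Write([]byte("hello"))
	obj.SetSize(5)

	c.Add(obj)
	fmt.Println(c.Get(obj.Hash()) != nil) // true: the object fits and was kept
	fmt.Println(c.Get(plumbing.ZeroHash)) // <nil>: unknown hashes miss

	c.Clear() // drops everything and resets the size accounting
}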
If the object is not found in the cache, it +// returns nil +func (c *ObjectFIFO) Get(k plumbing.Hash) plumbing.EncodedObject { + return c.objects[k] +} + +// Clear the content of this object cache +func (c *ObjectFIFO) Clear() { + c.objects = make(map[plumbing.Hash]plumbing.EncodedObject) + c.order = newQueue(initialQueueSize) + c.actualSize = 0 +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/queue.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/queue.go new file mode 100644 index 0000000000000000000000000000000000000000..85413e0b7a9433cc3c3aebc676ba82ed9e872a67 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/queue.go @@ -0,0 +1,46 @@ +package cache + +import "gopkg.in/src-d/go-git.v4/plumbing" + +// queue is a basic FIFO queue based on a circular list that resize as needed. +type queue struct { + elements []plumbing.Hash + size int + head int + tail int + count int +} + +// newQueue returns a queue with the specified initial size +func newQueue(size int) *queue { + return &queue{ + elements: make([]plumbing.Hash, size), + size: size, + } +} + +// Push adds a node to the queue. +func (q *queue) Push(h plumbing.Hash) { + if q.head == q.tail && q.count > 0 { + elements := make([]plumbing.Hash, len(q.elements)+q.size) + copy(elements, q.elements[q.head:]) + copy(elements[len(q.elements)-q.head:], q.elements[:q.head]) + q.head = 0 + q.tail = len(q.elements) + q.elements = elements + } + q.elements[q.tail] = h + q.tail = (q.tail + 1) % len(q.elements) + q.count++ +} + +// Pop removes and returns a Hash from the queue in first to last order. +func (q *queue) Pop() plumbing.Hash { + if q.count == 0 { + return plumbing.ZeroHash + } + node := q.elements[q.head] + q.head = (q.head + 1) % len(q.elements) + q.count-- + return node +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go new file mode 100644 index 0000000000000000000000000000000000000000..a3ebed3f6c26f5b89d0b246aac71cae0a42d7ee0 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go @@ -0,0 +1,35 @@ +package plumbing + +import "fmt" + +type PermanentError struct { + Err error +} + +func NewPermanentError(err error) *PermanentError { + if err == nil { + return nil + } + + return &PermanentError{Err: err} +} + +func (e *PermanentError) Error() string { + return fmt.Sprintf("permanent client error: %s", e.Err.Error()) +} + +type UnexpectedError struct { + Err error +} + +func NewUnexpectedError(err error) *UnexpectedError { + if err == nil { + return nil + } + + return &UnexpectedError{Err: err} +} + +func (e *UnexpectedError) Error() string { + return fmt.Sprintf("unexpected client error: %s", e.Err.Error()) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go new file mode 100644 index 0000000000000000000000000000000000000000..0994bc4d755c001f6f063eacd69f677f7781d9f3 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go @@ -0,0 +1,188 @@ +package filemode + +import ( + "encoding/binary" + "fmt" + "os" + "strconv" +) + +// A FileMode represents the kind of tree entries used by git. It +// resembles regular file systems modes, although FileModes are +// considerably simpler (there are not so many), and there are some, +// like Submodule that has no file system equivalent. 
+type FileMode uint32 + +const ( + // Empty is used as the FileMode of tree elements when comparing + // trees in the following situations: + // + // - the mode of tree elements before their creation. - the mode of + // tree elements after their deletion. - the mode of unmerged + // elements when checking the index. + // + // Empty has no file system equivalent. As Empty is the zero value + // of FileMode, it is also returned by New and + // NewFromOsNewFromOSFileMode along with an error, when they fail. + Empty FileMode = 0 + // Dir represent a Directory. + Dir FileMode = 0040000 + // Regular represent non-executable files. Please note this is not + // the same as golang regular files, which include executable files. + Regular FileMode = 0100644 + // Deprecated represent non-executable files with the group writable + // bit set. This mode was supported by the first versions of git, + // but it has been deprecatred nowadays. This library uses them + // internally, so you can read old packfiles, but will treat them as + // Regulars when interfacing with the outside world. This is the + // standard git behaviuor. + Deprecated FileMode = 0100664 + // Executable represents executable files. + Executable FileMode = 0100755 + // Symlink represents symbolic links to files. + Symlink FileMode = 0120000 + // Submodule represents git submodules. This mode has no file system + // equivalent. + Submodule FileMode = 0160000 +) + +// New takes the octal string representation of a FileMode and returns +// the FileMode and a nil error. If the string can not be parsed to a +// 32 bit unsigned octal number, it returns Empty and the parsing error. +// +// Example: "40000" means Dir, "100644" means Regular. +// +// Please note this function does not check if the returned FileMode +// is valid in git or if it is malformed. For instance, "1" will +// return the malformed FileMode(1) and a nil error. +func New(s string) (FileMode, error) { + n, err := strconv.ParseUint(s, 8, 32) + if err != nil { + return Empty, err + } + + return FileMode(n), nil +} + +// NewFromOSFileMode returns the FileMode used by git to represent +// the provided file system modes and a nil error on success. If the +// file system mode cannot be mapped to any valid git mode (as with +// sockets or named pipes), it will return Empty and an error. +// +// Note that some git modes cannot be generated from os.FileModes, like +// Deprecated and Submodule; while Empty will be returned, along with an +// error, only when the method fails. +func NewFromOSFileMode(m os.FileMode) (FileMode, error) { + if m.IsRegular() { + if isSetTemporary(m) { + return Empty, fmt.Errorf("no equivalent git mode for %s", m) + } + if isSetCharDevice(m) { + return Empty, fmt.Errorf("no equivalent git mode for %s", m) + } + if isSetUserExecutable(m) { + return Executable, nil + } + return Regular, nil + } + + if m.IsDir() { + return Dir, nil + } + + if isSetSymLink(m) { + return Symlink, nil + } + + return Empty, fmt.Errorf("no equivalent git mode for %s", m) +} + +func isSetCharDevice(m os.FileMode) bool { + return m&os.ModeCharDevice != 0 +} + +func isSetTemporary(m os.FileMode) bool { + return m&os.ModeTemporary != 0 +} + +func isSetUserExecutable(m os.FileMode) bool { + return m&0100 != 0 +} + +func isSetSymLink(m os.FileMode) bool { + return m&os.ModeSymlink != 0 +} + +// Bytes return a slice of 4 bytes with the mode in little endian +// encoding. 
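// A short usage sketch (vendored import path assumed to resolve): New parses the
// octal form stored in tree entries, and NewFromOSFileMode maps an os.FileMode
// onto the small set of modes git knows about.
package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/filemode"
)

func main() {
	m, err := filemode.New("100644")
	if err != nil {
		panic(err)
	}
	fmt.Println(m == filemode.Regular, m.IsFile()) // true true
	fmt.Println(m)                                 // "0100644": always 7 octal digits

	// A directory on disk maps to the git Dir mode.
	d, _ := filemode.NewFromOSFileMode(os.ModeDir | 0755)
	fmt.Println(d == filemode.Dir) // true

	// Sockets, pipes, etc. have no git equivalent and come back as Empty plus an error.
	_, err = filemode.NewFromOSFileMode(os.ModeSocket)
	fmt.Println(err != nil) // true
}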
+func (m FileMode) Bytes() []byte { + ret := make([]byte, 4) + binary.LittleEndian.PutUint32(ret, uint32(m)) + return ret[:] +} + +// IsMalformed returns if the FileMode should not appear in a git packfile, +// this is: Empty and any other mode not mentioned as a constant in this +// package. +func (m FileMode) IsMalformed() bool { + return m != Dir && + m != Regular && + m != Deprecated && + m != Executable && + m != Symlink && + m != Submodule +} + +// String returns the FileMode as a string in the standatd git format, +// this is, an octal number padded with ceros to 7 digits. Malformed +// modes are printed in that same format, for easier debugging. +// +// Example: Regular is "0100644", Empty is "0000000". +func (m FileMode) String() string { + return fmt.Sprintf("%07o", uint32(m)) +} + +// IsRegular returns if the FileMode represents that of a regular file, +// this is, either Regular or Deprecated. Please note that Executable +// are not regular even though in the UNIX tradition, they usually are: +// See the IsFile method. +func (m FileMode) IsRegular() bool { + return m == Regular || + m == Deprecated +} + +// IsFile returns if the FileMode represents that of a file, this is, +// Regular, Deprecated, Excutable or Link. +func (m FileMode) IsFile() bool { + return m == Regular || + m == Deprecated || + m == Executable || + m == Symlink +} + +// ToOSFileMode returns the os.FileMode to be used when creating file +// system elements with the given git mode and a nil error on success. +// +// When the provided mode cannot be mapped to a valid file system mode +// (e.g. Submodule) it returns os.FileMode(0) and an error. +// +// The returned file mode does not take into account the umask. +func (m FileMode) ToOSFileMode() (os.FileMode, error) { + switch m { + case Dir: + return os.ModePerm | os.ModeDir, nil + case Submodule: + return os.ModePerm | os.ModeDir, nil + case Regular: + return os.FileMode(0644), nil + // Deprecated is no longer allowed: treated as a Regular instead + case Deprecated: + return os.FileMode(0644), nil + case Executable: + return os.FileMode(0755), nil + case Symlink: + return os.ModePerm | os.ModeSymlink, nil + } + + return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go new file mode 100644 index 0000000000000000000000000000000000000000..8f98ad1741e1cc45db8d793d3b809ba4553924ae --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go @@ -0,0 +1,99 @@ +package config + +// New creates a new config instance. +func New() *Config { + return &Config{} +} + +// Config contains all the sections, comments and includes from a config file. +type Config struct { + Comment *Comment + Sections Sections + Includes Includes +} + +// Includes is a list of Includes in a config file. +type Includes []*Include + +// Include is a reference to an included config file. +type Include struct { + Path string + Config *Config +} + +// Comment string without the prefix '#' or ';'. +type Comment string + +const ( + // NoSubsection token is passed to Config.Section and Config.SetSection to + // represent the absence of a section. + NoSubsection = "" +) + +// Section returns a existing section with the given name or creates a new one. 
+func (c *Config) Section(name string) *Section { + for i := len(c.Sections) - 1; i >= 0; i-- { + s := c.Sections[i] + if s.IsName(name) { + return s + } + } + + s := &Section{Name: name} + c.Sections = append(c.Sections, s) + return s +} + +// AddOption adds an option to a given section and subsection. Use the +// NoSubsection constant for the subsection argument if no subsection is wanted. +func (c *Config) AddOption(section string, subsection string, key string, value string) *Config { + if subsection == "" { + c.Section(section).AddOption(key, value) + } else { + c.Section(section).Subsection(subsection).AddOption(key, value) + } + + return c +} + +// SetOption sets an option to a given section and subsection. Use the +// NoSubsection constant for the subsection argument if no subsection is wanted. +func (c *Config) SetOption(section string, subsection string, key string, value string) *Config { + if subsection == "" { + c.Section(section).SetOption(key, value) + } else { + c.Section(section).Subsection(subsection).SetOption(key, value) + } + + return c +} + +// RemoveSection removes a section from a config file. +func (c *Config) RemoveSection(name string) *Config { + result := Sections{} + for _, s := range c.Sections { + if !s.IsName(name) { + result = append(result, s) + } + } + + c.Sections = result + return c +} + +// RemoveSubsection remove s a subsection from a config file. +func (c *Config) RemoveSubsection(section string, subsection string) *Config { + for _, s := range c.Sections { + if s.IsName(section) { + result := Subsections{} + for _, ss := range s.Subsections { + if !ss.IsName(subsection) { + result = append(result, ss) + } + } + s.Subsections = result + } + } + + return c +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go new file mode 100644 index 0000000000000000000000000000000000000000..0f02ce19307bc36e9f3891af016250272a623898 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go @@ -0,0 +1,37 @@ +package config + +import ( + "io" + + "github.com/src-d/gcfg" +) + +// A Decoder reads and decodes config files from an input stream. +type Decoder struct { + io.Reader +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r} +} + +// Decode reads the whole config from its input and stores it in the +// value pointed to by config. +func (d *Decoder) Decode(config *Config) error { + cb := func(s string, ss string, k string, v string, bv bool) error { + if ss == "" && k == "" { + config.Section(s) + return nil + } + + if ss != "" && k == "" { + config.Section(s).Subsection(ss) + return nil + } + + config.AddOption(s, ss, k, v) + return nil + } + return gcfg.ReadWithCallback(d, cb) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..3986c8365816949c06bc43a0b1233fc684ac7fd9 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go @@ -0,0 +1,122 @@ +// Package config implements encoding and decoding of git config files. +// +// Configuration File +// ------------------ +// +// The Git configuration file contains a number of variables that affect +// the Git commands' behavior. 
The `.git/config` file in each repository +// is used to store the configuration for that repository, and +// `$HOME/.gitconfig` is used to store a per-user configuration as +// fallback values for the `.git/config` file. The file `/etc/gitconfig` +// can be used to store a system-wide default configuration. +// +// The configuration variables are used by both the Git plumbing +// and the porcelains. The variables are divided into sections, wherein +// the fully qualified variable name of the variable itself is the last +// dot-separated segment and the section name is everything before the last +// dot. The variable names are case-insensitive, allow only alphanumeric +// characters and `-`, and must start with an alphabetic character. Some +// variables may appear multiple times; we say then that the variable is +// multivalued. +// +// Syntax +// ~~~~~~ +// +// The syntax is fairly flexible and permissive; whitespaces are mostly +// ignored. The '#' and ';' characters begin comments to the end of line, +// blank lines are ignored. +// +// The file consists of sections and variables. A section begins with +// the name of the section in square brackets and continues until the next +// section begins. Section names are case-insensitive. Only alphanumeric +// characters, `-` and `.` are allowed in section names. Each variable +// must belong to some section, which means that there must be a section +// header before the first setting of a variable. +// +// Sections can be further divided into subsections. To begin a subsection +// put its name in double quotes, separated by space from the section name, +// in the section header, like in the example below: +// +// -------- +// [section "subsection"] +// +// -------- +// +// Subsection names are case sensitive and can contain any characters except +// newline (doublequote `"` and backslash can be included by escaping them +// as `\"` and `\\`, respectively). Section headers cannot span multiple +// lines. Variables may belong directly to a section or to a given subsection. +// You can have `[section]` if you have `[section "subsection"]`, but you +// don't need to. +// +// There is also a deprecated `[section.subsection]` syntax. With this +// syntax, the subsection name is converted to lower-case and is also +// compared case sensitively. These subsection names follow the same +// restrictions as section names. +// +// All the other lines (and the remainder of the line after the section +// header) are recognized as setting variables, in the form +// 'name = value' (or just 'name', which is a short-hand to say that +// the variable is the boolean "true"). +// The variable names are case-insensitive, allow only alphanumeric characters +// and `-`, and must start with an alphabetic character. +// +// A line that defines a value can be continued to the next line by +// ending it with a `\`; the backquote and the end-of-line are +// stripped. Leading whitespaces after 'name =', the remainder of the +// line after the first comment character '#' or ';', and trailing +// whitespaces of the line are discarded unless they are enclosed in +// double quotes. Internal whitespaces within the value are retained +// verbatim. +// +// Inside double quotes, double quote `"` and backslash `\` characters +// must be escaped: use `\"` for `"` and `\\` for `\`. +// +// The following escape sequences (beside `\"` and `\\`) are recognized: +// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB) +// and `\b` for backspace (BS). 
Other char escape sequences (including octal +// escape sequences) are invalid. +// +// Includes +// ~~~~~~~~ +// +// You can include one config file from another by setting the special +// `include.path` variable to the name of the file to be included. The +// variable takes a pathname as its value, and is subject to tilde +// expansion. +// +// The included file is expanded immediately, as if its contents had been +// found at the location of the include directive. If the value of the +// `include.path` variable is a relative path, the path is considered to be +// relative to the configuration file in which the include directive was +// found. See below for examples. +// +// +// Example +// ~~~~~~~ +// +// # Core variables +// [core] +// ; Don't trust file modes +// filemode = false +// +// # Our diff algorithm +// [diff] +// external = /usr/local/bin/diff-wrapper +// renames = true +// +// [branch "devel"] +// remote = origin +// merge = refs/heads/devel +// +// # Proxy settings +// [core] +// gitProxy="ssh" for "kernel.org" +// gitProxy=default-proxy ; for the rest +// +// [include] +// path = /path/to/foo.inc ; include by absolute path +// path = foo ; expand "foo" relative to the current file +// path = ~/foo ; expand "foo" in your `$HOME` directory +// +package config diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go new file mode 100644 index 0000000000000000000000000000000000000000..88bdf650564e05fd80e9cc19cb091f76a070bd1a --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go @@ -0,0 +1,75 @@ +package config + +import ( + "fmt" + "io" +) + +// An Encoder writes config files to an output stream. +type Encoder struct { + w io.Writer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w} +} + +// Encode writes the config in git config format to the stream of the encoder. +func (e *Encoder) Encode(cfg *Config) error { + for _, s := range cfg.Sections { + if err := e.encodeSection(s); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSection(s *Section) error { + if len(s.Options) > 0 { + if err := e.printf("[%s]\n", s.Name); err != nil { + return err + } + + if err := e.encodeOptions(s.Options); err != nil { + return err + } + } + + for _, ss := range s.Subsections { + if err := e.encodeSubsection(s.Name, ss); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { + //TODO: escape + if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil { + return err + } + + if err := e.encodeOptions(s.Options); err != nil { + return err + } + + return nil +} + +func (e *Encoder) encodeOptions(opts Options) error { + for _, o := range opts { + if err := e.printf("\t%s = %s\n", o.Key, o.Value); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) printf(msg string, args ...interface{}) error { + _, err := fmt.Fprintf(e.w, msg, args...) 
+ return err +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go new file mode 100644 index 0000000000000000000000000000000000000000..3c391c6eb9765c0a00b35b1f29ec567086f36688 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go @@ -0,0 +1,84 @@ +package config + +import ( + "strings" +) + +// Option defines a key/value entity in a config file. +type Option struct { + // Key preserving original caseness. + // Use IsKey instead to compare key regardless of caseness. + Key string + // Original value as string, could be not normalized. + Value string +} + +type Options []*Option + +// IsKey returns true if the given key matches +// this option's key in a case-insensitive comparison. +func (o *Option) IsKey(key string) bool { + return strings.ToLower(o.Key) == strings.ToLower(key) +} + +// Get gets the value for the given key if set, +// otherwise it returns the empty string. +// +// Note that there is no difference +// +// This matches git behaviour since git v1.8.1-rc1, +// if there are multiple definitions of a key, the +// last one wins. +// +// See: http://article.gmane.org/gmane.linux.kernel/1407184 +// +// In order to get all possible values for the same key, +// use GetAll. +func (opts Options) Get(key string) string { + for i := len(opts) - 1; i >= 0; i-- { + o := opts[i] + if o.IsKey(key) { + return o.Value + } + } + return "" +} + +// GetAll returns all possible values for the same key. +func (opts Options) GetAll(key string) []string { + result := []string{} + for _, o := range opts { + if o.IsKey(key) { + result = append(result, o.Value) + } + } + return result +} + +func (opts Options) withoutOption(key string) Options { + result := Options{} + for _, o := range opts { + if !o.IsKey(key) { + result = append(result, o) + } + } + return result +} + +func (opts Options) withAddedOption(key string, value string) Options { + return append(opts, &Option{key, value}) +} + +func (opts Options) withSettedOption(key string, value string) Options { + for i := len(opts) - 1; i >= 0; i-- { + o := opts[i] + if o.IsKey(key) { + result := make(Options, len(opts)) + copy(result, opts) + result[i] = &Option{key, value} + return result + } + } + + return opts.withAddedOption(key, value) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go new file mode 100644 index 0000000000000000000000000000000000000000..547da1eb538f4cf1526401689bcf8ae67e076fcc --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go @@ -0,0 +1,125 @@ +package config + +import "strings" + +// Section is the representation of a section inside git configuration files. +// Each Section contains Options that are used by both the Git plumbing +// and the porcelains. +// Sections can be further divided into subsections. To begin a subsection +// put its name in double quotes, separated by space from the section name, +// in the section header, like in the example below: +// +// [section "subsection"] +// +// All the other lines (and the remainder of the line after the section header) +// are recognized as option variables, in the form "name = value" (or just name, +// which is a short-hand to say that the variable is the boolean "true"). 
+// The variable names are case-insensitive, allow only alphanumeric characters +// and -, and must start with an alphabetic character: +// +// [section "subsection1"] +// option1 = value1 +// option2 +// [section "subsection2"] +// option3 = value2 +// +type Section struct { + Name string + Options Options + Subsections Subsections +} + +type Subsection struct { + Name string + Options Options +} + +type Sections []*Section + +type Subsections []*Subsection + +// IsName checks if the name provided is equals to the Section name, case insensitive. +func (s *Section) IsName(name string) bool { + return strings.ToLower(s.Name) == strings.ToLower(name) +} + +// Option return the value for the specified key. Empty string is returned if +// key does not exists. +func (s *Section) Option(key string) string { + return s.Options.Get(key) +} + +// AddOption adds a new Option to the Section. The updated Section is returned. +func (s *Section) AddOption(key string, value string) *Section { + s.Options = s.Options.withAddedOption(key, value) + return s +} + +// SetOption adds a new Option to the Section. If the option already exists, is replaced. +// The updated Section is returned. +func (s *Section) SetOption(key string, value string) *Section { + s.Options = s.Options.withSettedOption(key, value) + return s +} + +// Remove an option with the specified key. The updated Section is returned. +func (s *Section) RemoveOption(key string) *Section { + s.Options = s.Options.withoutOption(key) + return s +} + +// Subsection returns a Subsection from the specified Section. If the +// Subsection does not exists, new one is created and added to Section. +func (s *Section) Subsection(name string) *Subsection { + for i := len(s.Subsections) - 1; i >= 0; i-- { + ss := s.Subsections[i] + if ss.IsName(name) { + return ss + } + } + + ss := &Subsection{Name: name} + s.Subsections = append(s.Subsections, ss) + return ss +} + +// HasSubsection checks if the Section has a Subsection with the specified name. +func (s *Section) HasSubsection(name string) bool { + for _, ss := range s.Subsections { + if ss.IsName(name) { + return true + } + } + + return false +} + +// IsName checks if the name of the subsection is exactly the specified name. +func (s *Subsection) IsName(name string) bool { + return s.Name == name +} + +// Option returns an option with the specified key. If the option does not exists, +// empty spring will be returned. +func (s *Subsection) Option(key string) string { + return s.Options.Get(key) +} + +// AddOption adds a new Option to the Subsection. The updated Subsection is returned. +func (s *Subsection) AddOption(key string, value string) *Subsection { + s.Options = s.Options.withAddedOption(key, value) + return s +} + +// SetOption adds a new Option to the Subsection. If the option already exists, is replaced. +// The updated Subsection is returned. +func (s *Subsection) SetOption(key string, value string) *Subsection { + s.Options = s.Options.withSettedOption(key, value) + return s +} + +// RemoveOption removes the option with the specified key. The updated Subsection is returned. 
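// A round-trip sketch tying the decoder, the in-memory model and the encoder
// above together (vendored import path assumed to resolve): parse a small
// config, read and rewrite an option, and serialize it again.
package main

import (
	"bytes"
	"fmt"
	"strings"

	"gopkg.in/src-d/go-git.v4/plumbing/format/config"
)

func main() {
	raw := "[core]\n\tbare = false\n[remote \"origin\"]\n\turl = https://example.com/repo.git\n"

	cfg := config.New()
	if err := config.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
		panic(err)
	}

	// Reads go through Section/Subsection; the last definition of a key wins.
	fmt.Println(cfg.Section("core").Option("bare"))
	fmt.Println(cfg.Section("remote").Subsection("origin").Option("url"))

	// SetOption replaces in place; AddOption would append another definition.
	cfg.SetOption("core", config.NoSubsection, "bare", "true")

	var out bytes.Buffer
	if err := config.NewEncoder(&out).Encode(cfg); err != nil {
		panic(err)
	}
	fmt.Print(out.String()) // re-encoded git-config text
}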
+func (s *Subsection) RemoveOption(key string) *Subsection { + s.Options = s.Options.withoutOption(key) + return s +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go new file mode 100644 index 0000000000000000000000000000000000000000..66184757f71d09030673fc3a7a54346be181881b --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go @@ -0,0 +1,147 @@ +package idxfile + +import ( + "bytes" + "errors" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/binary" +) + +var ( + // ErrUnsupportedVersion is returned by Decode when the idx file version + // is not supported. + ErrUnsupportedVersion = errors.New("Unsuported version") + // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. + ErrMalformedIdxFile = errors.New("Malformed IDX file") +) + +// Decoder reads and decodes idx files from an input stream. +type Decoder struct { + io.Reader +} + +// NewDecoder builds a new idx stream decoder, that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r} +} + +// Decode reads from the stream and decode the content into the Idxfile struct. +func (d *Decoder) Decode(idx *Idxfile) error { + if err := validateHeader(d); err != nil { + return err + } + + flow := []func(*Idxfile, io.Reader) error{ + readVersion, + readFanout, + readObjectNames, + readCRC32, + readOffsets, + readChecksums, + } + + for _, f := range flow { + if err := f(idx, d); err != nil { + return err + } + } + + if !idx.isValid() { + return ErrMalformedIdxFile + } + + return nil +} + +func validateHeader(r io.Reader) error { + var h = make([]byte, 4) + if _, err := io.ReadFull(r, h); err != nil { + return err + } + + if !bytes.Equal(h, idxHeader) { + return ErrMalformedIdxFile + } + + return nil +} + +func readVersion(idx *Idxfile, r io.Reader) error { + v, err := binary.ReadUint32(r) + if err != nil { + return err + } + + if v > VersionSupported { + return ErrUnsupportedVersion + } + + idx.Version = v + return nil +} + +func readFanout(idx *Idxfile, r io.Reader) error { + var err error + for i := 0; i < 255; i++ { + idx.Fanout[i], err = binary.ReadUint32(r) + if err != nil { + return err + } + } + + idx.ObjectCount, err = binary.ReadUint32(r) + return err +} + +func readObjectNames(idx *Idxfile, r io.Reader) error { + c := int(idx.ObjectCount) + for i := 0; i < c; i++ { + var ref plumbing.Hash + if _, err := io.ReadFull(r, ref[:]); err != nil { + return err + } + + idx.Entries = append(idx.Entries, Entry{Hash: ref}) + } + + return nil +} + +func readCRC32(idx *Idxfile, r io.Reader) error { + c := int(idx.ObjectCount) + for i := 0; i < c; i++ { + if err := binary.Read(r, &idx.Entries[i].CRC32); err != nil { + return err + } + } + + return nil +} + +func readOffsets(idx *Idxfile, r io.Reader) error { + c := int(idx.ObjectCount) + for i := 0; i < c; i++ { + o, err := binary.ReadUint32(r) + if err != nil { + return err + } + + idx.Entries[i].Offset = uint64(o) + } + + return nil +} + +func readChecksums(idx *Idxfile, r io.Reader) error { + if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil { + return err + } + + if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil { + return err + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go new file mode 100644 index 
0000000000000000000000000000000000000000..1e628ab4a5e600fd9a790ac10052bedf8f21f865 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go @@ -0,0 +1,128 @@ +// Package idxfile implements encoding and decoding of packfile idx files. +// +// == Original (version 1) pack-*.idx files have the following format: +// +// - The header consists of 256 4-byte network byte order +// integers. N-th entry of this table records the number of +// objects in the corresponding pack, the first byte of whose +// object name is less than or equal to N. This is called the +// 'first-level fan-out' table. +// +// - The header is followed by sorted 24-byte entries, one entry +// per object in the pack. Each entry is: +// +// 4-byte network byte order integer, recording where the +// object is stored in the packfile as the offset from the +// beginning. +// +// 20-byte object name. +// +// - The file is concluded with a trailer: +// +// A copy of the 20-byte SHA1 checksum at the end of +// corresponding packfile. +// +// 20-byte SHA1-checksum of all of the above. +// +// Pack Idx file: +// +// -- +--------------------------------+ +// fanout | fanout[0] = 2 (for example) |-. +// table +--------------------------------+ | +// | fanout[1] | | +// +--------------------------------+ | +// | fanout[2] | | +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | +// | fanout[255] = total objects |---. +// -- +--------------------------------+ | | +// main | offset | | | +// index | object name 00XXXXXXXXXXXXXXXX | | | +// tab +--------------------------------+ | | +// | offset | | | +// | object name 00XXXXXXXXXXXXXXXX | | | +// +--------------------------------+<+ | +// .-| offset | | +// | | object name 01XXXXXXXXXXXXXXXX | | +// | +--------------------------------+ | +// | | offset | | +// | | object name 01XXXXXXXXXXXXXXXX | | +// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | +// | | offset | | +// | | object name FFXXXXXXXXXXXXXXXX | | +// --| +--------------------------------+<--+ +// trailer | | packfile checksum | +// | +--------------------------------+ +// | | idxfile checksum | +// | +--------------------------------+ +// .---------. +// | +// Pack file entry: <+ +// +// packed object header: +// 1-byte size extension bit (MSB) +// type (next 3 bit) +// size0 (lower 4-bit) +// n-byte sizeN (as long as MSB is set, each 7-bit) +// size0..sizeN form 4+7+7+..+7 bit integer, size0 +// is the least significant part, and sizeN is the +// most significant part. +// packed object data: +// If it is not DELTA, then deflated bytes (the size above +// is the size before compression). +// If it is REF_DELTA, then +// 20-byte base object name SHA1 (the size above is the +// size of the delta data that follows). +// delta data, deflated. +// If it is OFS_DELTA, then +// n-byte offset (see below) interpreted as a negative +// offset from the type-byte of the header of the +// ofs-delta entry (the size above is the size of +// the delta data that follows). +// delta data, deflated. +// +// offset encoding: +// n bytes with MSB set in all but the last one. +// The offset is then the number constructed by +// concatenating the lower 7 bit of each byte, and +// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1)) +// to the result. +// +// == Version 2 pack-*.idx files support packs larger than 4 GiB, and +// have some other reorganizations. They have the format: +// +// - A 4-byte magic number '\377tOc' which is an unreasonable +// fanout[0] value. 
+// +// - A 4-byte version number (= 2) +// +// - A 256-entry fan-out table just like v1. +// +// - A table of sorted 20-byte SHA1 object names. These are +// packed together without offset values to reduce the cache +// footprint of the binary search for a specific object name. +// +// - A table of 4-byte CRC32 values of the packed object data. +// This is new in v2 so compressed data can be copied directly +// from pack to pack during repacking without undetected +// data corruption. +// +// - A table of 4-byte offset values (in network byte order). +// These are usually 31-bit pack file offsets, but large +// offsets are encoded as an index into the next table with +// the msbit set. +// +// - A table of 8-byte offset entries (empty for pack files less +// than 2 GiB). Pack files are organized with heavily used +// objects toward the front, so most object references should +// not need to refer to this table. +// +// - The same trailer as a v1 pack file: +// +// A copy of the 20-byte SHA1 checksum at the end of +// corresponding packfile. +// +// 20-byte SHA1-checksum of all of the above. +// +// Source: +// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt +package idxfile diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go new file mode 100644 index 0000000000000000000000000000000000000000..71e1b3fdbfa3326a975873e5744962c31a0dc57f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go @@ -0,0 +1,132 @@ +package idxfile + +import ( + "crypto/sha1" + "hash" + "io" + "sort" + + "gopkg.in/src-d/go-git.v4/utils/binary" +) + +// Encoder writes Idxfile structs to an output stream. +type Encoder struct { + io.Writer + hash hash.Hash +} + +// NewEncoder returns a new stream encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + h := sha1.New() + mw := io.MultiWriter(w, h) + return &Encoder{mw, h} +} + +// Encode encodes an Idxfile to the encoder writer. 
+func (e *Encoder) Encode(idx *Idxfile) (int, error) { + idx.Entries.Sort() + + flow := []func(*Idxfile) (int, error){ + e.encodeHeader, + e.encodeFanout, + e.encodeHashes, + e.encodeCRC32, + e.encodeOffsets, + e.encodeChecksums, + } + + sz := 0 + for _, f := range flow { + i, err := f(idx) + sz += i + + if err != nil { + return sz, err + } + } + + return sz, nil +} + +func (e *Encoder) encodeHeader(idx *Idxfile) (int, error) { + c, err := e.Write(idxHeader) + if err != nil { + return c, err + } + + return c + 4, binary.WriteUint32(e, idx.Version) +} + +func (e *Encoder) encodeFanout(idx *Idxfile) (int, error) { + fanout := idx.calculateFanout() + for _, c := range fanout { + if err := binary.WriteUint32(e, c); err != nil { + return 0, err + } + } + + return 1024, nil +} + +func (e *Encoder) encodeHashes(idx *Idxfile) (int, error) { + sz := 0 + for _, ent := range idx.Entries { + i, err := e.Write(ent.Hash[:]) + sz += i + + if err != nil { + return sz, err + } + } + + return sz, nil +} + +func (e *Encoder) encodeCRC32(idx *Idxfile) (int, error) { + sz := 0 + for _, ent := range idx.Entries { + err := binary.Write(e, ent.CRC32) + sz += 4 + + if err != nil { + return sz, err + } + } + + return sz, nil +} + +func (e *Encoder) encodeOffsets(idx *Idxfile) (int, error) { + sz := 0 + for _, ent := range idx.Entries { + if err := binary.WriteUint32(e, uint32(ent.Offset)); err != nil { + return sz, err + } + + sz += 4 + + } + + return sz, nil +} + +func (e *Encoder) encodeChecksums(idx *Idxfile) (int, error) { + if _, err := e.Write(idx.PackfileChecksum[:]); err != nil { + return 0, err + } + + copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) + if _, err := e.Write(idx.IdxChecksum[:]); err != nil { + return 0, err + } + + return 40, nil +} + +// EntryList implements sort.Interface allowing sorting in increasing order. +type EntryList []Entry + +func (p EntryList) Len() int { return len(p) } +func (p EntryList) Less(i, j int) bool { return p[i].Hash.String() < p[j].Hash.String() } +func (p EntryList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p EntryList) Sort() { sort.Sort(p) } diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go new file mode 100644 index 0000000000000000000000000000000000000000..5a718f3f8f5ff014e12e77d4c8ed3b555b888554 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go @@ -0,0 +1,62 @@ +package idxfile + +import "gopkg.in/src-d/go-git.v4/plumbing" + +const ( + // VersionSupported is the only idx version supported. + VersionSupported = 2 +) + +var ( + idxHeader = []byte{255, 't', 'O', 'c'} +) + +// Idxfile is the in memory representation of an idx file. +type Idxfile struct { + Version uint32 + Fanout [255]uint32 + ObjectCount uint32 + Entries EntryList + PackfileChecksum [20]byte + IdxChecksum [20]byte +} + +// Entry is the in memory representation of an object entry in the idx file. +type Entry struct { + Hash plumbing.Hash + CRC32 uint32 + Offset uint64 +} + +// Add adds a new Entry with the given values to the Idxfile. 
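// A small sketch of writing an idx file with the encoder above (import path
// assumed to resolve; the hash, offset and CRC values are arbitrary examples):
// entries are added with Add, and Encode takes care of sorting, the fan-out
// table and the trailing checksums.
package main

import (
	"bytes"
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/idxfile"
)

func main() {
	idx := &idxfile.Idxfile{Version: idxfile.VersionSupported}

	h := plumbing.NewHash("9dea2395f5403188298c1dabe8bdafe562c491e3") // example hash
	idx.Add(h, 12 /* offset into the packfile */, 0 /* CRC32 of the packed data */)

	var buf bytes.Buffer
	n, err := idxfile.NewEncoder(&buf).Encode(idx)
	if err != nil {
		panic(err)
	}
	fmt.Println(n == buf.Len()) // true: Encode reports the number of bytes written
}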
+func (idx *Idxfile) Add(h plumbing.Hash, offset uint64, crc32 uint32) { + idx.Entries = append(idx.Entries, Entry{ + Hash: h, + Offset: offset, + CRC32: crc32, + }) +} + +func (idx *Idxfile) isValid() bool { + fanout := idx.calculateFanout() + for k, c := range idx.Fanout { + if fanout[k] != c { + return false + } + } + + return true +} + +func (idx *Idxfile) calculateFanout() [256]uint32 { + fanout := [256]uint32{} + for _, e := range idx.Entries { + fanout[e.Hash[0]]++ + } + + for i := 1; i < 256; i++ { + fanout[i] += fanout[i-1] + } + + return fanout +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go new file mode 100644 index 0000000000000000000000000000000000000000..4bc5af125ead80150828080f0b653569b4a6a222 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go @@ -0,0 +1,454 @@ +package index + +import ( + "bytes" + "crypto/sha1" + "errors" + "hash" + "io" + "io/ioutil" + "strconv" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/binary" +) + +var ( + // DecodeVersionSupported is the range of supported index versions + DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4} + + // ErrMalformedSignature is returned by Decode when the index header file is + // malformed + ErrMalformedSignature = errors.New("malformed index signature file") + // ErrInvalidChecksum is returned by Decode if the SHA1 hash missmatch with + // the read content + ErrInvalidChecksum = errors.New("invalid checksum") + + errUnknownExtension = errors.New("unknown extension") +) + +const ( + entryHeaderLength = 62 + entryExtended = 0x4000 + entryValid = 0x8000 + nameMask = 0xfff + intentToAddMask = 1 << 13 + skipWorkTreeMask = 1 << 14 +) + +// A Decoder reads and decodes index files from an input stream. +type Decoder struct { + r io.Reader + hash hash.Hash + lastEntry *Entry +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + h := sha1.New() + return &Decoder{ + r: io.TeeReader(r, h), + hash: h, + } +} + +// Decode reads the whole index object from its input and stores it in the +// value pointed to by idx. 
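// A usage sketch for the index decoder (vendored import path assumed to resolve,
// and ".git/index" is just an example path): it reads the on-disk index into the
// in-memory Index this package defines.
package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

func main() {
	f, err := os.Open(".git/index")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	idx := &index.Index{}
	if err := index.NewDecoder(f).Decode(idx); err != nil {
		panic(err)
	}

	fmt.Println("index version:", idx.Version)
	for _, e := range idx.Entries {
		// Each entry carries the staged blob hash plus the stat(2) metadata
		// described in the package documentation further below.
		fmt.Println(e.Hash, e.Name)
	}
}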
+func (d *Decoder) Decode(idx *Index) error { + var err error + idx.Version, err = validateHeader(d.r) + if err != nil { + return err + } + + entryCount, err := binary.ReadUint32(d.r) + if err != nil { + return err + } + + if err := d.readEntries(idx, int(entryCount)); err != nil { + return err + } + + return d.readExtensions(idx) +} + +func (d *Decoder) readEntries(idx *Index, count int) error { + for i := 0; i < count; i++ { + e, err := d.readEntry(idx) + if err != nil { + return err + } + + d.lastEntry = e + idx.Entries = append(idx.Entries, *e) + } + + return nil +} + +func (d *Decoder) readEntry(idx *Index) (*Entry, error) { + e := &Entry{} + + var msec, mnsec, sec, nsec uint32 + var flags uint16 + + flow := []interface{}{ + &sec, &nsec, + &msec, &mnsec, + &e.Dev, + &e.Inode, + &e.Mode, + &e.UID, + &e.GID, + &e.Size, + &e.Hash, + &flags, + } + + if err := binary.Read(d.r, flow...); err != nil { + return nil, err + } + + read := entryHeaderLength + + if sec != 0 || nsec != 0 { + e.CreatedAt = time.Unix(int64(sec), int64(nsec)) + } + + if msec != 0 || mnsec != 0 { + e.ModifiedAt = time.Unix(int64(msec), int64(mnsec)) + } + + e.Stage = Stage(flags>>12) & 0x3 + + if flags&entryExtended != 0 { + extended, err := binary.ReadUint16(d.r) + if err != nil { + return nil, err + } + + read += 2 + e.IntentToAdd = extended&intentToAddMask != 0 + e.SkipWorktree = extended&skipWorkTreeMask != 0 + } + + if err := d.readEntryName(idx, e, flags); err != nil { + return nil, err + } + + return e, d.padEntry(idx, e, read) +} + +func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error { + var name string + var err error + + switch idx.Version { + case 2, 3: + len := flags & nameMask + name, err = d.doReadEntryName(len) + case 4: + name, err = d.doReadEntryNameV4() + default: + return ErrUnsupportedVersion + } + + if err != nil { + return err + } + + e.Name = name + return nil +} + +func (d *Decoder) doReadEntryNameV4() (string, error) { + l, err := binary.ReadVariableWidthInt(d.r) + if err != nil { + return "", err + } + + var base string + if d.lastEntry != nil { + base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)] + } + + name, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return "", err + } + + return base + string(name), nil +} + +func (d *Decoder) doReadEntryName(len uint16) (string, error) { + name := make([]byte, len) + if err := binary.Read(d.r, &name); err != nil { + return "", err + } + + return string(name), nil +} + +// Index entries are padded out to the next 8 byte alignment +// for historical reasons related to how C Git read the files. 
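// Worked example of the padding rule implemented by padEntry below (versions 2
// and 3 only): the fixed header is 62 bytes, so an entry whose path is
// "README.md" (9 bytes) occupies 71 bytes and is followed by 8 - 71%8 = 1 NUL
// byte, while a 10-byte path occupies 72 bytes and gets 8 - 72%8 = 8 NUL bytes.
// That is how the "1-8 nul bytes" rule in the format description comes out
// while keeping the name NUL-terminated.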
+func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { + if idx.Version == 4 { + return nil + } + + entrySize := read + len(e.Name) + padLen := 8 - entrySize%8 + if _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)); err != nil { + return err + } + + return nil +} + +func (d *Decoder) readExtensions(idx *Index) error { + // TODO: support 'Split index' and 'Untracked cache' extensions, take in + // count that they are not supported by jgit or libgit + + var expected []byte + var err error + + var header [4]byte + for { + expected = d.hash.Sum(nil) + + var n int + if n, err = io.ReadFull(d.r, header[:]); err != nil { + if n == 0 { + err = io.EOF + } + + break + } + + err = d.readExtension(idx, header[:]) + if err != nil { + break + } + } + + if err != errUnknownExtension { + return err + } + + return d.readChecksum(expected, header) +} + +func (d *Decoder) readExtension(idx *Index, header []byte) error { + switch { + case bytes.Equal(header, treeExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.Cache = &Tree{} + d := &treeExtensionDecoder{r} + if err := d.Decode(idx.Cache); err != nil { + return err + } + case bytes.Equal(header, resolveUndoExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.ResolveUndo = &ResolveUndo{} + d := &resolveUndoDecoder{r} + if err := d.Decode(idx.ResolveUndo); err != nil { + return err + } + default: + return errUnknownExtension + } + + return nil +} + +func (d *Decoder) getExtensionReader() (io.Reader, error) { + len, err := binary.ReadUint32(d.r) + if err != nil { + return nil, err + } + + return &io.LimitedReader{R: d.r, N: int64(len)}, nil +} + +func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { + var h plumbing.Hash + copy(h[:4], alreadyRead[:]) + + if err := binary.Read(d.r, h[4:]); err != nil { + return err + } + + if bytes.Compare(h[:], expected) != 0 { + return ErrInvalidChecksum + } + + return nil +} + +func validateHeader(r io.Reader) (version uint32, err error) { + var s = make([]byte, 4) + if _, err := io.ReadFull(r, s); err != nil { + return 0, err + } + + if !bytes.Equal(s, indexSignature) { + return 0, ErrMalformedSignature + } + + version, err = binary.ReadUint32(r) + if err != nil { + return 0, err + } + + if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max { + return 0, ErrUnsupportedVersion + } + + return +} + +type treeExtensionDecoder struct { + r io.Reader +} + +func (d *treeExtensionDecoder) Decode(t *Tree) error { + for { + e, err := d.readEntry() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + if e == nil { + continue + } + + t.Entries = append(t.Entries, *e) + } +} + +func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) { + e := &TreeEntry{} + + path, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return nil, err + } + + e.Path = string(path) + + count, err := binary.ReadUntil(d.r, ' ') + if err != nil { + return nil, err + } + + i, err := strconv.Atoi(string(count)) + if err != nil { + return nil, err + } + + // An entry can be in an invalidated state and is represented by having a + // negative number in the entry_count field. 
+ if i == -1 { + return nil, nil + } + + e.Entries = i + trees, err := binary.ReadUntil(d.r, '\n') + if err != nil { + return nil, err + } + + i, err = strconv.Atoi(string(trees)) + if err != nil { + return nil, err + } + + e.Trees = i + + if err := binary.Read(d.r, &e.Hash); err != nil { + return nil, err + } + + return e, nil +} + +type resolveUndoDecoder struct { + r io.Reader +} + +func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error { + for { + e, err := d.readEntry() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + ru.Entries = append(ru.Entries, *e) + } +} + +func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) { + e := &ResolveUndoEntry{ + Stages: make(map[Stage]plumbing.Hash, 0), + } + + path, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return nil, err + } + + e.Path = string(path) + + for i := 0; i < 3; i++ { + if err := d.readStage(e, Stage(i+1)); err != nil { + return nil, err + } + } + + for s := range e.Stages { + var hash plumbing.Hash + if err := binary.Read(d.r, hash[:]); err != nil { + return nil, err + } + + e.Stages[s] = hash + } + + return e, nil +} + +func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error { + ascii, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return err + } + + stage, err := strconv.ParseInt(string(ascii), 8, 64) + if err != nil { + return err + } + + if stage != 0 { + e.Stages[s] = plumbing.ZeroHash + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..d1e7b335bbe50a09be2aeb5a1edd5e7beba30177 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go @@ -0,0 +1,301 @@ +// Package index implements encoding and decoding of index format files. +// +// Git index format +// ================ +// +// == The Git index file has the following format +// +// All binary numbers are in network byte order. Version 2 is described +// here unless stated otherwise. +// +// - A 12-byte header consisting of +// +// 4-byte signature: +// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache") +// +// 4-byte version number: +// The current supported versions are 2, 3 and 4. +// +// 32-bit number of index entries. +// +// - A number of sorted index entries (see below). +// +// - Extensions +// +// Extensions are identified by signature. Optional extensions can +// be ignored if Git does not understand them. +// +// Git currently supports cached tree and resolve undo extensions. +// +// 4-byte extension signature. If the first byte is 'A'..'Z' the +// extension is optional and can be ignored. +// +// 32-bit size of the extension +// +// Extension data +// +// - 160-bit SHA-1 over the content of the index file before this +// checksum. +// +// == Index entry +// +// Index entries are sorted in ascending order on the name field, +// interpreted as a string of unsigned bytes (i.e. memcmp() order, no +// localization, no special casing of directory separator '/'). Entries +// with the same name are sorted by their stage field. 
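As a concrete illustration of that 12-byte header (all values fabricated):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// "DIRC" signature, version 2, 3 entries, all in network byte order.
	hdr := []byte{'D', 'I', 'R', 'C', 0, 0, 0, 2, 0, 0, 0, 3}

	if !bytes.Equal(hdr[:4], []byte("DIRC")) {
		panic("not an index file")
	}
	version := binary.BigEndian.Uint32(hdr[4:8])
	entries := binary.BigEndian.Uint32(hdr[8:12])
	fmt.Println(version, entries) // 2 3
}
```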
+// +// 32-bit ctime seconds, the last time a file's metadata changed +// this is stat(2) data +// +// 32-bit ctime nanosecond fractions +// this is stat(2) data +// +// 32-bit mtime seconds, the last time a file's data changed +// this is stat(2) data +// +// 32-bit mtime nanosecond fractions +// this is stat(2) data +// +// 32-bit dev +// this is stat(2) data +// +// 32-bit ino +// this is stat(2) data +// +// 32-bit mode, split into (high to low bits) +// +// 4-bit object type +// valid values in binary are 1000 (regular file), 1010 (symbolic link) +// and 1110 (gitlink) +// +// 3-bit unused +// +// 9-bit unix permission. Only 0755 and 0644 are valid for regular files. +// Symbolic links and gitlinks have value 0 in this field. +// +// 32-bit uid +// this is stat(2) data +// +// 32-bit gid +// this is stat(2) data +// +// 32-bit file size +// This is the on-disk size from stat(2), truncated to 32-bit. +// +// 160-bit SHA-1 for the represented object +// +// A 16-bit 'flags' field split into (high to low bits) +// +// 1-bit assume-valid flag +// +// 1-bit extended flag (must be zero in version 2) +// +// 2-bit stage (during merge) +// +// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF +// is stored in this field. +// +// (Version 3 or later) A 16-bit field, only applicable if the +// "extended flag" above is 1, split into (high to low bits). +// +// 1-bit reserved for future +// +// 1-bit skip-worktree flag (used by sparse checkout) +// +// 1-bit intent-to-add flag (used by "git add -N") +// +// 13-bit unused, must be zero +// +// Entry path name (variable length) relative to top level directory +// (without leading slash). '/' is used as path separator. The special +// path components ".", ".." and ".git" (without quotes) are disallowed. +// Trailing slash is also disallowed. +// +// The exact encoding is undefined, but the '.' and '/' characters +// are encoded in 7-bit ASCII and the encoding cannot contain a NUL +// byte (iow, this is a UNIX pathname). +// +// (Version 4) In version 4, the entry path name is prefix-compressed +// relative to the path name for the previous entry (the very first +// entry is encoded as if the path name for the previous entry is an +// empty string). At the beginning of an entry, an integer N in the +// variable width encoding (the same encoding as the offset is encoded +// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed +// by a NUL-terminated string S. Removing N bytes from the end of the +// path name for the previous entry, and replacing it with the string S +// yields the path name for this entry. +// +// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes +// while keeping the name NUL-terminated. +// +// (Version 4) In version 4, the padding after the pathname does not +// exist. +// +// Interpretation of index entries in split index mode is completely +// different. See below for details. +// +// == Extensions +// +// === Cached tree +// +// Cached tree extension contains pre-computed hashes for trees that can +// be derived from the index. It helps speed up tree object generation +// from index for a new commit. +// +// When a path is updated in index, the path must be invalidated and +// removed from tree cache. +// +// The signature for this extension is { 'T', 'R', 'E', 'E' }. 
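Going back to the version-4 path compression described above (remove N bytes from the end of the previous path, append the NUL-terminated string S), the reconstruction reduces to one line; a sketch with a hypothetical helper:

```go
package main

import "fmt"

// v4Name applies the rule above: drop n bytes from the end of the previous
// entry's path and append the suffix read from the stream.
func v4Name(previous string, n int, suffix string) string {
	return previous[:len(previous)-n] + suffix
}

func main() {
	// Previous entry "docs/intro.txt"; the next entry stores n=9 and "usage.txt".
	fmt.Println(v4Name("docs/intro.txt", 9, "usage.txt")) // docs/usage.txt
}
```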
+// +// A series of entries fill the entire extension; each of which +// consists of: +// +// - NUL-terminated path component (relative to its parent directory); +// +// - ASCII decimal number of entries in the index that is covered by the +// tree this entry represents (entry_count); +// +// - A space (ASCII 32); +// +// - ASCII decimal number that represents the number of subtrees this +// tree has; +// +// - A newline (ASCII 10); and +// +// - 160-bit object name for the object that would result from writing +// this span of index as a tree. +// +// An entry can be in an invalidated state and is represented by having +// a negative number in the entry_count field. In this case, there is no +// object name and the next entry starts immediately after the newline. +// When writing an invalid entry, -1 should always be used as entry_count. +// +// The entries are written out in the top-down, depth-first order. The +// first entry represents the root level of the repository, followed by the +// first subtree--let's call this A--of the root level (with its name +// relative to the root level), followed by the first subtree of A (with +// its name relative to A), ... +// +// === Resolve undo +// +// A conflict is represented in the index as a set of higher stage entries. +// When a conflict is resolved (e.g. with "git add path"), these higher +// stage entries will be removed and a stage-0 entry with proper resolution +// is added. +// +// When these higher stage entries are removed, they are saved in the +// resolve undo extension, so that conflicts can be recreated (e.g. with +// "git checkout -m"), in case users want to redo a conflict resolution +// from scratch. +// +// The signature for this extension is { 'R', 'E', 'U', 'C' }. +// +// A series of entries fill the entire extension; each of which +// consists of: +// +// - NUL-terminated pathname the entry describes (relative to the root of +// the repository, i.e. full pathname); +// +// - Three NUL-terminated ASCII octal numbers, entry mode of entries in +// stage 1 to 3 (a missing stage is represented by "0" in this field); +// and +// +// - At most three 160-bit object names of the entry in stages from 1 to 3 +// (nothing is written for a missing stage). +// +// === Split index +// +// In split index mode, the majority of index entries could be stored +// in a separate file. This extension records the changes to be made on +// top of that to produce the final index. +// +// The signature for this extension is { 'l', 'i', 'n', 'k' }. +// +// The extension consists of: +// +// - 160-bit SHA-1 of the shared index file. The shared index file path +// is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the +// index does not require a shared index file. +// +// - An ewah-encoded delete bitmap, each bit represents an entry in the +// shared index. If a bit is set, its corresponding entry in the +// shared index will be removed from the final index. Note, because +// a delete operation changes index entry positions, but we do need +// original positions in replace phase, it's best to just mark +// entries for removal, then do a mass deletion after replacement. +// +// - An ewah-encoded replace bitmap, each bit represents an entry in +// the shared index. If a bit is set, its corresponding entry in the +// shared index will be replaced with an entry in this index +// file. All replaced entries are stored in sorted order in this +// index. 
The first "1" bit in the replace bitmap corresponds to the +// first index entry, the second "1" bit to the second entry and so +// on. Replaced entries may have empty path names to save space. +// +// The remaining index entries after replaced ones will be added to the +// final index. These added entries are also sorted by entry name then +// stage. +// +// == Untracked cache +// +// Untracked cache saves the untracked file list and necessary data to +// verify the cache. The signature for this extension is { 'U', 'N', +// 'T', 'R' }. +// +// The extension starts with +// +// - A sequence of NUL-terminated strings, preceded by the size of the +// sequence in variable width encoding. Each string describes the +// environment where the cache can be used. +// +// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from +// ctime field until "file size". +// +// - Stat data of plumbing.excludesfile +// +// - 32-bit dir_flags (see struct dir_struct) +// +// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file +// does not exist. +// +// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does +// not exist. +// +// - NUL-terminated string of per-dir exclude file name. This usually +// is ".gitignore". +// +// - The number of following directory blocks, variable width +// encoding. If this number is zero, the extension ends here with a +// following NUL. +// +// - A number of directory blocks in depth-first-search order, each +// consists of +// +// - The number of untracked entries, variable width encoding. +// +// - The number of sub-directory blocks, variable width encoding. +// +// - The directory name terminated by NUL. +// +// - A number of untracked file/dir names terminated by NUL. +// +// The remaining data of each directory block is grouped by type: +// +// - An ewah bitmap, the n-th bit marks whether the n-th directory has +// valid untracked cache entries. +// +// - An ewah bitmap, the n-th bit records "check-only" bit of +// read_directory_recursive() for the n-th directory. +// +// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data +// is valid for the n-th directory and exists in the next data. +// +// - An array of stat data. The n-th data corresponds with the n-th +// "one" bit in the previous ewah bitmap. +// +// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit +// in the previous ewah bitmap. +// +// - One NUL. +// Source https://www.kernel.org/pub/software/scm/git/docs/technical/index-format.txt +package index diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go new file mode 100644 index 0000000000000000000000000000000000000000..d568f15c5d3bc6c6da9ea2ba8159d7494aa006b0 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go @@ -0,0 +1,150 @@ +package index + +import ( + "bytes" + "crypto/sha1" + "errors" + "hash" + "io" + "sort" + "time" + + "gopkg.in/src-d/go-git.v4/utils/binary" +) + +var ( + // EncodeVersionSupported is the range of supported index versions + EncodeVersionSupported uint32 = 2 + + // ErrInvalidTimestamp is returned by Encode if a Index with a Entry with + // negative timestamp values + ErrInvalidTimestamp = errors.New("negative timestamps are not allowed") +) + +// An Encoder writes an Index to an output stream. +type Encoder struct { + w io.Writer + hash hash.Hash +} + +// NewEncoder returns a new encoder that writes to w. 
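A round-trip usage sketch for this package's Encoder together with the Decoder shown earlier; the entry values are fabricated, and NewDecoder is assumed to be the io.Reader constructor defined earlier in decoder.go:

```go
package main

import (
	"bytes"
	"fmt"
	"time"

	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

func main() {
	idx := &index.Index{Version: 2}
	idx.Entries = append(idx.Entries, index.Entry{
		Name:       "README.md",
		Size:       11,
		ModifiedAt: time.Now(),
	})

	var buf bytes.Buffer
	if err := index.NewEncoder(&buf).Encode(idx); err != nil {
		panic(err)
	}

	var out index.Index
	if err := index.NewDecoder(&buf).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Version, out.Entries[0].Name) // 2 README.md
}
```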
+func NewEncoder(w io.Writer) *Encoder { + h := sha1.New() + mw := io.MultiWriter(w, h) + return &Encoder{mw, h} +} + +// Encode writes the Index to the stream of the encoder. +func (e *Encoder) Encode(idx *Index) error { + // TODO: support versions v3 and v4 + // TODO: support extensions + if idx.Version != EncodeVersionSupported { + return ErrUnsupportedVersion + } + + if err := e.encodeHeader(idx); err != nil { + return err + } + + if err := e.encodeEntries(idx); err != nil { + return err + } + + return e.encodeFooter() +} + +func (e *Encoder) encodeHeader(idx *Index) error { + return binary.Write(e.w, + indexSignature, + idx.Version, + uint32(len(idx.Entries)), + ) +} + +func (e *Encoder) encodeEntries(idx *Index) error { + sort.Sort(byName(idx.Entries)) + + for _, entry := range idx.Entries { + if err := e.encodeEntry(&entry); err != nil { + return err + } + + wrote := entryHeaderLength + len(entry.Name) + if err := e.padEntry(wrote); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeEntry(entry *Entry) error { + if entry.IntentToAdd || entry.SkipWorktree { + return ErrUnsupportedVersion + } + + sec, nsec, err := e.timeToUint32(&entry.CreatedAt) + if err != nil { + return err + } + + msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt) + if err != nil { + return err + } + + flags := uint16(entry.Stage&0x3) << 12 + if l := len(entry.Name); l < nameMask { + flags |= uint16(l) + } else { + flags |= nameMask + } + + flow := []interface{}{ + sec, nsec, + msec, mnsec, + entry.Dev, + entry.Inode, + entry.Mode, + entry.UID, + entry.GID, + entry.Size, + entry.Hash[:], + flags, + } + + if err := binary.Write(e.w, flow...); err != nil { + return err + } + + return binary.Write(e.w, []byte(entry.Name)) +} + +func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { + if t.IsZero() { + return 0, 0, nil + } + + if t.Unix() < 0 || t.UnixNano() < 0 { + return 0, 0, ErrInvalidTimestamp + } + + return uint32(t.Unix()), uint32(t.Nanosecond()), nil +} + +func (e *Encoder) padEntry(wrote int) error { + padLen := 8 - wrote%8 + + _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) + return err +} + +func (e *Encoder) encodeFooter() error { + return binary.Write(e.w, e.hash.Sum(nil)) +} + +type byName []Entry + +func (l byName) Len() int { return len(l) } +func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name } diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go new file mode 100644 index 0000000000000000000000000000000000000000..ee50efde6fc42db110be210f4fdb4721326341bc --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go @@ -0,0 +1,113 @@ +package index + +import ( + "errors" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/filemode" +) + +var ( + // ErrUnsupportedVersion is returned by Decode when the idxindex file + // version is not supported. 
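The 16-bit flags word that encodeEntry above builds (and the decoder's readEntry unpacks) can be visualised directly; a small sketch:

```go
package main

import "fmt"

func main() {
	// Bits 12-13 carry the merge stage; the low 12 bits carry the name length,
	// saturating at 0xFFF (the nameMask used above) for longer paths.
	const nameMask = 0xFFF
	stage, name := 2, "vendor/some/path.go"

	flags := uint16(stage&0x3) << 12
	if l := len(name); l < nameMask {
		flags |= uint16(l)
	} else {
		flags |= nameMask
	}
	fmt.Printf("%016b\n", flags) // 0010000000010011 (stage 2, length 19)
}
```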
+ ErrUnsupportedVersion = errors.New("Unsuported version") + + indexSignature = []byte{'D', 'I', 'R', 'C'} + treeExtSignature = []byte{'T', 'R', 'E', 'E'} + resolveUndoExtSignature = []byte{'R', 'E', 'U', 'C'} +) + +// Stage during merge +type Stage int + +const ( + // Merged is the default stage, fully merged + Merged Stage = 1 + // AncestorMode is the base revision + AncestorMode Stage = 1 + // OurMode is the first tree revision, ours + OurMode Stage = 2 + // TheirMode is the second tree revision, theirs + TheirMode Stage = 3 +) + +// Index contains the information about which objects are currently checked out +// in the worktree, having information about the working files. Changes in +// worktree are detected using this Index. The Index is also used during merges +type Index struct { + // Version is index version + Version uint32 + // Entries collection of entries represented by this Index. The order of + // this collection is not guaranteed + Entries []Entry + // Cache represents the 'Cached tree' extension + Cache *Tree + // ResolveUndo represents the 'Resolve undo' extension + ResolveUndo *ResolveUndo +} + +// Entry represents a single file (or stage of a file) in the cache. An entry +// represents exactly one stage of a file. If a file path is unmerged then +// multiple Entry instances may appear for the same path name. +type Entry struct { + // Hash is the SHA1 of the represented file + Hash plumbing.Hash + // Name is the Entry path name relative to top level directory + Name string + // CreatedAt time when the tracked path was created + CreatedAt time.Time + // ModifiedAt time when the tracked path was changed + ModifiedAt time.Time + // Dev and Inode of the tracked path + Dev, Inode uint32 + // Mode of the path + Mode filemode.FileMode + // UID and GID, userid and group id of the owner + UID, GID uint32 + // Size is the length in bytes for regular files + Size uint32 + // Stage on a merge is defines what stage is representing this entry + // https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging + Stage Stage + // SkipWorktree used in sparse checkouts + // https://git-scm.com/docs/git-read-tree#_sparse_checkout + SkipWorktree bool + // IntentToAdd record only the fact that the path will be added later + // https://git-scm.com/docs/git-add ("git add -N") + IntentToAdd bool +} + +// Tree contains pre-computed hashes for trees that can be derived from the +// index. It helps speed up tree object generation from index for a new commit. +type Tree struct { + Entries []TreeEntry +} + +// TreeEntry entry of a cached Tree +type TreeEntry struct { + // Path component (relative to its parent directory) + Path string + // Entries is the number of entries in the index that is covered by the tree + // this entry represents. + Entries int + // Trees is the number that represents the number of subtrees this tree has + Trees int + // Hash object name for the object that would result from writing this span + // of index as a tree. + Hash plumbing.Hash +} + +// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"), +// these higher stage entries are removed and a stage-0 entry with proper +// resolution is added. When these higher stage entries are removed, they are +// saved in the resolve undo extension. 
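The Stage values above are what distinguish the several entries an unmerged path leaves in the index; a small sketch with fabricated entries:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing/format/index"
)

func main() {
	// A conflicted "main.go" appears once per stage: ancestor, ours, theirs.
	entries := []index.Entry{
		{Name: "main.go", Stage: index.AncestorMode},
		{Name: "main.go", Stage: index.OurMode},
		{Name: "main.go", Stage: index.TheirMode},
	}

	stages := map[string][]index.Stage{}
	for _, e := range entries {
		stages[e.Name] = append(stages[e.Name], e.Stage)
	}
	fmt.Println(stages) // map[main.go:[1 2 3]]
}
```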
+type ResolveUndo struct { + Entries []ResolveUndoEntry +} + +// ResolveUndoEntry contains the information about a conflict when is resolved +type ResolveUndoEntry struct { + Path string + Stages map[Stage]plumbing.Hash +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..a7145160ae0e43e93add96a5deb810c40ae8597f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go @@ -0,0 +1,2 @@ +// Package objfile implements encoding and decoding of object files. +package objfile diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go new file mode 100644 index 0000000000000000000000000000000000000000..e7e119cb1f9e10712945d9a7352d20fd7aabab53 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go @@ -0,0 +1,118 @@ +package objfile + +import ( + "compress/zlib" + "errors" + "io" + "strconv" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" +) + +var ( + ErrClosed = errors.New("objfile: already closed") + ErrHeader = errors.New("objfile: invalid header") + ErrNegativeSize = errors.New("objfile: negative object size") +) + +// Reader reads and decodes compressed objfile data from a provided io.Reader. +// Reader implements io.ReadCloser. Close should be called when finished with +// the Reader. Close will not close the underlying io.Reader. +type Reader struct { + multi io.Reader + zlib io.ReadCloser + hasher plumbing.Hasher +} + +// NewReader returns a new Reader reading from r. +func NewReader(r io.Reader) (*Reader, error) { + zlib, err := zlib.NewReader(r) + if err != nil { + return nil, packfile.ErrZLib.AddDetails(err.Error()) + } + + return &Reader{ + zlib: zlib, + }, nil +} + +// Header reads the type and the size of object, and prepares the reader for read +func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) { + var raw []byte + raw, err = r.readUntil(' ') + if err != nil { + return + } + + t, err = plumbing.ParseObjectType(string(raw)) + if err != nil { + return + } + + raw, err = r.readUntil(0) + if err != nil { + return + } + + size, err = strconv.ParseInt(string(raw), 10, 64) + if err != nil { + err = ErrHeader + return + } + + defer r.prepareForRead(t, size) + return +} + +// readSlice reads one byte at a time from r until it encounters delim or an +// error. +func (r *Reader) readUntil(delim byte) ([]byte, error) { + var buf [1]byte + value := make([]byte, 0, 16) + for { + if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) { + if err == io.EOF { + return nil, ErrHeader + } + return nil, err + } + + if buf[0] == delim { + return value, nil + } + + value = append(value, buf[0]) + } +} + +func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) { + r.hasher = plumbing.NewHasher(t, size) + r.multi = io.TeeReader(r.zlib, r.hasher) +} + +// Read reads len(p) bytes into p from the object data stream. It returns +// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even +// if Read returns n < len(p), it may use all of p as scratch space during the +// call. +// +// If Read encounters the end of the data stream it will return err == io.EOF, +// either in the current call if n > 0 or in a subsequent call. 
+func (r *Reader) Read(p []byte) (n int, err error) { + return r.multi.Read(p) +} + +// Hash returns the hash of the object data stream that has been read so far. +func (r *Reader) Hash() plumbing.Hash { + return r.hasher.Sum() +} + +// Close releases any resources consumed by the Reader. Calling Close does not +// close the wrapped io.Reader originally passed to NewReader. +func (r *Reader) Close() error { + if err := r.zlib.Close(); err != nil { + return err + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go new file mode 100644 index 0000000000000000000000000000000000000000..5555243401d9e0be3682e9149d79bc2fbc344930 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go @@ -0,0 +1,109 @@ +package objfile + +import ( + "compress/zlib" + "errors" + "io" + "strconv" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +var ( + ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)") +) + +// Writer writes and encodes data in compressed objfile format to a provided +// io.Writer. Close should be called when finished with the Writer. Close will +// not close the underlying io.Writer. +type Writer struct { + raw io.Writer + zlib io.WriteCloser + hasher plumbing.Hasher + multi io.Writer + + closed bool + pending int64 // number of unwritten bytes +} + +// NewWriter returns a new Writer writing to w. +// +// The returned Writer implements io.WriteCloser. Close should be called when +// finished with the Writer. Close will not close the underlying io.Writer. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + raw: w, + zlib: zlib.NewWriter(w), + } +} + +// WriteHeader writes the type and the size and prepares to accept the object's +// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned. If a +// negative size is provided, ErrNegativeSize is returned. +func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error { + if !t.Valid() { + return plumbing.ErrInvalidType + } + if size < 0 { + return ErrNegativeSize + } + + b := t.Bytes() + b = append(b, ' ') + b = append(b, []byte(strconv.FormatInt(size, 10))...) + b = append(b, 0) + + defer w.prepareForWrite(t, size) + _, err := w.zlib.Write(b) + + return err +} + +func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) { + w.pending = size + + w.hasher = plumbing.NewHasher(t, size) + w.multi = io.MultiWriter(w.zlib, w.hasher) +} + +// Write writes the object's contents. Write returns the error ErrOverflow if +// more than size bytes are written after WriteHeader. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, ErrClosed + } + + overwrite := false + if int64(len(p)) > w.pending { + p = p[0:w.pending] + overwrite = true + } + + n, err = w.multi.Write(p) + w.pending -= int64(n) + if err == nil && overwrite { + err = ErrOverflow + return + } + + return +} + +// Hash returns the hash of the object data stream that has been written so far. +// It can be called before or after Close. +func (w *Writer) Hash() plumbing.Hash { + return w.hasher.Sum() // Not yet closed, return hash of data written so far +} + +// Close releases any resources consumed by the Writer. +// +// Calling Close does not close the wrapped io.Writer originally passed to +// NewWriter. 
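With the objfile Reader above and the Writer just defined, loose-object handling round-trips in a few lines; a sketch with an arbitrary blob:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/plumbing/format/objfile"
)

func main() {
	var buf bytes.Buffer

	// Write a small blob: header "blob 5\x00" plus content, zlib-compressed.
	w := objfile.NewWriter(&buf)
	content := []byte("hello")
	if err := w.WriteHeader(plumbing.BlobObject, int64(len(content))); err != nil {
		panic(err)
	}
	if _, err := w.Write(content); err != nil {
		panic(err)
	}
	w.Close()

	// Read it back: Header returns the type and size, Read streams the content.
	r, err := objfile.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()

	t, size, err := r.Header()
	if err != nil {
		panic(err)
	}
	data, _ := ioutil.ReadAll(r)
	fmt.Println(t, size, string(data), r.Hash()) // blob 5 hello <object SHA-1>
}
```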
+func (w *Writer) Close() error { + if err := w.zlib.Close(); err != nil { + return err + } + + w.closed = true + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go new file mode 100644 index 0000000000000000000000000000000000000000..6402cd071ceaea10a29293503ce9db32e908315c --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go @@ -0,0 +1,45 @@ +package packfile + +import ( + "io" + + "gopkg.in/src-d/go-git.v4/plumbing/storer" +) + +var signature = []byte{'P', 'A', 'C', 'K'} + +const ( + // VersionSupported is the packfile version supported by this package + VersionSupported uint32 = 2 + + firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length + lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length + maskFirstLength = 15 // 0000 1111 + maskContinue = 0x80 // 1000 0000 + maskLength = uint8(127) // 0111 1111 + maskType = uint8(112) // 0111 0000 +) + +// UpdateObjectStorage updates the given storer.EncodedObjectStorer with the contents of the +// packfile. +func UpdateObjectStorage(s storer.EncodedObjectStorer, packfile io.Reader) error { + if sw, ok := s.(storer.PackfileWriter); ok { + w, err := sw.PackfileWriter() + if err != nil { + return err + } + + defer w.Close() + _, err = io.Copy(w, packfile) + return err + } + + stream := NewScanner(packfile) + d, err := NewDecoder(stream, s) + if err != nil { + return err + } + + _, err = d.Decode() + return err +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/decoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/decoder.go new file mode 100644 index 0000000000000000000000000000000000000000..f3328efb7689d4b45e06caf452cbb586f2c83499 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/decoder.go @@ -0,0 +1,459 @@ +package packfile + +import ( + "bytes" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/cache" + "gopkg.in/src-d/go-git.v4/plumbing/storer" +) + +// Format specifies if the packfile uses ref-deltas or ofs-deltas. +type Format int + +// Possible values of the Format type. +const ( + UnknownFormat Format = iota + OFSDeltaFormat + REFDeltaFormat +) + +var ( + // ErrMaxObjectsLimitReached is returned by Decode when the number + // of objects in the packfile is higher than + // Decoder.MaxObjectsLimit. + ErrMaxObjectsLimitReached = NewError("max. objects limit reached") + // ErrInvalidObject is returned by Decode when an invalid object is + // found in the packfile. + ErrInvalidObject = NewError("invalid git object") + // ErrPackEntryNotFound is returned by Decode when a reference in + // the packfile references and unknown object. + ErrPackEntryNotFound = NewError("can't find a pack entry") + // ErrZLib is returned by Decode when there was an error unzipping + // the packfile contents. + ErrZLib = NewError("zlib reading error") + // ErrCannotRecall is returned by RecallByOffset or RecallByHash if the object + // to recall cannot be returned. 
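The masks defined in common.go above describe the packed object header: the first byte carries the object type in bits 4-6 and the four low bits of the size, and each continuation byte (flagged by 0x80) contributes seven more size bits. A standalone sketch of that decoding, independent of the Scanner that does the real work elsewhere in this package:

```go
package main

import "fmt"

func main() {
	// 0x95 = 1001 0101: continuation bit set, type 001 (commit), size bits 0101.
	// 0x0b = 0000 1011: last byte, contributes 11 << 4.
	data := []byte{0x95, 0x0b}

	c := data[0]
	typ := (c >> 4) & 0x07         // the three type bits (maskType)
	size := int64(c & 0x0f)        // maskFirstLength
	shift := uint(4)               // firstLengthBits
	for i := 1; c&0x80 != 0; i++ { // maskContinue
		c = data[i]
		size |= int64(c&0x7f) << shift // maskLength
		shift += 7                     // lengthBits
	}
	fmt.Println(typ, size) // 1 181
}
```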
+ ErrCannotRecall = NewError("cannot recall object") + // ErrResolveDeltasNotSupported is returned if a NewDecoder is used with a + // non-seekable scanner and without a plumbing.ObjectStorage + ErrResolveDeltasNotSupported = NewError("resolve delta is not supported") + // ErrNonSeekable is returned if a ReadObjectAt method is called without a + // seekable scanner + ErrNonSeekable = NewError("non-seekable scanner") + // ErrRollback error making Rollback over a transaction after an error + ErrRollback = NewError("rollback error, during set error") + // ErrAlreadyDecoded is returned if NewDecoder is called for a second time + ErrAlreadyDecoded = NewError("packfile was already decoded") +) + +// Decoder reads and decodes packfiles from an input Scanner, if an ObjectStorer +// was provided the decoded objects are store there. If not the decode object +// is destroyed. The Offsets and CRCs are calculated whether an +// ObjectStorer was provided or not. +type Decoder struct { + s *Scanner + o storer.EncodedObjectStorer + tx storer.Transaction + + isDecoded bool + offsetToHash map[int64]plumbing.Hash + hashToOffset map[plumbing.Hash]int64 + crcs map[plumbing.Hash]uint32 + + offsetToType map[int64]plumbing.ObjectType + decoderType plumbing.ObjectType + + cache cache.Object +} + +// NewDecoder returns a new Decoder that decodes a Packfile using the given +// Scanner and stores the objects in the provided EncodedObjectStorer. ObjectStorer can be nil, in this +// If the passed EncodedObjectStorer is nil, objects are not stored, but +// offsets on the Packfile and CRCs are calculated. +// +// If EncodedObjectStorer is nil and the Scanner is not Seekable, ErrNonSeekable is +// returned. +// +// If the ObjectStorer implements storer.Transactioner, a transaction is created +// during the Decode execution. If anything fails, Rollback is called +func NewDecoder(s *Scanner, o storer.EncodedObjectStorer) (*Decoder, error) { + return NewDecoderForType(s, o, plumbing.AnyObject) +} + +// NewDecoderForType returns a new Decoder but in this case for a specific object type. +// When an object is read using this Decoder instance and it is not of the same type of +// the specified one, nil will be returned. This is intended to avoid the content +// deserialization of all the objects +func NewDecoderForType(s *Scanner, o storer.EncodedObjectStorer, + t plumbing.ObjectType) (*Decoder, error) { + + if t == plumbing.OFSDeltaObject || + t == plumbing.REFDeltaObject || + t == plumbing.InvalidObject { + return nil, plumbing.ErrInvalidType + } + + if !canResolveDeltas(s, o) { + return nil, ErrResolveDeltasNotSupported + } + + return &Decoder{ + s: s, + o: o, + + offsetToHash: make(map[int64]plumbing.Hash, 0), + hashToOffset: make(map[plumbing.Hash]int64, 0), + crcs: make(map[plumbing.Hash]uint32, 0), + + offsetToType: make(map[int64]plumbing.ObjectType, 0), + decoderType: t, + + cache: cache.NewObjectFIFO(cache.MaxSize), + }, nil +} + +func canResolveDeltas(s *Scanner, o storer.EncodedObjectStorer) bool { + return s.IsSeekable || o != nil +} + +// Decode reads a packfile and stores it in the value pointed to by s. 
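A usage sketch for the Decoder: scan a packfile from disk into go-git's in-memory storage. The path is illustrative, and memory.NewStorage is assumed to come from go-git's storage/memory package:

```go
package main

import (
	"fmt"
	"os"

	"gopkg.in/src-d/go-git.v4/plumbing/format/packfile"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	f, err := os.Open("objects/pack/example.pack") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	storage := memory.NewStorage()
	d, err := packfile.NewDecoder(packfile.NewScanner(f), storage)
	if err != nil {
		panic(err)
	}
	defer d.Close()

	checksum, err := d.Decode()
	if err != nil {
		panic(err)
	}
	fmt.Println("pack checksum:", checksum, "objects decoded:", len(d.Offsets()))
}
```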
The +// offsets and the CRCs are calculated by this method +func (d *Decoder) Decode() (checksum plumbing.Hash, err error) { + defer func() { d.isDecoded = true }() + + if d.isDecoded { + return plumbing.ZeroHash, ErrAlreadyDecoded + } + + if err := d.doDecode(); err != nil { + return plumbing.ZeroHash, err + } + + return d.s.Checksum() +} + +func (d *Decoder) doDecode() error { + _, count, err := d.s.Header() + if err != nil { + return err + } + + _, isTxStorer := d.o.(storer.Transactioner) + switch { + case d.o == nil: + return d.decodeObjects(int(count)) + case isTxStorer: + return d.decodeObjectsWithObjectStorerTx(int(count)) + default: + return d.decodeObjectsWithObjectStorer(int(count)) + } +} + +func (d *Decoder) decodeObjects(count int) error { + for i := 0; i < count; i++ { + if _, err := d.DecodeObject(); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeObjectsWithObjectStorer(count int) error { + for i := 0; i < count; i++ { + obj, err := d.DecodeObject() + if err != nil { + return err + } + + if _, err := d.o.SetEncodedObject(obj); err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeObjectsWithObjectStorerTx(count int) error { + d.tx = d.o.(storer.Transactioner).Begin() + + for i := 0; i < count; i++ { + obj, err := d.DecodeObject() + if err != nil { + return err + } + + if _, err := d.tx.SetEncodedObject(obj); err != nil { + if rerr := d.tx.Rollback(); rerr != nil { + return ErrRollback.AddDetails( + "error: %s, during tx.Set error: %s", rerr, err, + ) + } + + return err + } + + } + + return d.tx.Commit() +} + +// DecodeObject reads the next object from the scanner and returns it. This +// method can be used in replacement of the Decode method, to work in a +// interactive way. If you created a new decoder instance using NewDecoderForType +// constructor, if the object decoded is not equals to the specified one, nil will +// be returned +func (d *Decoder) DecodeObject() (plumbing.EncodedObject, error) { + h, err := d.s.NextObjectHeader() + if err != nil { + return nil, err + } + + if d.decoderType == plumbing.AnyObject { + return d.decodeByHeader(h) + } + + return d.decodeIfSpecificType(h) +} + +func (d *Decoder) decodeIfSpecificType(h *ObjectHeader) (plumbing.EncodedObject, error) { + var realType plumbing.ObjectType + var err error + switch h.Type { + case plumbing.OFSDeltaObject: + realType, err = d.ofsDeltaType(h.OffsetReference) + case plumbing.REFDeltaObject: + realType, err = d.refDeltaType(h.Reference) + default: + realType = h.Type + } + + if err != nil { + return nil, err + } + + d.offsetToType[h.Offset] = realType + + if d.decoderType == realType { + return d.decodeByHeader(h) + } + + return nil, nil +} + +func (d *Decoder) ofsDeltaType(offset int64) (plumbing.ObjectType, error) { + t, ok := d.offsetToType[offset] + if !ok { + return plumbing.InvalidObject, plumbing.ErrObjectNotFound + } + + return t, nil +} + +func (d *Decoder) refDeltaType(ref plumbing.Hash) (plumbing.ObjectType, error) { + if o, ok := d.hashToOffset[ref]; ok { + return d.ofsDeltaType(o) + } + + obj, err := d.o.EncodedObject(plumbing.AnyObject, ref) + if err != nil { + return plumbing.InvalidObject, err + } + + return obj.Type(), nil +} + +func (d *Decoder) decodeByHeader(h *ObjectHeader) (plumbing.EncodedObject, error) { + obj := d.newObject() + obj.SetSize(h.Length) + obj.SetType(h.Type) + var crc uint32 + var err error + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + crc, err 
= d.fillRegularObjectContent(obj) + case plumbing.REFDeltaObject: + crc, err = d.fillREFDeltaObjectContent(obj, h.Reference) + case plumbing.OFSDeltaObject: + crc, err = d.fillOFSDeltaObjectContent(obj, h.OffsetReference) + default: + err = ErrInvalidObject.AddDetails("type %q", h.Type) + } + + if err != nil { + return obj, err + } + + hash := obj.Hash() + d.setOffset(hash, h.Offset) + d.setCRC(hash, crc) + + return obj, nil +} + +func (d *Decoder) newObject() plumbing.EncodedObject { + if d.o == nil { + return &plumbing.MemoryObject{} + } + + return d.o.NewEncodedObject() +} + +// DecodeObjectAt reads an object at the given location. Every EncodedObject +// returned is added into a internal index. This is intended to be able to regenerate +// objects from deltas (offset deltas or reference deltas) without an package index +// (.idx file). If Decode wasn't called previously objects offset should provided +// using the SetOffsets method. +func (d *Decoder) DecodeObjectAt(offset int64) (plumbing.EncodedObject, error) { + if !d.s.IsSeekable { + return nil, ErrNonSeekable + } + + beforeJump, err := d.s.Seek(offset) + if err != nil { + return nil, err + } + + defer func() { + _, seekErr := d.s.Seek(beforeJump) + if err == nil { + err = seekErr + } + }() + + return d.DecodeObject() +} + +func (d *Decoder) fillRegularObjectContent(obj plumbing.EncodedObject) (uint32, error) { + w, err := obj.Writer() + if err != nil { + return 0, err + } + + _, crc, err := d.s.NextObject(w) + return crc, err +} + +func (d *Decoder) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) (uint32, error) { + buf := bytes.NewBuffer(nil) + _, crc, err := d.s.NextObject(buf) + if err != nil { + return 0, err + } + + base := d.cache.Get(ref) + + if base == nil { + base, err = d.recallByHash(ref) + if err != nil { + return 0, err + } + } + + obj.SetType(base.Type()) + err = ApplyDelta(obj, base, buf.Bytes()) + d.cache.Add(obj) + + return crc, err +} + +func (d *Decoder) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) (uint32, error) { + buf := bytes.NewBuffer(nil) + _, crc, err := d.s.NextObject(buf) + if err != nil { + return 0, err + } + + h := d.offsetToHash[offset] + var base plumbing.EncodedObject + if h != plumbing.ZeroHash { + base = d.cache.Get(h) + } + + if base == nil { + base, err = d.recallByOffset(offset) + if err != nil { + return 0, err + } + } + + obj.SetType(base.Type()) + err = ApplyDelta(obj, base, buf.Bytes()) + d.cache.Add(obj) + + return crc, err +} + +func (d *Decoder) setOffset(h plumbing.Hash, offset int64) { + d.offsetToHash[offset] = h + d.hashToOffset[h] = offset +} + +func (d *Decoder) setCRC(h plumbing.Hash, crc uint32) { + d.crcs[h] = crc +} + +func (d *Decoder) recallByOffset(o int64) (plumbing.EncodedObject, error) { + if d.s.IsSeekable { + return d.DecodeObjectAt(o) + } + + if h, ok := d.offsetToHash[o]; ok { + return d.recallByHashNonSeekable(h) + } + + return nil, plumbing.ErrObjectNotFound +} + +func (d *Decoder) recallByHash(h plumbing.Hash) (plumbing.EncodedObject, error) { + if d.s.IsSeekable { + if o, ok := d.hashToOffset[h]; ok { + return d.DecodeObjectAt(o) + } + } + + return d.recallByHashNonSeekable(h) +} + +// recallByHashNonSeekable if we are in a transaction the objects are read from +// the transaction, if not are directly read from the ObjectStorer +func (d *Decoder) recallByHashNonSeekable(h plumbing.Hash) (obj plumbing.EncodedObject, err error) { + if d.tx != nil { + obj, err = d.tx.EncodedObject(plumbing.AnyObject, h) + } else { + 
obj, err = d.o.EncodedObject(plumbing.AnyObject, h) + } + + if err != plumbing.ErrObjectNotFound { + return obj, err + } + + return nil, plumbing.ErrObjectNotFound +} + +// SetOffsets sets the offsets, required when using the method DecodeObjectAt, +// without decoding the full packfile +func (d *Decoder) SetOffsets(offsets map[plumbing.Hash]int64) { + d.hashToOffset = offsets +} + +// Offsets returns the objects read offset, Decode method should be called +// before to calculate the Offsets +func (d *Decoder) Offsets() map[plumbing.Hash]int64 { + return d.hashToOffset +} + +// CRCs returns the CRC-32 for each read object. Decode method should be called +// before to calculate the CRCs +func (d *Decoder) CRCs() map[plumbing.Hash]uint32 { + return d.crcs +} + +// Close closes the Scanner. usually this mean that the whole reader is read and +// discarded +func (d *Decoder) Close() error { + d.cache.Clear() + + return d.s.Close() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go new file mode 100644 index 0000000000000000000000000000000000000000..a73a2091856e1250cf94dfbb87661f94e665e3fe --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go @@ -0,0 +1,169 @@ +package packfile + +import ( + "sort" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/storer" +) + +const ( + // deltas based on deltas, how many steps we can do. + // 50 is the default value used in JGit + maxDepth = int64(50) +) + +// applyDelta is the set of object types that we should apply deltas +var applyDelta = map[plumbing.ObjectType]bool{ + plumbing.BlobObject: true, + plumbing.TreeObject: true, +} + +type deltaSelector struct { + storer storer.EncodedObjectStorer +} + +func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector { + return &deltaSelector{s} +} + +// ObjectsToPack creates a list of ObjectToPack from the hashes provided, +// creating deltas if it's suitable, using an specific internal logic +func (dw *deltaSelector) ObjectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, error) { + otp, err := dw.objectsToPack(hashes) + if err != nil { + return nil, err + } + + dw.sort(otp) + + if err := dw.walk(otp); err != nil { + return nil, err + } + + return otp, nil +} + +func (dw *deltaSelector) objectsToPack(hashes []plumbing.Hash) ([]*ObjectToPack, error) { + var objectsToPack []*ObjectToPack + for _, h := range hashes { + o, err := dw.storer.EncodedObject(plumbing.AnyObject, h) + if err != nil { + return nil, err + } + + objectsToPack = append(objectsToPack, newObjectToPack(o)) + } + + return objectsToPack, nil +} + +func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { + sort.Sort(byTypeAndSize(objectsToPack)) +} + +func (dw *deltaSelector) walk(objectsToPack []*ObjectToPack) error { + for i := 0; i < len(objectsToPack); i++ { + target := objectsToPack[i] + + // We only want to create deltas from specific types + if !applyDelta[target.Original.Type()] { + continue + } + + for j := i - 1; j >= 0; j-- { + base := objectsToPack[j] + // Objects must use only the same type as their delta base. + if base.Original.Type() != target.Original.Type() { + break + } + + if err := dw.tryToDeltify(base, target); err != nil { + return err + } + } + } + + return nil +} + +func (dw *deltaSelector) tryToDeltify(base, target *ObjectToPack) error { + // If the sizes are radically different, this is a bad pairing. 
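The budget computed by deltaSizeLimit below follows the JGit heuristic: a delta may take at most half of the target's size, scaled down as the base's delta chain approaches maxDepth. A sketch of the non-delta branch only:

```go
package main

import "fmt"

const maxDepth = int64(50)

// sizeLimit mirrors the non-delta branch of deltaSizeLimit below.
func sizeLimit(targetSize int64, baseDepth int) int64 {
	return (targetSize >> 1) * (maxDepth - int64(baseDepth)) / maxDepth
}

func main() {
	fmt.Println(sizeLimit(1000, 0))  // 500: base is not a delta, allow ~50%
	fmt.Println(sizeLimit(1000, 45)) // 50: deep chain, allow only ~5%
}
```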
+ if target.Original.Size() < base.Original.Size()>>4 { + return nil + } + + msz := dw.deltaSizeLimit( + target.Object.Size(), + base.Depth, + target.Depth, + target.IsDelta(), + ) + + // Nearly impossible to fit useful delta. + if msz <= 8 { + return nil + } + + // If we have to insert a lot to make this work, find another. + if base.Original.Size()-target.Object.Size() > msz { + return nil + } + + // Now we can generate the delta using originals + delta, err := GetDelta(base.Original, target.Original) + if err != nil { + return err + } + + // if delta better than target + if delta.Size() < msz { + target.SetDelta(base, delta) + } + + return nil +} + +func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int, + targetDepth int, targetDelta bool) int64 { + if !targetDelta { + // Any delta should be no more than 50% of the original size + // (for text files deflate of whole form should shrink 50%). + n := targetSize >> 1 + + // Evenly distribute delta size limits over allowed depth. + // If src is non-delta (depth = 0), delta <= 50% of original. + // If src is almost at limit (9/10), delta <= 10% of original. + return n * (maxDepth - int64(baseDepth)) / maxDepth + } + + // With a delta base chosen any new delta must be "better". + // Retain the distribution described above. + d := int64(targetDepth) + n := targetSize + + // If src is whole (depth=0) and base is near limit (depth=9/10) + // any delta using src can be 10x larger and still be better. + // + // If src is near limit (depth=9/10) and base is whole (depth=0) + // a new delta dependent on src must be 1/10th the size. + return n * (maxDepth - int64(baseDepth)) / (maxDepth - d) +} + +type byTypeAndSize []*ObjectToPack + +func (a byTypeAndSize) Len() int { return len(a) } + +func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a byTypeAndSize) Less(i, j int) bool { + if a[i].Object.Type() < a[j].Object.Type() { + return false + } + + if a[i].Object.Type() > a[j].Object.Type() { + return true + } + + return a[i].Object.Size() > a[j].Object.Size() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff.go new file mode 100644 index 0000000000000000000000000000000000000000..ff34329c1d88524458dd9236f0280b042675c844 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff.go @@ -0,0 +1,397 @@ +package packfile + +/* + +Copyright (c) 2013, Patrick Mezard +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*/ + +// Code based on https://github.com/pmezard/go-difflib +// Removed unnecessary code for this use case and changed string inputs to byte + +type match struct { + A int + B int + Size int +} + +type opCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of bytes. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "<P>" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" <wink>. +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type sequenceMatcher struct { + a []byte + b []byte + b2j map[byte][]int + IsJunk func(byte) bool + autoJunk bool + bJunk map[byte]struct{} + matchingBlocks []match + fullBCount map[byte]int + bPopular map[byte]struct{} + opCodes []opCode +} + +func newMatcher(a, b []byte) *sequenceMatcher { + m := sequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *sequenceMatcher) SetSeqs(a, b []byte) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *sequenceMatcher) SetSeq1(a []byte) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. 
The first sequence to be compared is +// not changed. +func (m *sequenceMatcher) SetSeq2(b []byte) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *sequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[byte][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[byte]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[byte]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *sequenceMatcher) isBJunk(s byte) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *sequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. 
In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. +func (m *sequenceMatcher) GetMatchingBlocks() []match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []match) []match + matchBlocks = func(alo, ahi, blo, bhi int, matched []match) []match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. 
+ if k1 > 0 { + nonAdjacent = append(nonAdjacent, match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +const ( + tagReplace = 'r' + tagDelete = 'd' + tagInsert = 'i' + tagEqual = 'e' +) + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *sequenceMatcher) GetOpCodes() []opCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]opCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = tagReplace + } else if i < ai { + tag = tagDelete + } else if j < bj { + tag = tagInsert + } + if tag > 0 { + opCodes = append(opCodes, opCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, opCode{tagEqual, ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go new file mode 100644 index 0000000000000000000000000000000000000000..40d450f938d26b43d56e7d6c5e79000ae83c1fdb --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go @@ -0,0 +1,142 @@ +package packfile + +import ( + "io/ioutil" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and +// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js +// for more info + +const ( + maxCopyLen = 0xffff +) + +// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object, +// will be loaded into memory to be able to create the delta object. +// To generate target again, you will need the obtained object and "base" one. +// Error will be returned if base or target object cannot be read. 
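Editorial sketch, not part of the vendored file: the opcodes produced by the sequenceMatcher above are what DiffDelta below walks to build a git delta. The fragment assumes it runs inside this package (sequenceMatcher and opCode are unexported), uses the newMatcher constructor that DiffDelta itself calls, and imports fmt.

sm := newMatcher([]byte("qabxcd"), []byte("abycdf"))
for _, op := range sm.GetOpCodes() {
	// Tag is 'd', 'e', 'r' or 'i'; a[I1:I2] corresponds to b[J1:J2].
	fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
}
// This should print the same opcodes as the classic Python difflib example:
// d a[0:1] b[0:0], e a[1:3] b[0:2], r a[3:4] b[2:3], e a[4:6] b[3:5], i a[6:6] b[5:6].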
+func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { + br, err := base.Reader() + if err != nil { + return nil, err + } + tr, err := target.Reader() + if err != nil { + return nil, err + } + + bb, err := ioutil.ReadAll(br) + if err != nil { + return nil, err + } + + tb, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + + db := DiffDelta(bb, tb) + delta := &plumbing.MemoryObject{} + _, err = delta.Write(db) + if err != nil { + return nil, err + } + + delta.SetSize(int64(len(db))) + delta.SetType(plumbing.OFSDeltaObject) + + return delta, nil +} + +// DiffDelta returns the delta that transforms baseBuf into targetBuf. +func DiffDelta(baseBuf []byte, targetBuf []byte) []byte { + var outBuff []byte + + outBuff = append(outBuff, deltaEncodeSize(len(baseBuf))...) + outBuff = append(outBuff, deltaEncodeSize(len(targetBuf))...) + + sm := newMatcher(baseBuf, targetBuf) + for _, op := range sm.GetOpCodes() { + switch { + case op.Tag == tagEqual: + copyStart := op.I1 + copyLen := op.I2 - op.I1 + for { + if copyLen <= 0 { + break + } + var toCopy int + if copyLen < maxCopyLen { + toCopy = copyLen + } else { + toCopy = maxCopyLen + } + + outBuff = append(outBuff, encodeCopyOperation(copyStart, toCopy)...) + copyStart += toCopy + copyLen -= toCopy + } + case op.Tag == tagReplace || op.Tag == tagInsert: + s := op.J2 - op.J1 + o := op.J1 + for { + if s <= 127 { + break + } + outBuff = append(outBuff, byte(127)) + outBuff = append(outBuff, targetBuf[o:o+127]...) + s -= 127 + o += 127 + } + outBuff = append(outBuff, byte(s)) + outBuff = append(outBuff, targetBuf[o:o+s]...) + } + } + + return outBuff +} + +func deltaEncodeSize(size int) []byte { + var ret []byte + c := size & 0x7f + size >>= 7 + for { + if size == 0 { + break + } + + ret = append(ret, byte(c|0x80)) + c = size & 0x7f + size >>= 7 + } + ret = append(ret, byte(c)) + + return ret +} + +func encodeCopyOperation(offset, length int) []byte { + code := 0x80 + var opcodes []byte + + var i uint + for i = 0; i < 4; i++ { + f := 0xff << (i * 8) + if offset&f != 0 { + opcodes = append(opcodes, byte(offset&f>>(i*8))) + code |= 0x01 << i + } + } + + for i = 0; i < 3; i++ { + f := 0xff << (i * 8) + if length&f != 0 { + opcodes = append(opcodes, byte(length&f>>(i*8))) + code |= 0x10 << i + } + } + + return append([]byte{byte(code)}, opcodes...) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..2882a7f378268ec96039ef98e78fea9463d4d26b --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go @@ -0,0 +1,39 @@ +// Package packfile implements encoding and decoding of packfile format. +// +// == pack-*.pack files have the following format: +// +// - A header appears at the beginning and consists of the following: +// +// 4-byte signature: +// The signature is: {'P', 'A', 'C', 'K'} +// +// 4-byte version number (network byte order): +// GIT currently accepts version number 2 or 3 but +// generates version 2 only. +// +// 4-byte number of objects contained in the pack (network byte order) +// +// Observation: we cannot have more than 4G versions ;-) and +// more than 4G objects in a pack. 
+// +// - The header is followed by number of object entries, each of +// which looks like this: +// +// (undeltified representation) +// n-byte type and length (3-bit type, (n-1)*7+4-bit length) +// compressed data +// +// (deltified representation) +// n-byte type and length (3-bit type, (n-1)*7+4-bit length) +// 20-byte base object name +// compressed delta data +// +// Observation: length of each object is encoded in a variable +// length format and is not constrained to 32-bit or anything. +// +// - The trailer records 20-byte SHA1 checksum of all of the above. +// +// +// Source: +// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt +package packfile diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go new file mode 100644 index 0000000000000000000000000000000000000000..ae8375247912dad219fe52485e7254e47c9325e5 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go @@ -0,0 +1,183 @@ +package packfile + +import ( + "compress/zlib" + "crypto/sha1" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/utils/binary" +) + +// Encoder gets the data from the storage and write it into the writer in PACK +// format +type Encoder struct { + selector *deltaSelector + w *offsetWriter + zw *zlib.Writer + hasher plumbing.Hasher + offsets map[plumbing.Hash]int64 + useRefDeltas bool +} + +// NewEncoder creates a new packfile encoder using a specific Writer and +// EncodedObjectStorer. By default deltas used to generate the packfile will be +// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. +func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { + h := plumbing.Hasher{ + Hash: sha1.New(), + } + mw := io.MultiWriter(w, h) + ow := newOffsetWriter(mw) + zw := zlib.NewWriter(mw) + return &Encoder{ + selector: newDeltaSelector(s), + w: ow, + zw: zw, + hasher: h, + offsets: make(map[plumbing.Hash]int64), + useRefDeltas: useRefDeltas, + } +} + +// Encode creates a packfile containing all the objects referenced in hashes +// and writes it to the writer in the Encoder. 
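Editorial sketch (a standalone program using only the standard library): the fixed 12-byte pack header described in the doc.go comment above can be decoded directly, which is what the Encoder and Scanner in this package do behind their APIs.

package main

import (
	"encoding/binary"
	"fmt"
)

// parsePackHeader decodes the header layout described above: a 4-byte "PACK"
// signature, then a 4-byte version and a 4-byte object count, both in
// network (big-endian) byte order.
func parsePackHeader(b []byte) (version, objects uint32, err error) {
	if len(b) < 12 || string(b[:4]) != "PACK" {
		return 0, 0, fmt.Errorf("malformed pack header")
	}
	return binary.BigEndian.Uint32(b[4:8]), binary.BigEndian.Uint32(b[8:12]), nil
}

func main() {
	hdr := []byte{'P', 'A', 'C', 'K', 0, 0, 0, 2, 0, 0, 0, 3}
	v, n, _ := parsePackHeader(hdr)
	fmt.Println(v, n) // 2 3
}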
+func (e *Encoder) Encode(hashes []plumbing.Hash) (plumbing.Hash, error) { + objects, err := e.selector.ObjectsToPack(hashes) + if err != nil { + return plumbing.ZeroHash, err + } + + return e.encode(objects) +} + +func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) { + if err := e.head(len(objects)); err != nil { + return plumbing.ZeroHash, err + } + + for _, o := range objects { + if err := e.entry(o); err != nil { + return plumbing.ZeroHash, err + } + } + + return e.footer() +} + +func (e *Encoder) head(numEntries int) error { + return binary.Write( + e.w, + signature, + int32(VersionSupported), + int32(numEntries), + ) +} + +func (e *Encoder) entry(o *ObjectToPack) error { + offset := e.w.Offset() + + if o.IsDelta() { + if err := e.writeDeltaHeader(o, offset); err != nil { + return err + } + } else { + if err := e.entryHead(o.Object.Type(), o.Object.Size()); err != nil { + return err + } + } + + // Save the position using the original hash, maybe a delta will need it + e.offsets[o.Original.Hash()] = offset + + e.zw.Reset(e.w) + or, err := o.Object.Reader() + if err != nil { + return err + } + _, err = io.Copy(e.zw, or) + if err != nil { + return err + } + + return e.zw.Close() +} + +func (e *Encoder) writeDeltaHeader(o *ObjectToPack, offset int64) error { + // Write offset deltas by default + t := plumbing.OFSDeltaObject + if e.useRefDeltas { + t = plumbing.REFDeltaObject + } + + if err := e.entryHead(t, o.Object.Size()); err != nil { + return err + } + + if e.useRefDeltas { + return e.writeRefDeltaHeader(o.Base.Original.Hash()) + } else { + return e.writeOfsDeltaHeader(offset, o.Base.Original.Hash()) + } +} + +func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error { + return binary.Write(e.w, base) +} + +func (e *Encoder) writeOfsDeltaHeader(deltaOffset int64, base plumbing.Hash) error { + // because it is an offset delta, we need the base + // object position + offset, ok := e.offsets[base] + if !ok { + return fmt.Errorf("delta base not found. Hash: %v", base) + } + + return binary.WriteVariableWidthInt(e.w, deltaOffset-offset) +} + +func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error { + t := int64(typeNum) + header := []byte{} + c := (t << firstLengthBits) | (size & maskFirstLength) + size >>= firstLengthBits + for { + if size == 0 { + break + } + header = append(header, byte(c|maskContinue)) + c = size & int64(maskLength) + size >>= lengthBits + } + + header = append(header, byte(c)) + _, err := e.w.Write(header) + + return err +} + +func (e *Encoder) footer() (plumbing.Hash, error) { + h := e.hasher.Sum() + return h, binary.Write(e.w, h) +} + +type offsetWriter struct { + w io.Writer + offset int64 +} + +func newOffsetWriter(w io.Writer) *offsetWriter { + return &offsetWriter{w: w} +} + +func (ow *offsetWriter) Write(p []byte) (n int, err error) { + n, err = ow.w.Write(p) + ow.offset += int64(n) + return n, err +} + +func (ow *offsetWriter) Offset() int64 { + return ow.offset +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go new file mode 100644 index 0000000000000000000000000000000000000000..c0b9163313109ee34b2aa0503de3cb337d8bc900 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go @@ -0,0 +1,30 @@ +package packfile + +import "fmt" + +// Error specifies errors returned during packfile parsing. +type Error struct { + reason, details string +} + +// NewError returns a new error. 
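Editorial sketch of the object-entry head written by entryHead above. The literal values here (4 size bits in the first byte, 7 in each following byte, 0x80 as the continuation flag, the type in bits 4-6) are assumptions matching the "(n-1)*7+4-bit length" note in doc.go; the real constants are defined elsewhere in this package.

// encodeEntryHead mirrors entryHead above with the constants written out.
func encodeEntryHead(typ byte, size int64) []byte {
	c := int64(typ)<<4 | (size & 0x0f) // 3-bit type, low 4 bits of the size
	size >>= 4

	var out []byte
	for size != 0 {
		out = append(out, byte(c|0x80)) // MSB set: another length byte follows
		c = size & 0x7f
		size >>= 7
	}

	return append(out, byte(c))
}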
+func NewError(reason string) *Error { + return &Error{reason: reason} +} + +// Error returns a text representation of the error. +func (e *Error) Error() string { + if e.details == "" { + return e.reason + } + + return fmt.Sprintf("%s: %s", e.reason, e.details) +} + +// AddDetails adds details to an error, with additional text. +func (e *Error) AddDetails(format string, args ...interface{}) *Error { + return &Error{ + reason: e.reason, + details: fmt.Sprintf(format, args...), + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go new file mode 100644 index 0000000000000000000000000000000000000000..a3e99c031b0dbcebefdf604e7e885360d9bcbf28 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go @@ -0,0 +1,54 @@ +package packfile + +import "gopkg.in/src-d/go-git.v4/plumbing" + +// ObjectToPack is a representation of an object that is going to be into a +// pack file. +type ObjectToPack struct { + // The main object to pack, it could be any object, including deltas + Object plumbing.EncodedObject + // Base is the object that a delta is based on (it could be also another delta). + // If the main object is not a delta, Base will be null + Base *ObjectToPack + // Original is the object that we can generate applying the delta to + // Base, or the same object as EncodedObject in the case of a non-delta + // object. + Original plumbing.EncodedObject + // Depth is the amount of deltas needed to resolve to obtain Original + // (delta based on delta based on ...) + Depth int +} + +// newObjectToPack creates a correct ObjectToPack based on a non-delta object +func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack { + return &ObjectToPack{ + Object: o, + Original: o, + } +} + +// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on +// his base (could be another delta), the delta target (in this case called original), +// and the delta Object itself +func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack { + return &ObjectToPack{ + Object: delta, + Base: base, + Original: original, + Depth: base.Depth + 1, + } +} + +func (o *ObjectToPack) IsDelta() bool { + if o.Base != nil { + return true + } + + return false +} + +func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) { + o.Object = delta + o.Base = base + o.Depth = base.Depth + 1 +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go new file mode 100644 index 0000000000000000000000000000000000000000..840f84098f6ccc77a9d8eff09fb5c84acb58de16 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go @@ -0,0 +1,181 @@ +package packfile + +import ( + "io/ioutil" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h +// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c, +// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js +// for details about the delta format. + +const deltaSizeMin = 4 + +// ApplyDelta writes to target the result of applying the modification deltas in delta to base. 
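Editorial sketch of the round trip between DiffDelta (in diff_delta.go above) and PatchDelta (defined just below): the delta produced from a base/target pair should re-create the target when applied back onto the base. The fragment assumes it runs inside this package with bytes and fmt imported.

base := []byte("the quick brown fox")
target := []byte("the quick brown fox jumps over the lazy dog")

delta := DiffDelta(base, target)    // copy/insert instructions, see DiffDelta above
restored := PatchDelta(base, delta) // applied by patch_delta.go below

fmt.Println(bytes.Equal(restored, target)) // expected: true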
+func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) error { + r, err := base.Reader() + if err != nil { + return err + } + + w, err := target.Writer() + if err != nil { + return err + } + + src, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + dst := PatchDelta(src, delta) + target.SetSize(int64(len(dst))) + + if _, err := w.Write(dst); err != nil { + return err + } + + return nil +} + +// PatchDelta returns the result of applying the modification deltas in delta to src. +func PatchDelta(src, delta []byte) []byte { + if len(delta) < deltaSizeMin { + return nil + } + + srcSz, delta := decodeLEB128(delta) + if srcSz != uint(len(src)) { + return nil + } + + targetSz, delta := decodeLEB128(delta) + remainingTargetSz := targetSz + + var dest []byte + var cmd byte + for { + cmd = delta[0] + delta = delta[1:] + if isCopyFromSrc(cmd) { + var offset, sz uint + offset, delta = decodeOffset(cmd, delta) + sz, delta = decodeSize(cmd, delta) + if invalidSize(sz, targetSz) || + invalidOffsetSize(offset, sz, srcSz) { + break + } + dest = append(dest, src[offset:offset+sz]...) + remainingTargetSz -= sz + } else if isCopyFromDelta(cmd) { + sz := uint(cmd) // cmd is the size itself + if invalidSize(sz, targetSz) { + break + } + dest = append(dest, delta[0:sz]...) + remainingTargetSz -= sz + delta = delta[sz:] + } else { + return nil + } + + if remainingTargetSz <= 0 { + break + } + } + + return dest +} + +// Decodes a number encoded as an unsigned LEB128 at the start of some +// binary data and returns the decoded number and the rest of the +// stream. +// +// This must be called twice on the delta data buffer, first to get the +// expected source buffer size, and again to get the target buffer size. +func decodeLEB128(input []byte) (uint, []byte) { + var num, sz uint + var b byte + for { + b = input[sz] + num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks + sz++ + + if uint(b)&continuation == 0 || sz == uint(len(input)) { + break + } + } + + return num, input[sz:] +} + +const ( + payload = 0x7f // 0111 1111 + continuation = 0x80 // 1000 0000 +) + +func isCopyFromSrc(cmd byte) bool { + return (cmd & 0x80) != 0 +} + +func isCopyFromDelta(cmd byte) bool { + return (cmd&0x80) == 0 && cmd != 0 +} + +func decodeOffset(cmd byte, delta []byte) (uint, []byte) { + var offset uint + if (cmd & 0x01) != 0 { + offset = uint(delta[0]) + delta = delta[1:] + } + if (cmd & 0x02) != 0 { + offset |= uint(delta[0]) << 8 + delta = delta[1:] + } + if (cmd & 0x04) != 0 { + offset |= uint(delta[0]) << 16 + delta = delta[1:] + } + if (cmd & 0x08) != 0 { + offset |= uint(delta[0]) << 24 + delta = delta[1:] + } + + return offset, delta +} + +func decodeSize(cmd byte, delta []byte) (uint, []byte) { + var sz uint + if (cmd & 0x10) != 0 { + sz = uint(delta[0]) + delta = delta[1:] + } + if (cmd & 0x20) != 0 { + sz |= uint(delta[0]) << 8 + delta = delta[1:] + } + if (cmd & 0x40) != 0 { + sz |= uint(delta[0]) << 16 + delta = delta[1:] + } + if sz == 0 { + sz = 0x10000 + } + + return sz, delta +} + +func invalidSize(sz, targetSz uint) bool { + return sz > targetSz +} + +func invalidOffsetSize(offset, sz, srcSz uint) bool { + return sumOverflows(offset, sz) || + offset+sz > srcSz +} + +func sumOverflows(a, b uint) bool { + return a+b < a +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go new file mode 100644 index 0000000000000000000000000000000000000000..d8cece6776b01a877cf364380303fdd841bd4050 
--- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go @@ -0,0 +1,419 @@ +package packfile + +import ( + "bufio" + "bytes" + "compress/zlib" + "fmt" + "hash" + "hash/crc32" + "io" + "io/ioutil" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/binary" +) + +var ( + // ErrEmptyPackfile is returned by ReadHeader when no data is found in the packfile + ErrEmptyPackfile = NewError("empty packfile") + // ErrBadSignature is returned by ReadHeader when the signature in the packfile is incorrect. + ErrBadSignature = NewError("malformed pack file signature") + // ErrUnsupportedVersion is returned by ReadHeader when the packfile version is + // different than VersionSupported. + ErrUnsupportedVersion = NewError("unsupported packfile version") + // ErrSeekNotSupported returned if seek is not support + ErrSeekNotSupported = NewError("not seek support") +) + +// ObjectHeader contains the information related to the object, this information +// is collected from the previous bytes to the content of the object. +type ObjectHeader struct { + Type plumbing.ObjectType + Offset int64 + Length int64 + Reference plumbing.Hash + OffsetReference int64 +} + +type Scanner struct { + r reader + zr readerResetter + crc hash.Hash32 + + // pendingObject is used to detect if an object has been read, or still + // is waiting to be read + pendingObject *ObjectHeader + version, objects uint32 + + // lsSeekable says if this scanner can do Seek or not, to have a Scanner + // seekable a r implementing io.Seeker is required + IsSeekable bool +} + +// NewScanner returns a new Scanner based on a reader, if the given reader +// implements io.ReadSeeker the Scanner will be also Seekable +func NewScanner(r io.Reader) *Scanner { + seeker, ok := r.(io.ReadSeeker) + if !ok { + seeker = &trackableReader{Reader: r} + } + + crc := crc32.NewIEEE() + return &Scanner{ + r: &teeReader{ + newByteReadSeeker(seeker), + crc, + }, + crc: crc, + IsSeekable: ok, + } +} + +// Header reads the whole packfile header (signature, version and object count). +// It returns the version and the object count and performs checks on the +// validity of the signature and the version fields. +func (s *Scanner) Header() (version, objects uint32, err error) { + if s.version != 0 { + return s.version, s.objects, nil + } + + sig, err := s.readSignature() + if err != nil { + if err == io.EOF { + err = ErrEmptyPackfile + } + + return + } + + if !s.isValidSignature(sig) { + err = ErrBadSignature + return + } + + version, err = s.readVersion() + s.version = version + if err != nil { + return + } + + if !s.isSupportedVersion(version) { + err = ErrUnsupportedVersion.AddDetails("%d", version) + return + } + + objects, err = s.readCount() + s.objects = objects + return +} + +// readSignature reads an returns the signature field in the packfile. +func (s *Scanner) readSignature() ([]byte, error) { + var sig = make([]byte, 4) + if _, err := io.ReadFull(s.r, sig); err != nil { + return []byte{}, err + } + + return sig, nil +} + +// isValidSignature returns if sig is a valid packfile signature. +func (s *Scanner) isValidSignature(sig []byte) bool { + return bytes.Equal(sig, signature) +} + +// readVersion reads and returns the version field of a packfile. +func (s *Scanner) readVersion() (uint32, error) { + return binary.ReadUint32(s.r) +} + +// isSupportedVersion returns whether version v is supported by the parser. +// The current supported version is VersionSupported, defined above. 
+func (s *Scanner) isSupportedVersion(v uint32) bool { + return v == VersionSupported +} + +// readCount reads and returns the count of objects field of a packfile. +func (s *Scanner) readCount() (uint32, error) { + return binary.ReadUint32(s.r) +} + +// NextObjectHeader returns the ObjectHeader for the next object in the reader +func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { + if err := s.doPending(); err != nil { + return nil, err + } + + s.crc.Reset() + + h := &ObjectHeader{} + s.pendingObject = h + + var err error + h.Offset, err = s.r.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + + h.Type, h.Length, err = s.readObjectTypeAndLength() + if err != nil { + return nil, err + } + + switch h.Type { + case plumbing.OFSDeltaObject: + no, err := binary.ReadVariableWidthInt(s.r) + if err != nil { + return nil, err + } + + h.OffsetReference = h.Offset - no + case plumbing.REFDeltaObject: + var err error + h.Reference, err = binary.ReadHash(s.r) + if err != nil { + return nil, err + } + } + + return h, nil +} + +func (s *Scanner) doPending() error { + if s.version == 0 { + var err error + s.version, s.objects, err = s.Header() + if err != nil { + return err + } + } + + return s.discardObjectIfNeeded() +} + +func (s *Scanner) discardObjectIfNeeded() error { + if s.pendingObject == nil { + return nil + } + + h := s.pendingObject + n, _, err := s.NextObject(ioutil.Discard) + if err != nil { + return err + } + + if n != h.Length { + return fmt.Errorf( + "error discarding object, discarded %d, expected %d", + n, h.Length, + ) + } + + return nil +} + +// ReadObjectTypeAndLength reads and returns the object type and the +// length field from an object entry in a packfile. +func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) { + t, c, err := s.readType() + if err != nil { + return t, 0, err + } + + l, err := s.readLength(c) + + return t, l, err +} + +func (s *Scanner) readType() (plumbing.ObjectType, byte, error) { + var c byte + var err error + if c, err = s.r.ReadByte(); err != nil { + return plumbing.ObjectType(0), 0, err + } + + typ := parseType(c) + + return typ, c, nil +} + +func parseType(b byte) plumbing.ObjectType { + return plumbing.ObjectType((b & maskType) >> firstLengthBits) +} + +// the length is codified in the last 4 bits of the first byte and in +// the last 7 bits of subsequent bytes. Last byte has a 0 MSB. +func (s *Scanner) readLength(first byte) (int64, error) { + length := int64(first & maskFirstLength) + + c := first + shift := firstLengthBits + var err error + for c&maskContinue > 0 { + if c, err = s.r.ReadByte(); err != nil { + return 0, err + } + + length += int64(c&maskLength) << shift + shift += lengthBits + } + + return length, nil +} + +// NextObject writes the content of the next object into the reader, returns +// the number of bytes written, the CRC32 of the content and an error, if any +func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) { + defer s.crc.Reset() + + s.pendingObject = nil + written, err = s.copyObject(w) + crc32 = s.crc.Sum32() + return +} + +// ReadRegularObject reads and write a non-deltified object +// from it zlib stream in an object entry in the packfile. 
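Editorial sketch of the usual read loop over the Scanner API shown above. It assumes an io.Reader r positioned at the start of a packfile, a surrounding function that can return an error, and bytes/fmt imports.

s := packfile.NewScanner(r)

_, objects, err := s.Header() // checks the "PACK" signature and version
if err != nil {
	return err
}

for i := uint32(0); i < objects; i++ {
	h, err := s.NextObjectHeader() // type, length and delta reference, if any
	if err != nil {
		return err
	}

	var buf bytes.Buffer
	if _, _, err := s.NextObject(&buf); err != nil { // inflated object data
		return err
	}
	fmt.Println(h.Type, h.Length, buf.Len())
}
// A final Checksum() call (further down in scanner.go) returns the pack's trailing SHA-1.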
+func (s *Scanner) copyObject(w io.Writer) (int64, error) { + var err error + if s.zr == nil { + zr, err := zlib.NewReader(s.r) + if err != nil { + return 0, fmt.Errorf("zlib initialization error: %s", err) + } + + s.zr = zr.(readerResetter) + } else { + if err := s.zr.Reset(s.r, nil); err != nil { + return 0, fmt.Errorf("zlib reset error: %s", err) + } + } + + defer func() { + closeErr := s.zr.Close() + if err == nil { + err = closeErr + } + }() + + return io.Copy(w, s.zr) +} + +// Seek sets a new offset from start, returns the old position before the change +func (s *Scanner) Seek(offset int64) (previous int64, err error) { + // if seeking we assume that you are not interested on the header + if s.version == 0 { + s.version = VersionSupported + } + + previous, err = s.r.Seek(0, io.SeekCurrent) + if err != nil { + return -1, err + } + + _, err = s.r.Seek(offset, io.SeekStart) + return previous, err +} + +// Checksum returns the checksum of the packfile +func (s *Scanner) Checksum() (plumbing.Hash, error) { + err := s.discardObjectIfNeeded() + if err != nil { + return plumbing.ZeroHash, err + } + + return binary.ReadHash(s.r) +} + +// Close reads the reader until io.EOF +func (s *Scanner) Close() error { + _, err := io.Copy(ioutil.Discard, s.r) + return err +} + +type trackableReader struct { + count int64 + io.Reader +} + +// Read reads up to len(p) bytes into p. +func (r *trackableReader) Read(p []byte) (n int, err error) { + n, err = r.Reader.Read(p) + r.count += int64(n) + + return +} + +// Seek only supports io.SeekCurrent, any other operation fails +func (r *trackableReader) Seek(offset int64, whence int) (int64, error) { + if whence != io.SeekCurrent { + return -1, ErrSeekNotSupported + } + + return r.count, nil +} + +func newByteReadSeeker(r io.ReadSeeker) *bufferedSeeker { + return &bufferedSeeker{ + r: r, + Reader: *bufio.NewReader(r), + } +} + +type bufferedSeeker struct { + r io.ReadSeeker + bufio.Reader +} + +func (r *bufferedSeeker) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + current, err := r.r.Seek(offset, whence) + if err != nil { + return current, err + } + + return current - int64(r.Buffered()), nil + } + + defer r.Reader.Reset(r.r) + return r.r.Seek(offset, whence) +} + +type readerResetter interface { + io.ReadCloser + zlib.Resetter +} + +type reader interface { + io.Reader + io.ByteReader + io.Seeker +} + +type teeReader struct { + reader + w hash.Hash32 +} + +func (r *teeReader) Read(p []byte) (n int, err error) { + n, err = r.reader.Read(p) + if n > 0 { + if n, err := r.w.Write(p[:n]); err != nil { + return n, err + } + } + return +} + +func (r *teeReader) ReadByte() (b byte, err error) { + b, err = r.reader.ReadByte() + if err == nil { + _, err := r.w.Write([]byte{b}) + if err != nil { + return 0, err + } + } + + return +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go new file mode 100644 index 0000000000000000000000000000000000000000..797b813c6b4ce6c838812a1bc9c8a0246eb64c35 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go @@ -0,0 +1,125 @@ +// Package pktline implements reading payloads form pkt-lines and encoding +// pkt-lines from payloads. +package pktline + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// An Encoder writes pkt-lines to an output stream. +type Encoder struct { + w io.Writer +} + +const ( + // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. 
+ MaxPayloadSize = 65516 +) + +var ( + // FlushPkt are the contents of a flush-pkt pkt-line. + FlushPkt = []byte{'0', '0', '0', '0'} + // Flush is the payload to use with the Encode method to encode a flush-pkt. + Flush = []byte{} + // FlushString is the payload to use with the EncodeString method to encode a flush-pkt. + FlushString = "" + // ErrPayloadTooLong is returned by the Encode methods when any of the + // provided payloads is bigger than MaxPayloadSize. + ErrPayloadTooLong = errors.New("payload is too long") +) + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + } +} + +// Flush encodes a flush-pkt to the output stream. +func (e *Encoder) Flush() error { + _, err := e.w.Write(FlushPkt) + return err +} + +// Encode encodes a pkt-line with the payload specified and write it to +// the output stream. If several payloads are specified, each of them +// will get streamed in their own pkt-lines. +func (e *Encoder) Encode(payloads ...[]byte) error { + for _, p := range payloads { + if err := e.encodeLine(p); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeLine(p []byte) error { + if len(p) > MaxPayloadSize { + return ErrPayloadTooLong + } + + if bytes.Equal(p, Flush) { + if err := e.Flush(); err != nil { + return err + } + return nil + } + + n := len(p) + 4 + if _, err := e.w.Write(asciiHex16(n)); err != nil { + return err + } + if _, err := e.w.Write(p); err != nil { + return err + } + + return nil +} + +// Returns the hexadecimal ascii representation of the 16 less +// significant bits of n. The length of the returned slice will always +// be 4. Example: if n is 1234 (0x4d2), the return value will be +// []byte{'0', '4', 'd', '2'}. +func asciiHex16(n int) []byte { + var ret [4]byte + ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) + ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) + ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) + ret[3] = byteToASCIIHex(byte(n & 0x000f)) + + return ret[:] +} + +// turns a byte into its hexadecimal ascii representation. Example: +// from 11 (0xb) to 'b'. +func byteToASCIIHex(n byte) byte { + if n < 10 { + return '0' + n + } + + return 'a' - 10 + n +} + +// EncodeString works similarly as Encode but payloads are specified as strings. +func (e *Encoder) EncodeString(payloads ...string) error { + for _, p := range payloads { + if err := e.Encode([]byte(p)); err != nil { + return err + } + } + + return nil +} + +// Encodef encodes a single pkt-line with the payload formatted as +// the format specifier. The rest of the arguments will be used in +// the format string. +func (e *Encoder) Encodef(format string, a ...interface{}) error { + return e.EncodeString( + fmt.Sprintf(format, a...), + ) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go new file mode 100644 index 0000000000000000000000000000000000000000..4af254f002042b546fed15ccd08cb3c40adc5bfb --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go @@ -0,0 +1,134 @@ +package pktline + +import ( + "errors" + "io" +) + +const ( + lenSize = 4 +) + +// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. +var ErrInvalidPktLen = errors.New("invalid pkt-len found") + +// Scanner provides a convenient interface for reading the payloads of a +// series of pkt-lines. 
It takes an io.Reader providing the source, +// which then can be tokenized through repeated calls to the Scan +// method. +// +// After each Scan call, the Bytes method will return the payload of the +// corresponding pkt-line on a shared buffer, which will be 65516 bytes +// or smaller. Flush pkt-lines are represented by empty byte slices. +// +// Scanning stops at EOF or the first I/O error. +type Scanner struct { + r io.Reader // The reader provided by the client + err error // Sticky error + payload []byte // Last pkt-payload + len [lenSize]byte // Last pkt-len +} + +// NewScanner returns a new Scanner to read from r. +func NewScanner(r io.Reader) *Scanner { + return &Scanner{ + r: r, + } +} + +// Err returns the first error encountered by the Scanner. +func (s *Scanner) Err() error { + return s.err +} + +// Scan advances the Scanner to the next pkt-line, whose payload will +// then be available through the Bytes method. Scanning stops at EOF +// or the first I/O error. After Scan returns false, the Err method +// will return any error that occurred during scanning, except that if +// it was io.EOF, Err will return nil. +func (s *Scanner) Scan() bool { + var l int + l, s.err = s.readPayloadLen() + if s.err == io.EOF { + s.err = nil + return false + } + if s.err != nil { + return false + } + + if cap(s.payload) < l { + s.payload = make([]byte, 0, l) + } + + if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { + return false + } + s.payload = s.payload[:l] + + return true +} + +// Bytes returns the most recent payload generated by a call to Scan. +// The underlying array may point to data that will be overwritten by a +// subsequent call to Scan. It does no allocation. +func (s *Scanner) Bytes() []byte { + return s.payload +} + +// Method readPayloadLen returns the payload length by reading the +// pkt-len and subtracting the pkt-len size. +func (s *Scanner) readPayloadLen() (int, error) { + if _, err := io.ReadFull(s.r, s.len[:]); err != nil { + if err == io.ErrUnexpectedEOF { + return 0, ErrInvalidPktLen + } + + return 0, err + } + + n, err := hexDecode(s.len) + if err != nil { + return 0, err + } + + switch { + case n == 0: + return 0, nil + case n <= lenSize: + return 0, ErrInvalidPktLen + case n > MaxPayloadSize+lenSize: + return 0, ErrInvalidPktLen + default: + return n - lenSize, nil + } +} + +// Turns the hexadecimal representation of a number in a byte slice into +// a number. This function substitute strconv.ParseUint(string(buf), 16, +// 16) and/or hex.Decode, to avoid generating new strings, thus helping the +// GC. +func hexDecode(buf [lenSize]byte) (int, error) { + var ret int + for i := 0; i < lenSize; i++ { + n, err := asciiHexToByte(buf[i]) + if err != nil { + return 0, ErrInvalidPktLen + } + ret = 16*ret + int(n) + } + return ret, nil +} + +// turns the hexadecimal ascii representation of a byte into its +// numerical value. Example: from 'b' to 11 (0xb). 
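Editorial sketch of a pkt-line round trip using the Encoder and Scanner above (assumes bytes and fmt are imported alongside this pktline package):

var buf bytes.Buffer

e := pktline.NewEncoder(&buf)
_ = e.EncodeString("hello\n", "world\n", pktline.FlushString)
// buf now holds "000ahello\n000aworld\n0000"

s := pktline.NewScanner(&buf)
for s.Scan() {
	fmt.Printf("%q\n", s.Bytes()) // "hello\n", "world\n", then "" for the flush-pkt
}
if err := s.Err(); err != nil {
	fmt.Println("scan error:", err)
}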
+func asciiHexToByte(b byte) (byte, error) { + switch { + case b >= '0' && b <= '9': + return b - '0', nil + case b >= 'a' && b <= 'f': + return b - 'a' + 10, nil + default: + return 0, ErrInvalidPktLen + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go new file mode 100644 index 0000000000000000000000000000000000000000..8e60877894abba0e00c40ba5d68f0fdc78a88d8f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go @@ -0,0 +1,73 @@ +package plumbing + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "hash" + "sort" + "strconv" +) + +// Hash SHA1 hased content +type Hash [20]byte + +// ZeroHash is Hash with value zero +var ZeroHash Hash + +// ComputeHash compute the hash for a given ObjectType and content +func ComputeHash(t ObjectType, content []byte) Hash { + h := NewHasher(t, int64(len(content))) + h.Write(content) + return h.Sum() +} + +// NewHash return a new Hash from a hexadecimal hash representation +func NewHash(s string) Hash { + b, _ := hex.DecodeString(s) + + var h Hash + copy(h[:], b) + + return h +} + +func (h Hash) IsZero() bool { + var empty Hash + return h == empty +} + +func (h Hash) String() string { + return hex.EncodeToString(h[:]) +} + +type Hasher struct { + hash.Hash +} + +func NewHasher(t ObjectType, size int64) Hasher { + h := Hasher{sha1.New()} + h.Write(t.Bytes()) + h.Write([]byte(" ")) + h.Write([]byte(strconv.FormatInt(size, 10))) + h.Write([]byte{0}) + return h +} + +func (h Hasher) Sum() (hash Hash) { + copy(hash[:], h.Hash.Sum(nil)) + return +} + +// HashesSort sorts a slice of Hashes in increasing order. +func HashesSort(a []Hash) { + sort.Sort(HashSlice(a)) +} + +// HashSlice attaches the methods of sort.Interface to []Hash, sorting in +// increasing order. +type HashSlice []Hash + +func (p HashSlice) Len() int { return len(p) } +func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 } +func (p HashSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go new file mode 100644 index 0000000000000000000000000000000000000000..c65ce1f643663a0ddbb50188594dfa0144d774c1 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go @@ -0,0 +1,59 @@ +package plumbing + +import ( + "bytes" + "io" + "io/ioutil" +) + +// MemoryObject on memory Object implementation +type MemoryObject struct { + t ObjectType + h Hash + cont []byte + sz int64 +} + +// Hash return the object Hash, the hash is calculated on-the-fly the first +// time is called, the subsequent calls the same Hash is returned even if the +// type or the content has changed. The Hash is only generated if the size of +// the content is exactly the Object.Size +func (o *MemoryObject) Hash() Hash { + if o.h == ZeroHash && int64(len(o.cont)) == o.sz { + o.h = ComputeHash(o.t, o.cont) + } + + return o.h +} + +// Type return the ObjectType +func (o *MemoryObject) Type() ObjectType { return o.t } + +// SetType sets the ObjectType +func (o *MemoryObject) SetType(t ObjectType) { o.t = t } + +// Size return the size of the object +func (o *MemoryObject) Size() int64 { return o.sz } + +// SetSize set the object size, a content of the given size should be written +// afterwards +func (o *MemoryObject) SetSize(s int64) { o.sz = s } + +// Reader returns a ObjectReader used to read the object's content. 
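Editorial sketch of ComputeHash and MemoryObject from the two files above; BlobObject and the MemoryObject Write method appear a little further down in this diff, and fmt is assumed imported.

content := []byte("hello world")

// ComputeHash hashes "<type> <size>\x00<content>", the same preimage git uses.
fmt.Println(plumbing.ComputeHash(plumbing.BlobObject, content))

// MemoryObject only reports a hash once the written content matches SetSize.
obj := &plumbing.MemoryObject{}
obj.SetType(plumbing.BlobObject)
obj.SetSize(int64(len(content)))
obj.Write(content)
fmt.Println(obj.Hash() == plumbing.ComputeHash(plumbing.BlobObject, content)) // true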
+func (o *MemoryObject) Reader() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewBuffer(o.cont)), nil +} + +// Writer returns a ObjectWriter used to write the object's content. +func (o *MemoryObject) Writer() (io.WriteCloser, error) { + return o, nil +} + +func (o *MemoryObject) Write(p []byte) (n int, err error) { + o.cont = append(o.cont, p...) + return len(p), nil +} + +// Close releases any resources consumed by the object when it is acting as a +// ObjectWriter. +func (o *MemoryObject) Close() error { return nil } diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go new file mode 100644 index 0000000000000000000000000000000000000000..3304da278cd8e71e9dbc51acc386b584c11dc030 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go @@ -0,0 +1,94 @@ +// package plumbing implement the core interfaces and structs used by go-git +package plumbing + +import ( + "errors" + "io" +) + +var ( + ErrObjectNotFound = errors.New("object not found") + // ErrInvalidType is returned when an invalid object type is provided. + ErrInvalidType = errors.New("invalid object type") +) + +// Object is a generic representation of any git object +type EncodedObject interface { + Hash() Hash + Type() ObjectType + SetType(ObjectType) + Size() int64 + SetSize(int64) + Reader() (io.ReadCloser, error) + Writer() (io.WriteCloser, error) +} + +// ObjectType internal object type +// Integer values from 0 to 7 map to those exposed by git. +// AnyObject is used to represent any from 0 to 7. +type ObjectType int8 + +const ( + InvalidObject ObjectType = 0 + CommitObject ObjectType = 1 + TreeObject ObjectType = 2 + BlobObject ObjectType = 3 + TagObject ObjectType = 4 + // 5 reserved for future expansion + OFSDeltaObject ObjectType = 6 + REFDeltaObject ObjectType = 7 + + AnyObject ObjectType = -127 +) + +func (t ObjectType) String() string { + switch t { + case CommitObject: + return "commit" + case TreeObject: + return "tree" + case BlobObject: + return "blob" + case TagObject: + return "tag" + case OFSDeltaObject: + return "ofs-delta" + case REFDeltaObject: + return "ref-delta" + case AnyObject: + return "any" + default: + return "unknown" + } +} + +func (t ObjectType) Bytes() []byte { + return []byte(t.String()) +} + +// Valid returns true if t is a valid ObjectType. +func (t ObjectType) Valid() bool { + return t >= CommitObject && t <= REFDeltaObject +} + +// ParseObjectType parses a string representation of ObjectType. It returns an +// error on parse failure. +func ParseObjectType(value string) (typ ObjectType, err error) { + switch value { + case "commit": + typ = CommitObject + case "tree": + typ = TreeObject + case "blob": + typ = BlobObject + case "tag": + typ = TagObject + case "ofs-delta": + typ = OFSDeltaObject + case "ref-delta": + typ = REFDeltaObject + default: + err = ErrInvalidType + } + return +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go new file mode 100644 index 0000000000000000000000000000000000000000..2771416787817201e268423b9b4499efcaa7aacc --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go @@ -0,0 +1,139 @@ +package object + +import ( + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// Blob is used to store arbitrary data - it is generally a file. +type Blob struct { + // Hash of the blob. 
+ Hash plumbing.Hash + // Size of the (uncompressed) blob. + Size int64 + + obj plumbing.EncodedObject +} + +// GetBlob gets a blob from an object storer and decodes it. +func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) { + o, err := s.EncodedObject(plumbing.BlobObject, h) + if err != nil { + return nil, err + } + + return DecodeBlob(o) +} + +// DecodeObject decodes an encoded object into a *Blob. +func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) { + b := &Blob{} + if err := b.Decode(o); err != nil { + return nil, err + } + + return b, nil +} + +// ID returns the object ID of the blob. The returned value will always match +// the current value of Blob.Hash. +// +// ID is present to fulfill the Object interface. +func (b *Blob) ID() plumbing.Hash { + return b.Hash +} + +// Type returns the type of object. It always returns plumbing.BlobObject. +// +// Type is present to fulfill the Object interface. +func (b *Blob) Type() plumbing.ObjectType { + return plumbing.BlobObject +} + +// Decode transforms a plumbing.EncodedObject into a Blob struct. +func (b *Blob) Decode(o plumbing.EncodedObject) error { + if o.Type() != plumbing.BlobObject { + return ErrUnsupportedObject + } + + b.Hash = o.Hash() + b.Size = o.Size() + b.obj = o + + return nil +} + +// Encode transforms a Blob into a plumbing.EncodedObject. +func (b *Blob) Encode(o plumbing.EncodedObject) error { + w, err := o.Writer() + if err != nil { + return err + } + defer ioutil.CheckClose(w, &err) + r, err := b.Reader() + if err != nil { + return err + } + defer ioutil.CheckClose(r, &err) + _, err = io.Copy(w, r) + o.SetType(plumbing.BlobObject) + return err +} + +// Reader returns a reader allow the access to the content of the blob +func (b *Blob) Reader() (io.ReadCloser, error) { + return b.obj.Reader() +} + +// BlobIter provides an iterator for a set of blobs. +type BlobIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewBlobIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *BlobIter that iterates over all +// blobs contained in the storer.EncodedObjectIter. +// +// Any non-blob object returned by the storer.EncodedObjectIter is skipped. +func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter { + return &BlobIter{iter, s} +} + +// Next moves the iterator to the next blob and returns a pointer to it. If +// there are no more blobs, it returns io.EOF. +func (iter *BlobIter) Next() (*Blob, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + if obj.Type() != plumbing.BlobObject { + continue + } + + return DecodeBlob(obj) + } +} + +// ForEach call the cb function for each blob contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. 
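Editorial sketch of reading a single blob through the API above. It assumes a storer.EncodedObjectStorer s, a blob hash h, a surrounding function that can return an error, and fmt/io/os imports.

b, err := object.GetBlob(s, h)
if err != nil {
	return err
}

r, err := b.Reader()
if err != nil {
	return err
}
defer r.Close()

fmt.Println(b.Hash, b.Size)
_, err = io.Copy(os.Stdout, r) // stream the uncompressed blob contents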
+func (iter *BlobIter) ForEach(cb func(*Blob) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + if obj.Type() != plumbing.BlobObject { + return nil + } + + b, err := DecodeBlob(obj) + if err != nil { + return err + } + + return cb(b) + }) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go new file mode 100644 index 0000000000000000000000000000000000000000..d29836de2a70439db9aa9f2d77f2b4ed8efb8ac8 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go @@ -0,0 +1,120 @@ +package object + +import ( + "bytes" + "fmt" + "strings" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie" +) + +// Change values represent a detected change between two git trees. For +// modifications, From is the original status of the node and To is its +// final status. For insertions, From is the zero value and for +// deletions To is the zero value. +type Change struct { + From ChangeEntry + To ChangeEntry +} + +var empty = ChangeEntry{} + +// Action returns the kind of action represented by the change, an +// insertion, a deletion or a modification. +func (c *Change) Action() (merkletrie.Action, error) { + if c.From == empty && c.To == empty { + return merkletrie.Action(0), + fmt.Errorf("malformed change: empty from and to") + } + if c.From == empty { + return merkletrie.Insert, nil + } + if c.To == empty { + return merkletrie.Delete, nil + } + + return merkletrie.Modify, nil +} + +// Files return the files before and after a change. +// For insertions from will be nil. For deletions to will be nil. +func (c *Change) Files() (from, to *File, err error) { + action, err := c.Action() + if err != nil { + return + } + + if action == merkletrie.Insert || action == merkletrie.Modify { + to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry) + if err != nil { + return + } + } + + if action == merkletrie.Delete || action == merkletrie.Modify { + from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry) + if err != nil { + return + } + } + + return +} + +func (c *Change) String() string { + action, err := c.Action() + if err != nil { + return fmt.Sprintf("malformed change") + } + + return fmt.Sprintf("<Action: %s, Path: %s>", action, c.name()) +} + +func (c *Change) name() string { + if c.From != empty { + return c.From.Name + } + + return c.To.Name +} + +// ChangeEntry values represent a node that has suffered a change. +type ChangeEntry struct { + // Full path of the node using "/" as separator. + Name string + // Parent tree of the node that has changed. + Tree *Tree + // The entry of the node. + TreeEntry TreeEntry +} + +// Changes represents a collection of changes between two git trees. +// Implements sort.Interface lexicographically over the path of the +// changed files. 
+type Changes []*Change + +func (c Changes) Len() int { + return len(c) +} + +func (c Changes) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} + +func (c Changes) Less(i, j int) bool { + return strings.Compare(c[i].name(), c[j].name()) < 0 +} + +func (c Changes) String() string { + var buffer bytes.Buffer + buffer.WriteString("[") + comma := "" + for _, v := range c { + buffer.WriteString(comma) + buffer.WriteString(v.String()) + comma = ", " + } + buffer.WriteString("]") + + return buffer.String() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go new file mode 100644 index 0000000000000000000000000000000000000000..cf8de6cd0c9cd058287200ce2857cd42228f5c6d --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go @@ -0,0 +1,60 @@ +package object + +import ( + "fmt" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie" + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +// The folowing functions transform changes types form the merkletrie +// package to changes types from this package. + +func newChange(c merkletrie.Change) (*Change, error) { + ret := &Change{} + + var err error + if ret.From, err = newChangeEntry(c.From); err != nil { + return nil, fmt.Errorf("From field: ", err) + } + + if ret.To, err = newChangeEntry(c.To); err != nil { + return nil, fmt.Errorf("To field: ", err) + } + + return ret, nil +} + +func newChangeEntry(p noder.Path) (ChangeEntry, error) { + if p == nil { + return empty, nil + } + + asTreeNoder, ok := p.Last().(*treeNoder) + if !ok { + return ChangeEntry{}, fmt.Errorf("cannot transform non-TreeNoders") + } + + return ChangeEntry{ + Name: p.String(), + Tree: asTreeNoder.parent, + TreeEntry: TreeEntry{ + Name: asTreeNoder.name, + Mode: asTreeNoder.mode, + Hash: asTreeNoder.hash, + }, + }, nil +} + +func newChanges(src merkletrie.Changes) (Changes, error) { + ret := make(Changes, len(src)) + var err error + for i, e := range src { + ret[i], err = newChange(e) + if err != nil { + return nil, fmt.Errorf("change #%d: %s", err) + } + } + + return ret, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go new file mode 100644 index 0000000000000000000000000000000000000000..7507bc740c5f668a929f6d0a556a1d86e21f1b3f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go @@ -0,0 +1,300 @@ +package object + +import ( + "bufio" + "bytes" + "fmt" + "io" + "sort" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// Hash represents the hash of an object +type Hash plumbing.Hash + +// Commit points to a single tree, marking it as what the project looked like +// at a certain point in time. It contains meta-information about that point +// in time, such as a timestamp, the author of the changes since the last +// commit, a pointer to the previous commit(s), etc. +// http://schacon.github.io/gitbook/1_the_git_object_model.html +type Commit struct { + // Hash of the commit object. + Hash plumbing.Hash + // Author is the original author of the commit. + Author Signature + // Committer is the one performing the commit, might be different from + // Author. + Committer Signature + // Message is the commit message, contains arbitrary text. 
+ Message string + + tree plumbing.Hash + parents []plumbing.Hash + s storer.EncodedObjectStorer +} + +// GetCommit gets a commit from an object storer and decodes it. +func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) { + o, err := s.EncodedObject(plumbing.CommitObject, h) + if err != nil { + return nil, err + } + + return DecodeCommit(s, o) +} + +// DecodeCommit decodes an encoded object into a *Commit and associates it to +// the given object storer. +func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) { + c := &Commit{s: s} + if err := c.Decode(o); err != nil { + return nil, err + } + + return c, nil +} + +// Tree returns the Tree from the commit. +func (c *Commit) Tree() (*Tree, error) { + return GetTree(c.s, c.tree) +} + +// Parents return a CommitIter to the parent Commits. +func (c *Commit) Parents() *CommitIter { + return NewCommitIter(c.s, + storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.parents), + ) +} + +// NumParents returns the number of parents in a commit. +func (c *Commit) NumParents() int { + return len(c.parents) +} + +// File returns the file with the specified "path" in the commit and a +// nil error if the file exists. If the file does not exist, it returns +// a nil file and the ErrFileNotFound error. +func (c *Commit) File(path string) (*File, error) { + tree, err := c.Tree() + if err != nil { + return nil, err + } + + return tree.File(path) +} + +// Files returns a FileIter allowing to iterate over the Tree +func (c *Commit) Files() (*FileIter, error) { + tree, err := c.Tree() + if err != nil { + return nil, err + } + + return tree.Files(), nil +} + +// ID returns the object ID of the commit. The returned value will always match +// the current value of Commit.Hash. +// +// ID is present to fulfill the Object interface. +func (c *Commit) ID() plumbing.Hash { + return c.Hash +} + +// Type returns the type of object. It always returns plumbing.CommitObject. +// +// Type is present to fulfill the Object interface. +func (c *Commit) Type() plumbing.ObjectType { + return plumbing.CommitObject +} + +// Decode transforms a plumbing.EncodedObject into a Commit struct. +func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { + if o.Type() != plumbing.CommitObject { + return ErrUnsupportedObject + } + + c.Hash = o.Hash() + + reader, err := o.Reader() + if err != nil { + return err + } + defer ioutil.CheckClose(reader, &err) + + r := bufio.NewReader(reader) + + var message bool + for { + line, err := r.ReadBytes('\n') + if err != nil && err != io.EOF { + return err + } + + if !message { + line = bytes.TrimSpace(line) + if len(line) == 0 { + message = true + continue + } + + split := bytes.SplitN(line, []byte{' '}, 2) + switch string(split[0]) { + case "tree": + c.tree = plumbing.NewHash(string(split[1])) + case "parent": + c.parents = append(c.parents, plumbing.NewHash(string(split[1]))) + case "author": + c.Author.Decode(split[1]) + case "committer": + c.Committer.Decode(split[1]) + } + } else { + c.Message += string(line) + } + + if err == io.EOF { + return nil + } + } +} + +// History returns a slice with the previous commits in the history of this +// commit, sorted in reverse chronological order. 
+func (c *Commit) History() ([]*Commit, error) { + var commits []*Commit + err := WalkCommitHistory(c, func(commit *Commit) error { + commits = append(commits, commit) + return nil + }) + + ReverseSortCommits(commits) + return commits, err +} + +// Encode transforms a Commit into a plumbing.EncodedObject. +func (b *Commit) Encode(o plumbing.EncodedObject) error { + o.SetType(plumbing.CommitObject) + w, err := o.Writer() + if err != nil { + return err + } + defer ioutil.CheckClose(w, &err) + if _, err = fmt.Fprintf(w, "tree %s\n", b.tree.String()); err != nil { + return err + } + for _, parent := range b.parents { + if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil { + return err + } + } + if _, err = fmt.Fprint(w, "author "); err != nil { + return err + } + if err = b.Author.Encode(w); err != nil { + return err + } + if _, err = fmt.Fprint(w, "\ncommitter "); err != nil { + return err + } + if err = b.Committer.Encode(w); err != nil { + return err + } + if _, err = fmt.Fprintf(w, "\n\n%s", b.Message); err != nil { + return err + } + return err +} + +func (c *Commit) String() string { + return fmt.Sprintf( + "%s %s\nAuthor: %s\nDate: %s\n\n%s\n", + plumbing.CommitObject, c.Hash, c.Author.String(), + c.Author.When.Format(DateFormat), indent(c.Message), + ) +} + +func indent(t string) string { + var output []string + for _, line := range strings.Split(t, "\n") { + if len(line) != 0 { + line = " " + line + } + + output = append(output, line) + } + + return strings.Join(output, "\n") +} + +// CommitIter provides an iterator for a set of commits. +type CommitIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewCommitIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *CommitIter that iterates over all +// commits contained in the storer.EncodedObjectIter. +// +// Any non-commit object returned by the storer.EncodedObjectIter is skipped. +func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *CommitIter { + return &CommitIter{iter, s} +} + +// Next moves the iterator to the next commit and returns a pointer to it. If +// there are no more commits, it returns io.EOF. +func (iter *CommitIter) Next() (*Commit, error) { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + return DecodeCommit(iter.s, obj) +} + +// ForEach call the cb function for each commit contained on this iter until +// an error appends or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *CommitIter) ForEach(cb func(*Commit) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + c, err := DecodeCommit(iter.s, obj) + if err != nil { + return err + } + + return cb(c) + }) +} + +type commitSorterer struct { + l []*Commit +} + +func (s commitSorterer) Len() int { + return len(s.l) +} + +func (s commitSorterer) Less(i, j int) bool { + return s.l[i].Committer.When.Before(s.l[j].Committer.When) +} + +func (s commitSorterer) Swap(i, j int) { + s.l[i], s.l[j] = s.l[j], s.l[i] +} + +// SortCommits sorts a commit list by commit date, from older to newer. +func SortCommits(l []*Commit) { + s := &commitSorterer{l} + sort.Sort(s) +} + +// ReverseSortCommits sorts a commit list by commit date, from newer to older. 
+func ReverseSortCommits(l []*Commit) { + s := &commitSorterer{l} + sort.Sort(sort.Reverse(s)) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go new file mode 100644 index 0000000000000000000000000000000000000000..681ea5ec8dc004d518993e8429b158b72ba171cf --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go @@ -0,0 +1,96 @@ +package object + +import ( + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +type commitWalker struct { + seen map[plumbing.Hash]bool + stack []*CommitIter + start *Commit + cb func(*Commit) error +} + +// WalkCommitHistory walks the commit history, starting at the given commit and +// visiting its parents in pre-order. The given callback will be called for each +// visited commit. Each commit will be visited only once. If the callback returns +// an error, walking will stop and will return the error. Other errors might be +// returned if the history cannot be traversed (e.g. missing objects). +func WalkCommitHistory(c *Commit, cb func(*Commit) error) error { + w := &commitWalker{ + seen: make(map[plumbing.Hash]bool), + stack: make([]*CommitIter, 0), + start: c, + cb: cb, + } + + return w.walk() +} + +func (w *commitWalker) walk() error { + var commit *Commit + + if w.start != nil { + commit = w.start + w.start = nil + } else { + current := len(w.stack) - 1 + if current < 0 { + return nil + } + + var err error + commit, err = w.stack[current].Next() + if err == io.EOF { + w.stack = w.stack[:current] + return w.walk() + } + + if err != nil { + return err + } + } + + // check and update seen + if w.seen[commit.Hash] { + return w.walk() + } + + w.seen[commit.Hash] = true + if commit.NumParents() > 0 { + w.stack = append(w.stack, commit.Parents()) + } + + if err := w.cb(commit); err != nil { + return err + } + + return w.walk() +} + +// WalkCommitHistoryPost walks the commit history like WalkCommitHistory +// but in post-order. This means that after walking a merge commit, the +// merged commit will be walked before the base it was merged on. This +// can be useful if you wish to see the history in chronological order. +func WalkCommitHistoryPost(c *Commit, cb func(*Commit) error) error { + stack := []*Commit{c} + seen := make(map[plumbing.Hash]bool) + for len(stack) > 0 { + c := stack[len(stack)-1] + stack = stack[:len(stack)-1] + if seen[c.Hash] { + continue + } + seen[c.Hash] = true + if err := cb(c); err != nil { + return err + } + c.Parents().ForEach(func(pcm *Commit) error { + stack = append(stack, pcm) + return nil + }) + } + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go new file mode 100644 index 0000000000000000000000000000000000000000..87a7153e2049a9c6b42fdce301a986650d0644c2 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go @@ -0,0 +1,26 @@ +package object + +import ( + "bytes" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie" + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +// DiffTree compares the content and mode of the blobs found via two +// tree objects. 
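// NOTE (editorial sketch, not part of the vendored upstream file): comparing a
// commit's tree with its first parent's tree via DiffTree below. The Changes
// return type is defined elsewhere in this package (not shown in this diff);
// diffing a root commit against an empty tree is omitted for brevity.
func exampleChangedSinceParent(c *Commit) (Changes, error) {
	if c.NumParents() == 0 {
		return nil, nil
	}

	parent, err := c.Parents().Next()
	if err != nil {
		return nil, err
	}

	from, err := parent.Tree()
	if err != nil {
		return nil, err
	}

	to, err := c.Tree()
	if err != nil {
		return nil, err
	}

	return DiffTree(from, to)
}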
+func DiffTree(a, b *Tree) (Changes, error) { + from := newTreeNoder(a) + to := newTreeNoder(b) + + hashEqual := func(a, b noder.Hasher) bool { + return bytes.Equal(a.Hash(), b.Hash()) + } + + merkletrieChanges, err := merkletrie.DiffTree(from, to, hashEqual) + if err != nil { + return nil, err + } + + return newChanges(merkletrieChanges) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go new file mode 100644 index 0000000000000000000000000000000000000000..6932c31ababda13032d1b62ceb96268454653555 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go @@ -0,0 +1,125 @@ +package object + +import ( + "bytes" + "io" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing/filemode" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// File represents git file objects. +type File struct { + // Name is the path of the file. It might be relative to a tree, + // depending of the function that generates it. + Name string + // Mode is the file mode. + Mode filemode.FileMode + // Blob with the contents of the file. + Blob +} + +// NewFile returns a File based on the given blob object +func NewFile(name string, m filemode.FileMode, b *Blob) *File { + return &File{Name: name, Mode: m, Blob: *b} +} + +// Contents returns the contents of a file as a string. +func (f *File) Contents() (content string, err error) { + reader, err := f.Reader() + if err != nil { + return "", err + } + defer ioutil.CheckClose(reader, &err) + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(reader); err != nil { + return "", err + } + + return buf.String(), nil +} + +// Lines returns a slice of lines from the contents of a file, stripping +// all end of line characters. If the last line is empty (does not end +// in an end of line), it is also stripped. +func (f *File) Lines() ([]string, error) { + content, err := f.Contents() + if err != nil { + return nil, err + } + + splits := strings.Split(content, "\n") + // remove the last line if it is empty + if splits[len(splits)-1] == "" { + return splits[:len(splits)-1], nil + } + + return splits, nil +} + +// FileIter provides an iterator for the files in a tree. +type FileIter struct { + s storer.EncodedObjectStorer + w TreeWalker +} + +// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a +// *FileIter that iterates over all files contained in the tree, recursively. +func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter { + return &FileIter{s: s, w: *NewTreeWalker(t, true)} +} + +// Next moves the iterator to the next file and returns a pointer to it. If +// there are no more files, it returns io.EOF. +func (iter *FileIter) Next() (*File, error) { + for { + name, entry, err := iter.w.Next() + if err != nil { + return nil, err + } + + if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule { + continue + } + + blob, err := GetBlob(iter.s, entry.Hash) + if err != nil { + return nil, err + } + + return NewFile(name, entry.Mode, blob), nil + } +} + +// ForEach call the cb function for each file contained in this iter until +// an error happens or the end of the iter is reached. If plumbing.ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. 
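// NOTE (editorial sketch, not part of the vendored upstream file): listing
// every file in a commit's tree with Files and FileIter.ForEach. Returning
// storer.ErrStop from the callback ends the iteration early without an error.
func exampleListFiles(c *Commit) ([]string, error) {
	iter, err := c.Files()
	if err != nil {
		return nil, err
	}

	var names []string
	err = iter.ForEach(func(f *File) error {
		names = append(names, f.Name)
		return nil
	})

	return names, err
}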
+func (iter *FileIter) ForEach(cb func(*File) error) error { + defer iter.Close() + + for { + f, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + if err := cb(f); err != nil { + if err == storer.ErrStop { + return nil + } + + return err + } + } +} + +func (iter *FileIter) Close() { + iter.w.Close() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go new file mode 100644 index 0000000000000000000000000000000000000000..701145d05679d51c558a90788b506395582b63b2 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go @@ -0,0 +1,228 @@ +// Package object contains implementations of all Git objects and utility +// functions to work with them. +package object + +import ( + "bytes" + "errors" + "fmt" + "io" + "strconv" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/storer" +) + +// ErrUnsupportedObject trigger when a non-supported object is being decoded. +var ErrUnsupportedObject = errors.New("unsupported object type") + +// Object is a generic representation of any git object. It is implemented by +// Commit, Tree, Blob, and Tag, and includes the functions that are common to +// them. +// +// Object is returned when an object can be of any type. It is frequently used +// with a type cast to acquire the specific type of object: +// +// func process(obj Object) { +// switch o := obj.(type) { +// case *Commit: +// // o is a Commit +// case *Tree: +// // o is a Tree +// case *Blob: +// // o is a Blob +// case *Tag: +// // o is a Tag +// } +// } +// +// This interface is intentionally different from plumbing.EncodedObject, which +// is a lower level interface used by storage implementations to read and write +// objects in its encoded form. +type Object interface { + ID() plumbing.Hash + Type() plumbing.ObjectType + Decode(plumbing.EncodedObject) error + Encode(plumbing.EncodedObject) error +} + +// GetObject gets an object from an object storer and decodes it. +func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) { + o, err := s.EncodedObject(plumbing.AnyObject, h) + if err != nil { + return nil, err + } + + return DecodeObject(s, o) +} + +// DecodeObject decodes an encoded object into an Object and associates it to +// the given object storer. +func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) { + switch o.Type() { + case plumbing.CommitObject: + return DecodeCommit(s, o) + case plumbing.TreeObject: + return DecodeTree(s, o) + case plumbing.BlobObject: + return DecodeBlob(o) + case plumbing.TagObject: + return DecodeTag(s, o) + default: + return nil, plumbing.ErrInvalidType + } +} + +// DateFormat is the format being used in the original git implementation +const DateFormat = "Mon Jan 02 15:04:05 2006 -0700" + +// Signature is used to identify who and when created a commit or tag. +type Signature struct { + // Name represents a person name. It is an arbitrary string. + Name string + // Email is an email, but it cannot be assumed to be well-formed. + Email string + // When is the timestamp of the signature. 
+ When time.Time +} + +// Decode decodes a byte slice into a signature +func (s *Signature) Decode(b []byte) { + open := bytes.LastIndexByte(b, '<') + close := bytes.LastIndexByte(b, '>') + if open == -1 || close == -1 { + return + } + + if close < open { + return + } + + s.Name = string(bytes.Trim(b[:open], " ")) + s.Email = string(b[open+1 : close]) + + hasTime := close+2 < len(b) + if hasTime { + s.decodeTimeAndTimeZone(b[close+2:]) + } +} + +// Encode encodes a Signature into a writer. +func (s *Signature) Encode(w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil { + return err + } + if err := s.encodeTimeAndTimeZone(w); err != nil { + return err + } + return nil +} + +var timeZoneLength = 5 + +func (s *Signature) decodeTimeAndTimeZone(b []byte) { + space := bytes.IndexByte(b, ' ') + if space == -1 { + space = len(b) + } + + ts, err := strconv.ParseInt(string(b[:space]), 10, 64) + if err != nil { + return + } + + s.When = time.Unix(ts, 0).In(time.UTC) + var tzStart = space + 1 + if tzStart >= len(b) || tzStart+timeZoneLength > len(b) { + return + } + + tl, err := time.Parse("-0700", string(b[tzStart:tzStart+timeZoneLength])) + if err != nil { + return + } + + s.When = s.When.In(tl.Location()) +} + +func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error { + _, err := fmt.Fprintf(w, "%d %s", s.When.Unix(), s.When.Format("-0700")) + return err +} + +func (s *Signature) String() string { + return fmt.Sprintf("%s <%s>", s.Name, s.Email) +} + +// ObjectIter provides an iterator for a set of objects. +type ObjectIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewObjectIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all +// objects contained in the storer.EncodedObjectIter. +func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter { + return &ObjectIter{iter, s} +} + +// Next moves the iterator to the next object and returns a pointer to it. If +// there are no more objects, it returns io.EOF. +func (iter *ObjectIter) Next() (Object, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + o, err := iter.toObject(obj) + if err == plumbing.ErrInvalidType { + continue + } + + if err != nil { + return nil, err + } + + return o, nil + } +} + +// ForEach call the cb function for each object contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. 
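// NOTE (editorial sketch, not part of the vendored upstream file): a Signature
// round trip through the Encode/Decode pair above. The encoded form is the
// usual git format, "Name <email> <unix-seconds> <timezone>"; the name and
// email here are made-up example data.
func exampleSignatureRoundTrip() (Signature, error) {
	in := Signature{
		Name:  "Jane Doe",
		Email: "jane@example.com",
		When:  time.Unix(1500000000, 0).In(time.UTC),
	}

	var buf bytes.Buffer
	if err := in.Encode(&buf); err != nil {
		return Signature{}, err
	}

	// Decode never fails; malformed input simply leaves fields unset.
	var out Signature
	out.Decode(buf.Bytes())
	return out, nil
}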
+func (iter *ObjectIter) ForEach(cb func(Object) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + o, err := iter.toObject(obj) + if err == plumbing.ErrInvalidType { + return nil + } + + if err != nil { + return err + } + + return cb(o) + }) +} + +func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) { + switch obj.Type() { + case plumbing.BlobObject: + blob := &Blob{} + return blob, blob.Decode(obj) + case plumbing.TreeObject: + tree := &Tree{s: iter.s} + return tree, tree.Decode(obj) + case plumbing.CommitObject: + commit := &Commit{} + return commit, commit.Decode(obj) + case plumbing.TagObject: + tag := &Tag{} + return tag, tag.Decode(obj) + default: + return nil, plumbing.ErrInvalidType + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go new file mode 100644 index 0000000000000000000000000000000000000000..7b091d076dfc023d8d1530bda480dae67d541928 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go @@ -0,0 +1,275 @@ +package object + +import ( + "bufio" + "bytes" + "fmt" + "io" + stdioutil "io/ioutil" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// Tag represents an annotated tag object. It points to a single git object of +// any type, but tags typically are applied to commit or blob objects. It +// provides a reference that associates the target with a tag name. It also +// contains meta-information about the tag, including the tagger, tag date and +// message. +// +// Note that this is not used for lightweight tags. +// +// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags +type Tag struct { + // Hash of the tag. + Hash plumbing.Hash + // Name of the tag. + Name string + // Tagger is the one who created the tag. + Tagger Signature + // Message is an arbitrary text message. + Message string + // TargetType is the object type of the target. + TargetType plumbing.ObjectType + // Target is the hash of the target object. + Target plumbing.Hash + + s storer.EncodedObjectStorer +} + +// GetTag gets a tag from an object storer and decodes it. +func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) { + o, err := s.EncodedObject(plumbing.TagObject, h) + if err != nil { + return nil, err + } + + return DecodeTag(s, o) +} + +// DecodeTag decodes an encoded object into a *Commit and associates it to the +// given object storer. +func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) { + t := &Tag{s: s} + if err := t.Decode(o); err != nil { + return nil, err + } + + return t, nil +} + +// ID returns the object ID of the tag, not the object that the tag references. +// The returned value will always match the current value of Tag.Hash. +// +// ID is present to fulfill the Object interface. +func (t *Tag) ID() plumbing.Hash { + return t.Hash +} + +// Type returns the type of object. It always returns plumbing.TagObject. +// +// Type is present to fulfill the Object interface. +func (t *Tag) Type() plumbing.ObjectType { + return plumbing.TagObject +} + +// Decode transforms a plumbing.EncodedObject into a Tag struct. 
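// NOTE (editorial sketch, not part of the vendored upstream file): resolving an
// annotated tag to the commit it points at. Tag.Commit returns
// ErrUnsupportedObject when the tag targets something other than a commit.
func exampleTaggedCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) {
	tag, err := GetTag(s, h)
	if err != nil {
		return nil, err
	}

	fmt.Printf("tag %s by %s\n", tag.Name, tag.Tagger.String())
	return tag.Commit()
}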
+func (t *Tag) Decode(o plumbing.EncodedObject) (err error) { + if o.Type() != plumbing.TagObject { + return ErrUnsupportedObject + } + + t.Hash = o.Hash() + + reader, err := o.Reader() + if err != nil { + return err + } + defer ioutil.CheckClose(reader, &err) + + r := bufio.NewReader(reader) + for { + line, err := r.ReadBytes('\n') + if err != nil && err != io.EOF { + return err + } + + line = bytes.TrimSpace(line) + if len(line) == 0 { + break // Start of message + } + + split := bytes.SplitN(line, []byte{' '}, 2) + switch string(split[0]) { + case "object": + t.Target = plumbing.NewHash(string(split[1])) + case "type": + t.TargetType, err = plumbing.ParseObjectType(string(split[1])) + if err != nil { + return err + } + case "tag": + t.Name = string(split[1]) + case "tagger": + t.Tagger.Decode(split[1]) + } + + if err == io.EOF { + return nil + } + } + + data, err := stdioutil.ReadAll(r) + if err != nil { + return err + } + t.Message = string(data) + + return nil +} + +// Encode transforms a Tag into a plumbing.EncodedObject. +func (t *Tag) Encode(o plumbing.EncodedObject) error { + o.SetType(plumbing.TagObject) + w, err := o.Writer() + if err != nil { + return err + } + defer ioutil.CheckClose(w, &err) + + if _, err = fmt.Fprintf(w, + "object %s\ntype %s\ntag %s\ntagger ", + t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil { + return err + } + + if err = t.Tagger.Encode(w); err != nil { + return err + } + + if _, err = fmt.Fprint(w, "\n\n"); err != nil { + return err + } + + if _, err = fmt.Fprint(w, t.Message); err != nil { + return err + } + + return err +} + +// Commit returns the commit pointed to by the tag. If the tag points to a +// different type of object ErrUnsupportedObject will be returned. +func (t *Tag) Commit() (*Commit, error) { + if t.TargetType != plumbing.CommitObject { + return nil, ErrUnsupportedObject + } + + o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target) + if err != nil { + return nil, err + } + + return DecodeCommit(t.s, o) +} + +// Tree returns the tree pointed to by the tag. If the tag points to a commit +// object the tree of that commit will be returned. If the tag does not point +// to a commit or tree object ErrUnsupportedObject will be returned. +func (t *Tag) Tree() (*Tree, error) { + switch t.TargetType { + case plumbing.CommitObject: + c, err := t.Commit() + if err != nil { + return nil, err + } + + return c.Tree() + case plumbing.TreeObject: + return GetTree(t.s, t.Target) + default: + return nil, ErrUnsupportedObject + } +} + +// Blob returns the blob pointed to by the tag. If the tag points to a +// different type of object ErrUnsupportedObject will be returned. +func (t *Tag) Blob() (*Blob, error) { + if t.TargetType != plumbing.BlobObject { + return nil, ErrUnsupportedObject + } + + return GetBlob(t.s, t.Target) +} + +// Object returns the object pointed to by the tag. +func (t *Tag) Object() (Object, error) { + o, err := t.s.EncodedObject(t.TargetType, t.Target) + if err != nil { + return nil, err + } + + return DecodeObject(t.s, o) +} + +// String returns the meta information contained in the tag as a formatted +// string. +func (t *Tag) String() string { + obj, _ := t.Object() + + return fmt.Sprintf( + "%s %s\nTagger: %s\nDate: %s\n\n%s\n%s", + plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat), + t.Message, objectAsString(obj), + ) +} + +// TagIter provides an iterator for a set of tags. 
+type TagIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewTagIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *TagIter that iterates over all +// tags contained in the storer.EncodedObjectIter. +// +// Any non-tag object returned by the storer.EncodedObjectIter is skipped. +func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter { + return &TagIter{iter, s} +} + +// Next moves the iterator to the next tag and returns a pointer to it. If +// there are no more tags, it returns io.EOF. +func (iter *TagIter) Next() (*Tag, error) { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + return DecodeTag(iter.s, obj) +} + +// ForEach call the cb function for each tag contained on this iter until +// an error happends or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *TagIter) ForEach(cb func(*Tag) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + t, err := DecodeTag(iter.s, obj) + if err != nil { + return err + } + + return cb(t) + }) +} + +func objectAsString(obj Object) string { + switch o := obj.(type) { + case *Commit: + return o.String() + default: + return "" + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go new file mode 100644 index 0000000000000000000000000000000000000000..e70b5cd32ead7118be99dc83d32ababc4de14116 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go @@ -0,0 +1,449 @@ +package object + +import ( + "bufio" + "errors" + "fmt" + "io" + "path" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/filemode" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +const ( + maxTreeDepth = 1024 + startingStackSize = 8 +) + +// New errors defined by this package. +var ( + ErrMaxTreeDepth = errors.New("maximum tree depth exceeded") + ErrFileNotFound = errors.New("file not found") + ErrDirectoryNotFound = errors.New("directory not found") +) + +// Tree is basically like a directory - it references a bunch of other trees +// and/or blobs (i.e. files and sub-directories) +type Tree struct { + Entries []TreeEntry + Hash plumbing.Hash + + s storer.EncodedObjectStorer + m map[string]*TreeEntry +} + +// GetTree gets a tree from an object storer and decodes it. +func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) { + o, err := s.EncodedObject(plumbing.TreeObject, h) + if err != nil { + return nil, err + } + + return DecodeTree(s, o) +} + +// DecodeTree decodes an encoded object into a *Tree and associates it to the +// given object storer. +func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) { + t := &Tree{s: s} + if err := t.Decode(o); err != nil { + return nil, err + } + + return t, nil +} + +// TreeEntry represents a file +type TreeEntry struct { + Name string + Mode filemode.FileMode + Hash plumbing.Hash +} + +// File returns the hash of the file identified by the `path` argument. +// The path is interpreted as relative to the tree receiver. 
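// NOTE (editorial sketch, not part of the vendored upstream file): resolving a
// nested sub-tree by path and listing its immediate entries. "docs/images" is
// only an assumed example path; ErrDirectoryNotFound is returned when a path
// component is missing or is not a tree.
func exampleListSubdir(root *Tree) ([]TreeEntry, error) {
	sub, err := root.Tree("docs/images")
	if err != nil {
		return nil, err
	}

	// Entries holds direct children only; use a TreeWalker or Files() for a
	// recursive listing.
	return sub.Entries, nil
}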
+func (t *Tree) File(path string) (*File, error) { + e, err := t.findEntry(path) + if err != nil { + return nil, ErrFileNotFound + } + + blob, err := GetBlob(t.s, e.Hash) + if err != nil { + if err == plumbing.ErrObjectNotFound { + return nil, ErrFileNotFound + } + return nil, err + } + + return NewFile(path, e.Mode, blob), nil +} + +// Tree returns the tree identified by the `path` argument. +// The path is interpreted as relative to the tree receiver. +func (t *Tree) Tree(path string) (*Tree, error) { + e, err := t.findEntry(path) + if err != nil { + return nil, ErrDirectoryNotFound + } + + tree, err := GetTree(t.s, e.Hash) + if err == plumbing.ErrObjectNotFound { + return nil, ErrDirectoryNotFound + } + + return tree, err +} + +// TreeEntryFile returns the *File for a given *TreeEntry. +func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) { + blob, err := GetBlob(t.s, e.Hash) + if err != nil { + return nil, err + } + + return NewFile(e.Name, e.Mode, blob), nil +} + +func (t *Tree) findEntry(path string) (*TreeEntry, error) { + pathParts := strings.Split(path, "/") + + var tree *Tree + var err error + for tree = t; len(pathParts) > 1; pathParts = pathParts[1:] { + if tree, err = tree.dir(pathParts[0]); err != nil { + return nil, err + } + } + + return tree.entry(pathParts[0]) +} + +func (t *Tree) dir(baseName string) (*Tree, error) { + entry, err := t.entry(baseName) + if err != nil { + return nil, ErrDirectoryNotFound + } + + obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash) + if err != nil { + return nil, err + } + + tree := &Tree{s: t.s} + tree.Decode(obj) + + return tree, nil +} + +var errEntryNotFound = errors.New("entry not found") + +func (t *Tree) entry(baseName string) (*TreeEntry, error) { + if t.m == nil { + t.buildMap() + } + entry, ok := t.m[baseName] + if !ok { + return nil, errEntryNotFound + } + + return entry, nil +} + +// Files returns a FileIter allowing to iterate over the Tree +func (t *Tree) Files() *FileIter { + return NewFileIter(t.s, t) +} + +// ID returns the object ID of the tree. The returned value will always match +// the current value of Tree.Hash. +// +// ID is present to fulfill the Object interface. +func (t *Tree) ID() plumbing.Hash { + return t.Hash +} + +// Type returns the type of object. It always returns plumbing.TreeObject. +func (t *Tree) Type() plumbing.ObjectType { + return plumbing.TreeObject +} + +// Decode transform an plumbing.EncodedObject into a Tree struct +func (t *Tree) Decode(o plumbing.EncodedObject) (err error) { + if o.Type() != plumbing.TreeObject { + return ErrUnsupportedObject + } + + t.Hash = o.Hash() + if o.Size() == 0 { + return nil + } + + t.Entries = nil + t.m = nil + + reader, err := o.Reader() + if err != nil { + return err + } + defer ioutil.CheckClose(reader, &err) + + r := bufio.NewReader(reader) + for { + str, err := r.ReadString(' ') + if err != nil { + if err == io.EOF { + break + } + + return err + } + str = str[:len(str)-1] // strip last byte (' ') + + mode, err := filemode.New(str) + if err != nil { + return err + } + + name, err := r.ReadString(0) + if err != nil && err != io.EOF { + return err + } + + var hash plumbing.Hash + if _, err = io.ReadFull(r, hash[:]); err != nil { + return err + } + + baseName := name[:len(name)-1] + t.Entries = append(t.Entries, TreeEntry{ + Hash: hash, + Mode: mode, + Name: baseName, + }) + } + + return nil +} + +// Encode transforms a Tree into a plumbing.EncodedObject. 
+func (t *Tree) Encode(o plumbing.EncodedObject) error { + o.SetType(plumbing.TreeObject) + w, err := o.Writer() + if err != nil { + return err + } + + var size int + defer ioutil.CheckClose(w, &err) + for _, entry := range t.Entries { + n, err := fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name) + if err != nil { + return err + } + + size += n + n, err = w.Write([]byte{0x00}) + if err != nil { + return err + } + + size += n + n, err = w.Write([]byte(entry.Hash[:])) + if err != nil { + return err + } + size += n + } + + o.SetSize(int64(size)) + return err +} + +func (t *Tree) buildMap() { + t.m = make(map[string]*TreeEntry) + for i := 0; i < len(t.Entries); i++ { + t.m[t.Entries[i].Name] = &t.Entries[i] + } +} + +// Diff returns a list of changes between this tree and the provided one +func (from *Tree) Diff(to *Tree) (Changes, error) { + return DiffTree(from, to) +} + +// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree. +type treeEntryIter struct { + t *Tree + pos int +} + +func (iter *treeEntryIter) Next() (TreeEntry, error) { + if iter.pos >= len(iter.t.Entries) { + return TreeEntry{}, io.EOF + } + iter.pos++ + return iter.t.Entries[iter.pos-1], nil +} + +// TreeWalker provides a means of walking through all of the entries in a Tree. +type TreeWalker struct { + stack []treeEntryIter + base string + recursive bool + + s storer.EncodedObjectStorer + t *Tree +} + +// NewTreeWalker returns a new TreeWalker for the given tree. +// +// It is the caller's responsibility to call Close() when finished with the +// tree walker. +func NewTreeWalker(t *Tree, recursive bool) *TreeWalker { + stack := make([]treeEntryIter, 0, startingStackSize) + stack = append(stack, treeEntryIter{t, 0}) + + return &TreeWalker{ + stack: stack, + recursive: recursive, + + s: t.s, + t: t, + } +} + +// Next returns the next object from the tree. Objects are returned in order +// and subtrees are included. After the last object has been returned further +// calls to Next() will return io.EOF. +// +// In the current implementation any objects which cannot be found in the +// underlying repository will be skipped automatically. It is possible that this +// may change in future versions. +func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) { + var obj Object + for { + current := len(w.stack) - 1 + if current < 0 { + // Nothing left on the stack so we're finished + err = io.EOF + return + } + + if current > maxTreeDepth { + // We're probably following bad data or some self-referencing tree + err = ErrMaxTreeDepth + return + } + + entry, err = w.stack[current].Next() + if err == io.EOF { + // Finished with the current tree, move back up to the parent + w.stack = w.stack[:current] + w.base, _ = path.Split(w.base) + w.base = path.Clean(w.base) // Remove trailing slash + continue + } + + if err != nil { + return + } + + if entry.Mode == filemode.Dir { + obj, err = GetTree(w.s, entry.Hash) + } + + name = path.Join(w.base, entry.Name) + + if err != nil { + err = io.EOF + return + } + + break + } + + if !w.recursive { + return + } + + if t, ok := obj.(*Tree); ok { + w.stack = append(w.stack, treeEntryIter{t, 0}) + w.base = path.Join(w.base, entry.Name) + } + + return +} + +// Tree returns the tree that the tree walker most recently operated on. +func (w *TreeWalker) Tree() *Tree { + current := len(w.stack) - 1 + if w.stack[current].pos == 0 { + current-- + } + + if current < 0 { + return nil + } + + return w.stack[current].t +} + +// Close releases any resources used by the TreeWalker. 
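// NOTE (editorial sketch, not part of the vendored upstream file): walking a
// tree recursively with the TreeWalker above and collecting blob paths. Next
// returns io.EOF once all entries have been visited, and Close releases the
// walker when done.
func exampleWalkBlobs(t *Tree) ([]string, error) {
	w := NewTreeWalker(t, true)
	defer w.Close()

	var paths []string
	for {
		name, entry, err := w.Next()
		if err == io.EOF {
			return paths, nil
		}
		if err != nil {
			return nil, err
		}

		if entry.Mode != filemode.Dir {
			paths = append(paths, name)
		}
	}
}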
+func (w *TreeWalker) Close() { + w.stack = nil +} + +// TreeIter provides an iterator for a set of trees. +type TreeIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewTreeIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *TreeIter that iterates over all +// tree contained in the storer.EncodedObjectIter. +// +// Any non-tree object returned by the storer.EncodedObjectIter is skipped. +func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter { + return &TreeIter{iter, s} +} + +// Next moves the iterator to the next tree and returns a pointer to it. If +// there are no more trees, it returns io.EOF. +func (iter *TreeIter) Next() (*Tree, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + if obj.Type() != plumbing.TreeObject { + continue + } + + return DecodeTree(iter.s, obj) + } +} + +// ForEach call the cb function for each tree contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *TreeIter) ForEach(cb func(*Tree) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + if obj.Type() != plumbing.TreeObject { + return nil + } + + t, err := DecodeTree(iter.s, obj) + if err != nil { + return err + } + + return cb(t) + }) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go new file mode 100644 index 0000000000000000000000000000000000000000..4da82989d900125d904d91e15f7502b872bf1203 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go @@ -0,0 +1,142 @@ +package object + +// A treenoder is a helper type that wraps git trees into merkletrie +// noders. +// +// As a merkletrie noder doesn't understand the concept of modes (e.g. +// file permissions), the treenoder includes the mode of the git tree in +// the hash, so changes in the modes will be detected as modifications +// to the file contents by the merkletrie difftree algorithm. This is +// consistent with how the "git diff-tree" command works. +import ( + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/filemode" + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +type treeNoder struct { + parent *Tree // the root node is its own parent + name string // empty string for the root node + mode filemode.FileMode + hash plumbing.Hash + children []noder.Noder // memoized +} + +func newTreeNoder(t *Tree) *treeNoder { + if t == nil { + return &treeNoder{} + } + + return &treeNoder{ + parent: t, + name: "", + mode: filemode.Dir, + hash: t.Hash, + } +} + +func (t *treeNoder) isRoot() bool { + return t.name == "" +} + +func (t *treeNoder) String() string { + return "treeNoder <" + t.name + ">" +} + +// The hash of a treeNoder is the result of concatenating the hash of +// its contents and its mode; that way the difftree algorithm will +// detect changes in the contents of files and also in their mode. +// +// Files with Regular and Deprecated file modes are considered the same +// for the purpose of difftree, so Regular will be used as the mode for +// Deprecated files here. +func (t *treeNoder) Hash() []byte { + if t.mode == filemode.Deprecated { + return append(t.hash[:], filemode.Regular.Bytes()...) + } + return append(t.hash[:], t.mode.Bytes()...) 
+} + +func (t *treeNoder) Name() string { + return t.name +} + +func (t *treeNoder) IsDir() bool { + return t.mode == filemode.Dir +} + +// Children will return the children of a treenoder as treenoders, +// building them from the children of the wrapped git tree. +func (t *treeNoder) Children() ([]noder.Noder, error) { + if t.mode != filemode.Dir { + return noder.NoChildren, nil + } + + // children are memoized for efficiency + if t.children != nil { + return t.children, nil + } + + // the parent of the returned children will be ourself as a tree if + // we are a not the root treenoder. The root is special as it + // is is own parent. + parent := t.parent + if !t.isRoot() { + var err error + if parent, err = t.parent.Tree(t.name); err != nil { + return nil, err + } + } + + return transformChildren(parent) +} + +// Returns the children of a tree as treenoders. +// Efficiency is key here. +func transformChildren(t *Tree) ([]noder.Noder, error) { + var err error + var e TreeEntry + + // there will be more tree entries than children in the tree, + // due to submodules and empty directories, but I think it is still + // worth it to pre-allocate the whole array now, even if sometimes + // is bigger than needed. + ret := make([]noder.Noder, 0, len(t.Entries)) + + walker := NewTreeWalker(t, false) // don't recurse + // don't defer walker.Close() for efficiency reasons. + for { + _, e, err = walker.Next() + if err == io.EOF { + break + } + if err != nil { + walker.Close() + return nil, err + } + + ret = append(ret, &treeNoder{ + parent: t, + name: e.Name, + mode: e.Mode, + hash: e.Hash, + }) + } + walker.Close() + + return ret, nil +} + +// len(t.tree.Entries) != the number of elements walked by treewalker +// for some reason because of empty directories, submodules, etc, so we +// have to walk here. +func (t *treeNoder) NumChildren() (int, error) { + children, err := t.Children() + if err != nil { + return 0, err + } + + return len(children), nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go new file mode 100644 index 0000000000000000000000000000000000000000..7d644bcb0de88f5155c76a0d921aa9d95e79539c --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go @@ -0,0 +1,113 @@ +package packp + +import ( + "fmt" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/storage/memory" +) + +// AdvRefs values represent the information transmitted on an +// advertised-refs message. Values from this type are not zero-value +// safe, use the New function instead. +type AdvRefs struct { + // Prefix stores prefix payloads. + // + // When using this message over (smart) HTTP, you have to add a pktline + // before the whole thing with the following payload: + // + // '# service=$servicename" LF + // + // Moreover, some (all) git HTTP smart servers will send a flush-pkt + // just after the first pkt-line. + // + // To accommodate both situations, the Prefix field allow you to store + // any data you want to send before the actual pktlines. It will also + // be filled up with whatever is found on the line. + Prefix [][]byte + // Head stores the resolved HEAD reference if present. + // This can be present with git-upload-pack, not with git-receive-pack. + Head *plumbing.Hash + // Capabilities are the capabilities. 
+ Capabilities *capability.List + // References are the hash references. + References map[string]plumbing.Hash + // Peeled are the peeled hash references. + Peeled map[string]plumbing.Hash + // Shallows are the shallow object ids. + Shallows []plumbing.Hash +} + +// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used. +func NewAdvRefs() *AdvRefs { + return &AdvRefs{ + Prefix: [][]byte{}, + Capabilities: capability.NewList(), + References: make(map[string]plumbing.Hash), + Peeled: make(map[string]plumbing.Hash), + Shallows: []plumbing.Hash{}, + } +} + +func (a *AdvRefs) AddReference(r *plumbing.Reference) error { + switch r.Type() { + case plumbing.SymbolicReference: + v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String()) + a.Capabilities.Add(capability.SymRef, v) + case plumbing.HashReference: + a.References[r.Name().String()] = r.Hash() + default: + return plumbing.ErrInvalidType + } + + return nil +} + +func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) { + s := memory.ReferenceStorage{} + if err := addRefs(s, a); err != nil { + return s, plumbing.NewUnexpectedError(err) + } + + return s, nil +} + +func addRefs(s storer.ReferenceStorer, ar *AdvRefs) error { + for name, hash := range ar.References { + ref := plumbing.NewReferenceFromStrings(name, hash.String()) + if err := s.SetReference(ref); err != nil { + return err + } + } + + return addSymbolicRefs(s, ar) +} + +func addSymbolicRefs(s storer.ReferenceStorer, ar *AdvRefs) error { + if !hasSymrefs(ar) { + return nil + } + + for _, symref := range ar.Capabilities.Get(capability.SymRef) { + chunks := strings.Split(symref, ":") + if len(chunks) != 2 { + err := fmt.Errorf("bad number of `:` in symref value (%q)", symref) + return plumbing.NewUnexpectedError(err) + } + name := plumbing.ReferenceName(chunks[0]) + target := plumbing.ReferenceName(chunks[1]) + ref := plumbing.NewSymbolicReference(name, target) + if err := s.SetReference(ref); err != nil { + return nil + } + } + + return nil +} + +func hasSymrefs(ar *AdvRefs) bool { + return ar.Capabilities.Supports(capability.SymRef) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go new file mode 100644 index 0000000000000000000000000000000000000000..e0a449ef64a5b54fd811de9aa1109202237bbc79 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go @@ -0,0 +1,288 @@ +package packp + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +// Decode reads the next advertised-refs message form its input and +// stores it in the AdvRefs. +func (a *AdvRefs) Decode(r io.Reader) error { + d := newAdvRefsDecoder(r) + return d.Decode(a) +} + +type advRefsDecoder struct { + s *pktline.Scanner // a pkt-line scanner from the input stream + line []byte // current pkt-line contents, use parser.nextLine() to make it advance + nLine int // current pkt-line number for debugging, begins at 1 + hash plumbing.Hash // last hash read + err error // sticky error, use the parser.error() method to fill this out + data *AdvRefs // parsed data is stored here +} + +var ( + // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised + // references message. + ErrEmptyAdvRefs = errors.New("empty advertised-ref message") + // ErrEmptyInput is returned by Decode if the input is empty. 
+ ErrEmptyInput = errors.New("empty input") +) + +func newAdvRefsDecoder(r io.Reader) *advRefsDecoder { + return &advRefsDecoder{ + s: pktline.NewScanner(r), + } +} + +func (d *advRefsDecoder) Decode(v *AdvRefs) error { + d.data = v + + for state := decodePrefix; state != nil; { + state = state(d) + } + + return d.err +} + +type decoderStateFn func(*advRefsDecoder) decoderStateFn + +// fills out the parser stiky error +func (d *advRefsDecoder) error(format string, a ...interface{}) { + msg := fmt.Sprintf( + "pkt-line %d: %s", d.nLine, + fmt.Sprintf(format, a...), + ) + + d.err = NewErrUnexpectedData(msg, d.line) +} + +// Reads a new pkt-line from the scanner, makes its payload available as +// p.line and increments p.nLine. A successful invocation returns true, +// otherwise, false is returned and the sticky error is filled out +// accordingly. Trims eols at the end of the payloads. +func (d *advRefsDecoder) nextLine() bool { + d.nLine++ + + if !d.s.Scan() { + if d.err = d.s.Err(); d.err != nil { + return false + } + + if d.nLine == 1 { + d.err = ErrEmptyInput + return false + } + + d.error("EOF") + return false + } + + d.line = d.s.Bytes() + d.line = bytes.TrimSuffix(d.line, eol) + + return true +} + +// The HTTP smart prefix is often followed by a flush-pkt. +func decodePrefix(d *advRefsDecoder) decoderStateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if !isPrefix(d.line) { + return decodeFirstHash + } + + tmp := make([]byte, len(d.line)) + copy(tmp, d.line) + d.data.Prefix = append(d.data.Prefix, tmp) + if ok := d.nextLine(); !ok { + return nil + } + + if !isFlush(d.line) { + return decodeFirstHash + } + + d.data.Prefix = append(d.data.Prefix, pktline.Flush) + if ok := d.nextLine(); !ok { + return nil + } + + return decodeFirstHash +} + +func isPrefix(payload []byte) bool { + return len(payload) > 0 && payload[0] == '#' +} + +// If the first hash is zero, then a no-refs is coming. Otherwise, a +// list-of-refs is coming, and the hash will be followed by the first +// advertised ref. +func decodeFirstHash(p *advRefsDecoder) decoderStateFn { + // If the repository is empty, we receive a flush here (HTTP). 
+ if isFlush(p.line) { + p.err = ErrEmptyAdvRefs + return nil + } + + if len(p.line) < hashSize { + p.error("cannot read hash, pkt-line too short") + return nil + } + + if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil { + p.error("invalid hash text: %s", err) + return nil + } + + p.line = p.line[hashSize:] + + if p.hash.IsZero() { + return decodeSkipNoRefs + } + + return decodeFirstRef +} + +// Skips SP "capabilities^{}" NUL +func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn { + if len(p.line) < len(noHeadMark) { + p.error("too short zero-id ref") + return nil + } + + if !bytes.HasPrefix(p.line, noHeadMark) { + p.error("malformed zero-id ref") + return nil + } + + p.line = p.line[len(noHeadMark):] + + return decodeCaps +} + +// decode the refname, expectes SP refname NULL +func decodeFirstRef(l *advRefsDecoder) decoderStateFn { + if len(l.line) < 3 { + l.error("line too short after hash") + return nil + } + + if !bytes.HasPrefix(l.line, sp) { + l.error("no space after hash") + return nil + } + l.line = l.line[1:] + + chunks := bytes.SplitN(l.line, null, 2) + if len(chunks) < 2 { + l.error("NULL not found") + return nil + } + ref := chunks[0] + l.line = chunks[1] + + if bytes.Equal(ref, []byte(head)) { + l.data.Head = &l.hash + } else { + l.data.References[string(ref)] = l.hash + } + + return decodeCaps +} + +func decodeCaps(p *advRefsDecoder) decoderStateFn { + if err := p.data.Capabilities.Decode(p.line); err != nil { + p.error("invalid capabilities: %s", err) + return nil + } + + return decodeOtherRefs +} + +// The refs are either tips (obj-id SP refname) or a peeled (obj-id SP refname^{}). +// If there are no refs, then there might be a shallow or flush-ptk. +func decodeOtherRefs(p *advRefsDecoder) decoderStateFn { + if ok := p.nextLine(); !ok { + return nil + } + + if bytes.HasPrefix(p.line, shallow) { + return decodeShallow + } + + if len(p.line) == 0 { + return nil + } + + saveTo := p.data.References + if bytes.HasSuffix(p.line, peeled) { + p.line = bytes.TrimSuffix(p.line, peeled) + saveTo = p.data.Peeled + } + + ref, hash, err := readRef(p.line) + if err != nil { + p.error("%s", err) + return nil + } + saveTo[ref] = hash + + return decodeOtherRefs +} + +// Reads a ref-name +func readRef(data []byte) (string, plumbing.Hash, error) { + chunks := bytes.Split(data, sp) + switch { + case len(chunks) == 1: + return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found") + case len(chunks) > 2: + return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found") + default: + return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil + } +} + +// Keeps reading shallows until a flush-pkt is found +func decodeShallow(p *advRefsDecoder) decoderStateFn { + if !bytes.HasPrefix(p.line, shallow) { + p.error("malformed shallow prefix, found %q... 
instead", p.line[:len(shallow)]) + return nil + } + p.line = bytes.TrimPrefix(p.line, shallow) + + if len(p.line) != hashSize { + p.error(fmt.Sprintf( + "malformed shallow hash: wrong length, expected 40 bytes, read %d bytes", + len(p.line))) + return nil + } + + text := p.line[:hashSize] + var h plumbing.Hash + if _, err := hex.Decode(h[:], text); err != nil { + p.error("invalid hash text: %s", err) + return nil + } + + p.data.Shallows = append(p.data.Shallows, h) + + if ok := p.nextLine(); !ok { + return nil + } + + if len(p.line) == 0 { + return nil // succesfull parse of the advertised-refs message + } + + return decodeShallow +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go new file mode 100644 index 0000000000000000000000000000000000000000..e981120e5425ff4824e53080d5052671cc8bca2f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go @@ -0,0 +1,156 @@ +package packp + +import ( + "bytes" + "io" + "sort" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" +) + +// Encode writes the AdvRefs encoding to a writer. +// +// All the payloads will end with a newline character. Capabilities, +// references and shallows are written in alphabetical order, except for +// peeled references that always follow their corresponding references. +func (a *AdvRefs) Encode(w io.Writer) error { + e := newAdvRefsEncoder(w) + return e.Encode(a) +} + +type advRefsEncoder struct { + data *AdvRefs // data to encode + pe *pktline.Encoder // where to write the encoded data + err error // sticky error +} + +func newAdvRefsEncoder(w io.Writer) *advRefsEncoder { + return &advRefsEncoder{ + pe: pktline.NewEncoder(w), + } +} + +func (e *advRefsEncoder) Encode(v *AdvRefs) error { + e.data = v + + for state := encodePrefix; state != nil; { + state = state(e) + } + + return e.err +} + +type encoderStateFn func(*advRefsEncoder) encoderStateFn + +func encodePrefix(e *advRefsEncoder) encoderStateFn { + for _, p := range e.data.Prefix { + if bytes.Equal(p, pktline.Flush) { + if e.err = e.pe.Flush(); e.err != nil { + return nil + } + continue + } + if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil { + return nil + } + } + + return encodeFirstLine +} + +// Adds the first pkt-line payload: head hash, head ref and capabilities. +// Also handle the special case when no HEAD ref is found. +func encodeFirstLine(e *advRefsEncoder) encoderStateFn { + head := formatHead(e.data.Head) + separator := formatSeparator(e.data.Head) + capabilities := formatCaps(e.data.Capabilities) + + if e.err = e.pe.Encodef("%s %s\x00%s\n", head, separator, capabilities); e.err != nil { + return nil + } + + return encodeRefs +} + +func formatHead(h *plumbing.Hash) string { + if h == nil { + return plumbing.ZeroHash.String() + } + + return h.String() +} + +func formatSeparator(h *plumbing.Hash) string { + if h == nil { + return noHead + } + + return head +} + +func formatCaps(c *capability.List) string { + if c == nil { + return "" + } + + return c.String() +} + +// Adds the (sorted) refs: hash SP refname EOL +// and their peeled refs if any. 
+func encodeRefs(e *advRefsEncoder) encoderStateFn { + refs := sortRefs(e.data.References) + for _, r := range refs { + hash, _ := e.data.References[r] + if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { + return nil + } + + if hash, ok := e.data.Peeled[r]; ok { + if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { + return nil + } + } + } + + return encodeShallow +} + +func sortRefs(m map[string]plumbing.Hash) []string { + ret := make([]string, 0, len(m)) + for k := range m { + ret = append(ret, k) + } + sort.Strings(ret) + + return ret +} + +// Adds the (sorted) shallows: "shallow" SP hash EOL +func encodeShallow(e *advRefsEncoder) encoderStateFn { + sorted := sortShallows(e.data.Shallows) + for _, hash := range sorted { + if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { + return nil + } + } + + return encodeFlush +} + +func sortShallows(c []plumbing.Hash) []string { + ret := []string{} + for _, h := range c { + ret = append(ret, h.String()) + } + sort.Strings(ret) + + return ret +} + +func encodeFlush(e *advRefsEncoder) encoderStateFn { + e.err = e.pe.Flush() + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go new file mode 100644 index 0000000000000000000000000000000000000000..96d93f697c3907fbc5ae711f5d49bb426d95ff2d --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go @@ -0,0 +1,252 @@ +// Package capability defines the server and client capabilities. +package capability + +// Capability describes a server or client capability. +type Capability string + +func (n Capability) String() string { + return string(n) +} + +const ( + // MultiACK capability allows the server to return "ACK obj-id continue" as + // soon as it finds a commit that it can use as a common base, between the + // client's wants and the client's have set. + // + // By sending this early, the server can potentially head off the client + // from walking any further down that particular branch of the client's + // repository history. The client may still need to walk down other + // branches, sending have lines for those, until the server has a + // complete cut across the DAG, or the client has said "done". + // + // Without multi_ack, a client sends have lines in --date-order until + // the server has found a common base. That means the client will send + // have lines that are already known by the server to be common, because + // they overlap in time with another branch that the server hasn't found + // a common base on yet. + // + // For example suppose the client has commits in caps that the server + // doesn't and the server has commits in lower case that the client + // doesn't, as in the following diagram: + // + // +---- u ---------------------- x + // / +----- y + // / / + // a -- b -- c -- d -- E -- F + // \ + // +--- Q -- R -- S + // + // If the client wants x,y and starts out by saying have F,S, the server + // doesn't know what F,S is. Eventually the client says "have d" and + // the server sends "ACK d continue" to let the client know to stop + // walking down that line (so don't send c-b-a), but it's not done yet, + // it needs a base for x. The client keeps going with S-R-Q, until a + // gets reached, at which point the server has a clear base and it all + // ends. 
+ // + // Without multi_ack the client would have sent that c-b-a chain anyway, + // interleaved with S-R-Q. + MultiACK Capability = "multi_ack" + // MultiACKDetailed is an extension of multi_ack that permits client to + // better understand the server's in-memory state. + MultiACKDetailed Capability = "multi_ack_detailed" + // NoDone should only be used with the smart HTTP protocol. If + // multi_ack_detailed and no-done are both present, then the sender is + // free to immediately send a pack following its first "ACK obj-id ready" + // message. + // + // Without no-done in the smart HTTP protocol, the server session would + // end and the client has to make another trip to send "done" before + // the server can send the pack. no-done removes the last round and + // thus slightly reduces latency. + NoDone Capability = "no-done" + // ThinPack is one with deltas which reference base objects not + // contained within the pack (but are known to exist at the receiving + // end). This can reduce the network traffic significantly, but it + // requires the receiving end to know how to "thicken" these packs by + // adding the missing bases to the pack. + // + // The upload-pack server advertises 'thin-pack' when it can generate + // and send a thin pack. A client requests the 'thin-pack' capability + // when it understands how to "thicken" it, notifying the server that + // it can receive such a pack. A client MUST NOT request the + // 'thin-pack' capability if it cannot turn a thin pack into a + // self-contained pack. + // + // Receive-pack, on the other hand, is assumed by default to be able to + // handle thin packs, but can ask the client not to use the feature by + // advertising the 'no-thin' capability. A client MUST NOT send a thin + // pack if the server advertises the 'no-thin' capability. + // + // The reasons for this asymmetry are historical. The receive-pack + // program did not exist until after the invention of thin packs, so + // historically the reference implementation of receive-pack always + // understood thin packs. Adding 'no-thin' later allowed receive-pack + // to disable the feature in a backwards-compatible manner. + ThinPack Capability = "thin-pack" + // Sideband means that server can send, and client understand multiplexed + // progress reports and error info interleaved with the packfile itself. + // + // These two options are mutually exclusive. A modern client always + // favors Sideband64k. + // + // Either mode indicates that the packfile data will be streamed broken + // up into packets of up to either 1000 bytes in the case of 'side_band', + // or 65520 bytes in the case of 'side_band_64k'. Each packet is made up + // of a leading 4-byte pkt-line length of how much data is in the packet, + // followed by a 1-byte stream code, followed by the actual data. + // + // The stream code can be one of: + // + // 1 - pack data + // 2 - progress messages + // 3 - fatal error message just before stream aborts + // + // The "side-band-64k" capability came about as a way for newer clients + // that can handle much larger packets to request packets that are + // actually crammed nearly full, while maintaining backward compatibility + // for the older clients. + // + // Further, with side-band and its up to 1000-byte messages, it's actually + // 999 bytes of payload and 1 byte for the stream code. With side-band-64k, + // same deal, you have up to 65519 bytes of data and 1 byte for the stream + // code. 
+ // + // The client MUST send only maximum of one of "side-band" and "side- + // band-64k". Server MUST diagnose it as an error if client requests + // both. + Sideband Capability = "side-band" + Sideband64k Capability = "side-band-64k" + // OFSDelta server can send, and client understand PACKv2 with delta + // referring to its base by position in pack rather than by an obj-id. That + // is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile. + OFSDelta Capability = "ofs-delta" + // Agent the server may optionally send this capability to notify the client + // that the server is running version `X`. The client may optionally return + // its own agent string by responding with an `agent=Y` capability (but it + // MUST NOT do so if the server did not mention the agent capability). The + // `X` and `Y` strings may contain any printable ASCII characters except + // space (i.e., the byte range 32 < x < 127), and are typically of the form + // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely + // informative for statistics and debugging purposes, and MUST NOT be used + // to programmatically assume the presence or absence of particular features. + Agent Capability = "agent" + // Shallow capability adds "deepen", "shallow" and "unshallow" commands to + // the fetch-pack/upload-pack protocol so clients can request shallow + // clones. + Shallow Capability = "shallow" + // DeepenSince adds "deepen-since" command to fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific time, instead of depth. Internally it's equivalent of doing + // "rev-list --max-age=<timestamp>" on the server side. "deepen-since" + // cannot be used with "deepen". + DeepenSince Capability = "deepen-since" + // DeepenNot adds "deepen-not" command to fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific revision, instead of depth. Internally it's equivalent of + // doing "rev-list --not <rev>" on the server side. "deepen-not" + // cannot be used with "deepen", but can be used with "deepen-since". + DeepenNot Capability = "deepen-not" + // DeepenRelative if this capability is requested by the client, the + // semantics of "deepen" command is changed. The "depth" argument is the + // depth from the current shallow boundary, instead of the depth from + // remote refs. + DeepenRelative Capability = "deepen-relative" + // NoProgress the client was started with "git clone -q" or something, and + // doesn't want that side band 2. Basically the client just says "I do not + // wish to receive stream 2 on sideband, so do not send it to me, and if + // you did, I will drop it on the floor anyway". However, the sideband + // channel 3 is still used for error responses. + NoProgress Capability = "no-progress" + // IncludeTag capability is about sending annotated tags if we are + // sending objects they point to. If we pack an object to the client, and + // a tag object points exactly at that object, we pack the tag object too. + // In general this allows a client to get all new annotated tags when it + // fetches a branch, in a single network connection. + // + // Clients MAY always send include-tag, hardcoding it into a request when + // the server advertises this capability. The decision for a client to + // request include-tag only has to do with the client's desires for tag + // data, whether or not a server had advertised objects in the + // refs/tags/* namespace. 
+ // + // Servers MUST pack the tags if their referrant is packed and the client + // has requested include-tags. + // + // Clients MUST be prepared for the case where a server has ignored + // include-tag and has not actually sent tags in the pack. In such + // cases the client SHOULD issue a subsequent fetch to acquire the tags + // that include-tag would have otherwise given the client. + // + // The server SHOULD send include-tag, if it supports it, regardless + // of whether or not there are tags available. + IncludeTag Capability = "include-tag" + // ReportStatus the receive-pack process can receive a 'report-status' + // capability, which tells it that the client wants a report of what + // happened after a packfile upload and reference update. If the pushing + // client requests this capability, after unpacking and updating references + // the server will respond with whether the packfile unpacked successfully + // and if each reference was updated successfully. If any of those were not + // successful, it will send back an error message. See pack-protocol.txt + // for example messages. + ReportStatus Capability = "report-status" + // DeleteRefs If the server sends back this capability, it means that + // it is capable of accepting a zero-id value as the target + // value of a reference update. It is not sent back by the client, it + // simply informs the client that it can be sent zero-id values + // to delete references + DeleteRefs Capability = "delete-refs" + // Quiet If the receive-pack server advertises this capability, it is + // capable of silencing human-readable progress output which otherwise may + // be shown when processing the received pack. A send-pack client should + // respond with the 'quiet' capability to suppress server-side progress + // reporting if the local progress reporting is also being suppressed + // (e.g., via `push -q`, or if stderr does not go to a tty). + Quiet Capability = "quiet" + // Atomic If the server sends this capability it is capable of accepting + // atomic pushes. If the pushing client requests this capability, the server + // will update the refs in one atomic transaction. Either all refs are + // updated or none. + Atomic Capability = "atomic" + // PushOptions If the server sends this capability it is able to accept + // push options after the update commands have been sent, but before the + // packfile is streamed. If the pushing client requests this capability, + // the server will pass the options to the pre- and post- receive hooks + // that process this push request. + PushOptions Capability = "push-options" + // AllowTipSHA1InWant if the upload-pack server advertises this capability, + // fetch-pack may send "want" lines with SHA-1s that exist at the server but + // are not advertised by upload-pack. + AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want" + // AllowReachableSHA1InWant if the upload-pack server advertises this + // capability, fetch-pack may send "want" lines with SHA-1s that exist at + // the server but are not advertised by upload-pack. + AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want" + // PushCert the receive-pack server that advertises this capability is + // willing to accept a signed push certificate, and asks the <nonce> to be + // included in the push certificate. A send-pack client MUST NOT + // send a push-cert packet unless the receive-pack server advertises + // this capability. + PushCert Capability = "push-cert" + // SymRef symbolic reference support for better negotiation. 
+ SymRef Capability = "symref" +) + +const DefaultAgent = "go-git/4.x" + +var valid = map[Capability]bool{ + MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true, + Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true, + Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true, + NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true, + Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true, + AllowReachableSHA1InWant: true, PushCert: true, SymRef: true, +} + +var requiresArgument = map[Capability]bool{ + Agent: true, PushCert: true, SymRef: true, +} + +var multipleArgument = map[Capability]bool{ + SymRef: true, +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go new file mode 100644 index 0000000000000000000000000000000000000000..69fdb5134b8c487b33fa8c7ef890997d9dc2ecd9 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go @@ -0,0 +1,197 @@ +package capability + +import ( + "bytes" + "errors" + "fmt" + "strings" +) + +var ( + // ErrUnknownCapability is returned if a unknown capability is given + ErrUnknownCapability = errors.New("unknown capability") + // ErrArgumentsRequired is returned if no arguments are giving with a + // capability that requires arguments + ErrArgumentsRequired = errors.New("arguments required") + // ErrArguments is returned if arguments are given with a capabilities that + // not supports arguments + ErrArguments = errors.New("arguments not allowed") + // ErrEmtpyArgument is returned when an empty value is given + ErrEmtpyArgument = errors.New("empty argument") + // ErrMultipleArguments multiple argument given to a capabilities that not + // support it + ErrMultipleArguments = errors.New("multiple arguments not allowed") +) + +// List represents a list of capabilities +type List struct { + m map[Capability]*entry + sort []string +} + +type entry struct { + Name Capability + Values []string +} + +// NewList returns a new List of capabilities +func NewList() *List { + return &List{ + m: make(map[Capability]*entry), + } +} + +// IsEmpty returns true if the List is empty +func (l *List) IsEmpty() bool { + return len(l.sort) == 0 +} + +// Decode decodes list of capabilities from raw into the list +func (l *List) Decode(raw []byte) error { + // git 1.x receive pack used to send a leading space on its + // git-receive-pack capabilities announcement. We just trim space to be + // tolerant to space changes in different versions. + raw = bytes.TrimSpace(raw) + + if len(raw) == 0 { + return nil + } + + for _, data := range bytes.Split(raw, []byte{' '}) { + pair := bytes.SplitN(data, []byte{'='}, 2) + + c := Capability(pair[0]) + if len(pair) == 1 { + if err := l.Add(c); err != nil { + return err + } + + continue + } + + if err := l.Add(c, string(pair[1])); err != nil { + return err + } + } + + return nil +} + +// Get returns the values for a capability +func (l *List) Get(capability Capability) []string { + if _, ok := l.m[capability]; !ok { + return nil + } + + return l.m[capability].Values +} + +// Set sets a capability removing the previous values +func (l *List) Set(capability Capability, values ...string) error { + if _, ok := l.m[capability]; ok { + delete(l.m, capability) + } + + return l.Add(capability, values...) 
+} + +// Add adds a capability, values are optional +func (l *List) Add(c Capability, values ...string) error { + if err := l.validate(c, values); err != nil { + return err + } + + if !l.Supports(c) { + l.m[c] = &entry{Name: c} + l.sort = append(l.sort, c.String()) + } + + if len(values) == 0 { + return nil + } + + if !multipleArgument[c] && len(l.m[c].Values) > 0 { + return ErrMultipleArguments + } + + l.m[c].Values = append(l.m[c].Values, values...) + return nil +} + +func (l *List) validate(c Capability, values []string) error { + if _, ok := valid[c]; !ok { + return ErrUnknownCapability + } + + if requiresArgument[c] && len(values) == 0 { + return ErrArgumentsRequired + } + + if !requiresArgument[c] && len(values) != 0 { + return ErrArguments + } + + if !multipleArgument[c] && len(values) > 1 { + return ErrMultipleArguments + } + + for _, v := range values { + if v == "" { + return ErrEmtpyArgument + } + } + + return nil +} + +// Supports returns true if capability is present +func (l *List) Supports(capability Capability) bool { + _, ok := l.m[capability] + return ok +} + +// Delete deletes a capability from the List +func (l *List) Delete(capability Capability) { + if !l.Supports(capability) { + return + } + + delete(l.m, capability) + for i, c := range l.sort { + if c != string(capability) { + continue + } + + l.sort = append(l.sort[:i], l.sort[i+1:]...) + return + } +} + +// All returns a slice with all defined capabilities. +func (l *List) All() []Capability { + var cs []Capability + for _, key := range l.sort { + cs = append(cs, Capability(key)) + } + + return cs +} + +// String generates the capabilities strings, the capabilities are sorted in +// insertion order +func (l *List) String() string { + var o []string + for _, key := range l.sort { + cap := l.m[Capability(key)] + if len(cap.Values) == 0 { + o = append(o, key) + continue + } + + for _, value := range cap.Values { + o = append(o, fmt.Sprintf("%s=%s", key, value)) + } + } + + return strings.Join(o, " ") +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go new file mode 100644 index 0000000000000000000000000000000000000000..ab07ac8f74fd7fe63aaef2ee895f8c79c02acb36 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go @@ -0,0 +1,70 @@ +package packp + +import ( + "fmt" +) + +type stateFn func() stateFn + +const ( + // common + hashSize = 40 + + // advrefs + head = "HEAD" + noHead = "capabilities^{}" +) + +var ( + // common + sp = []byte(" ") + eol = []byte("\n") + eq = []byte{'='} + + // advertised-refs + null = []byte("\x00") + peeled = []byte("^{}") + noHeadMark = []byte(" capabilities^{}\x00") + + // upload-request + want = []byte("want ") + shallow = []byte("shallow ") + deepen = []byte("deepen") + deepenCommits = []byte("deepen ") + deepenSince = []byte("deepen-since ") + deepenReference = []byte("deepen-not ") + + // shallow-update + unshallow = []byte("unshallow ") + + // server-response + ack = []byte("ACK") + nak = []byte("NAK") + + // updreq + shallowNoSp = []byte("shallow") +) + +func isFlush(payload []byte) bool { + return len(payload) == 0 +} + +// ErrUnexpectedData represents an unexpected data decoding a message +type ErrUnexpectedData struct { + Msg string + Data []byte +} + +// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and +// the message given +func NewErrUnexpectedData(msg string, data []byte) error { + return &ErrUnexpectedData{Msg: msg, Data: 
data} +} + +func (err *ErrUnexpectedData) Error() string { + if len(err.Data) == 0 { + return err.Msg + } + + return fmt.Sprintf("%s (%s)", err.Msg, err.Data) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..4950d1d6625b91ba50bbc2ba3c3d324080b601f2 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go @@ -0,0 +1,724 @@ +package packp + +/* + +A nice way to trace the real data transmitted and received by git, use: + +GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git +GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git + +Here follows a copy of the current protocol specification at the time of +this writing. + +(Please notice that most http git servers will add a flush-pkt after the +first pkt-line when using HTTP smart.) + + +Documentation Common to Pack and Http Protocols +=============================================== + +ABNF Notation +------------- + +ABNF notation as described by RFC 5234 is used within the protocol documents, +except the following replacement core rules are used: +---- + HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" +---- + +We also define the following common rules: +---- + NUL = %x00 + zero-id = 40*"0" + obj-id = 40*(HEXDIGIT) + + refname = "HEAD" + refname /= "refs/" <see discussion below> +---- + +A refname is a hierarchical octet string beginning with "refs/" and +not violating the 'git-check-ref-format' command's validation rules. +More specifically, they: + +. They can include slash `/` for hierarchical (directory) + grouping, but no slash-separated component can begin with a + dot `.`. + +. They must contain at least one `/`. This enforces the presence of a + category like `heads/`, `tags/` etc. but the actual names are not + restricted. + +. They cannot have two consecutive dots `..` anywhere. + +. They cannot have ASCII control characters (i.e. bytes whose + values are lower than \040, or \177 `DEL`), space, tilde `~`, + caret `^`, colon `:`, question-mark `?`, asterisk `*`, + or open bracket `[` anywhere. + +. They cannot end with a slash `/` or a dot `.`. + +. They cannot end with the sequence `.lock`. + +. They cannot contain a sequence `@{`. + +. They cannot contain a `\\`. + + +pkt-line Format +--------------- + +Much (but not all) of the payload is described around pkt-lines. + +A pkt-line is a variable length binary string. The first four bytes +of the line, the pkt-len, indicates the total length of the line, +in hexadecimal. The pkt-len includes the 4 bytes used to contain +the length's hexadecimal representation. + +A pkt-line MAY contain binary data, so implementors MUST ensure +pkt-line parsing/formatting routines are 8-bit clean. + +A non-binary line SHOULD BE terminated by an LF, which if present +MUST be included in the total length. Receivers MUST treat pkt-lines +with non-binary data the same whether or not they contain the trailing +LF (stripping the LF if present, and not complaining when it is +missing). + +The maximum length of a pkt-line's data component is 65516 bytes. +Implementations MUST NOT send pkt-line whose length exceeds 65520 +(65516 bytes of payload + 4 bytes of length data). + +Implementations SHOULD NOT send an empty pkt-line ("0004"). + +A pkt-line with a length field of 0 ("0000"), called a flush-pkt, +is a special case and MUST be handled differently than an empty +pkt-line ("0004"). 
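+
+As a concrete sketch (not part of the protocol text itself, and assuming the
+vendored pktline encoder and scanner added later in this diff), a payload
+round-trips through the pkt-line format like this; the scanner reports a
+flush-pkt as an empty payload:
+
+----
+	package main
+
+	import (
+		"bytes"
+		"fmt"
+
+		"gopkg.in/src-d/go-git.v4/plumbing/format/pktline"
+	)
+
+	func main() {
+		var buf bytes.Buffer
+
+		e := pktline.NewEncoder(&buf)
+		e.Encode([]byte("a\n")) // written on the wire as "0006a\n"
+		e.Flush()               // written on the wire as the flush-pkt "0000"
+
+		s := pktline.NewScanner(&buf)
+		for s.Scan() {
+			// prints "a\n" for the data pkt-line, then "" for the flush-pkt
+			fmt.Printf("%q\n", s.Bytes())
+		}
+	}
+----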
+ +---- + pkt-line = data-pkt / flush-pkt + + data-pkt = pkt-len pkt-payload + pkt-len = 4*(HEXDIG) + pkt-payload = (pkt-len - 4)*(OCTET) + + flush-pkt = "0000" +---- + +Examples (as C-style strings): + +---- + pkt-line actual value + --------------------------------- + "0006a\n" "a\n" + "0005a" "a" + "000bfoobar\n" "foobar\n" + "0004" "" +---- + +Packfile transfer protocols +=========================== + +Git supports transferring data in packfiles over the ssh://, git://, http:// and +file:// transports. There exist two sets of protocols, one for pushing +data from a client to a server and another for fetching data from a +server to a client. The three transports (ssh, git, file) use the same +protocol to transfer data. http is documented in http-protocol.txt. + +The processes invoked in the canonical Git implementation are 'upload-pack' +on the server side and 'fetch-pack' on the client side for fetching data; +then 'receive-pack' on the server and 'send-pack' on the client for pushing +data. The protocol functions to have a server tell a client what is +currently on the server, then for the two to negotiate the smallest amount +of data to send in order to fully update one or the other. + +pkt-line Format +--------------- + +The descriptions below build on the pkt-line format described in +protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless +otherwise noted the usual pkt-line LF rules apply: the sender SHOULD +include a LF, but the receiver MUST NOT complain if it is not present. + +Transports +---------- +There are three transports over which the packfile protocol is +initiated. The Git transport is a simple, unauthenticated server that +takes the command (almost always 'upload-pack', though Git +servers can be configured to be globally writable, in which 'receive- +pack' initiation is also allowed) with which the client wishes to +communicate and executes it and connects it to the requesting +process. + +In the SSH transport, the client just runs the 'upload-pack' +or 'receive-pack' process on the server over the SSH protocol and then +communicates with that invoked process over the SSH connection. + +The file:// transport runs the 'upload-pack' or 'receive-pack' +process locally and communicates with it over a pipe. + +Git Transport +------------- + +The Git transport starts off by sending the command and repository +on the wire using the pkt-line format, followed by a NUL byte and a +hostname parameter, terminated by a NUL byte. + + 0032git-upload-pack /project.git\0host=myserver.com\0 + +-- + git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] + request-command = "git-upload-pack" / "git-receive-pack" / + "git-upload-archive" ; case sensitive + pathname = *( %x01-ff ) ; exclude NUL + host-parameter = "host=" hostname [ ":" port ] +-- + +Only host-parameter is allowed in the git-proto-request. Clients +MUST NOT attempt to send additional parameters. It is used for the +git-daemon name based virtual hosting. See --interpolated-path +option to git daemon, with the %H/%CH format characters. + +Basically what the Git client is doing to connect to an 'upload-pack' +process on the server side over the Git protocol is this: + + $ echo -e -n \ + "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | + nc -v example.com 9418 + +If the server refuses the request for some reasons, it could abort +gracefully with an error message. 
+ +---- + error-line = PKT-LINE("ERR" SP explanation-text) +---- + + +SSH Transport +------------- + +Initiating the upload-pack or receive-pack processes over SSH is +executing the binary on the server via SSH remote execution. +It is basically equivalent to running this: + + $ ssh git.example.com "git-upload-pack '/project.git'" + +For a server to support Git pushing and pulling for a given user over +SSH, that user needs to be able to execute one or both of those +commands via the SSH shell that they are provided on login. On some +systems, that shell access is limited to only being able to run those +two commands, or even just one of them. + +In an ssh:// format URI, it's absolute in the URI, so the '/' after +the host name (or port number) is sent as an argument, which is then +read by the remote git-upload-pack exactly as is, so it's effectively +an absolute path in the remote filesystem. + + git clone ssh://user@example.com/project.git + | + v + ssh user@example.com "git-upload-pack '/project.git'" + +In a "user@host:path" format URI, its relative to the user's home +directory, because the Git client will run: + + git clone user@example.com:project.git + | + v + ssh user@example.com "git-upload-pack 'project.git'" + +The exception is if a '~' is used, in which case +we execute it without the leading '/'. + + ssh://user@example.com/~alice/project.git, + | + v + ssh user@example.com "git-upload-pack '~alice/project.git'" + +A few things to remember here: + +- The "command name" is spelled with dash (e.g. git-upload-pack), but + this can be overridden by the client; + +- The repository path is always quoted with single quotes. + +Fetching Data From a Server +--------------------------- + +When one Git repository wants to get data that a second repository +has, the first can 'fetch' from the second. This operation determines +what data the server has that the client does not then streams that +data down to the client in packfile format. + + +Reference Discovery +------------------- + +When the client initially connects the server will immediately respond +with a listing of each reference it has (all branches and tags) along +with the object name that each reference currently points to. + + $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | + nc -v example.com 9418 + 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack + side-band side-band-64k ofs-delta shallow no-progress include-tag + 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration + 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master + 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 + 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 + 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} + 0000 + +The returned response is a pkt-line stream describing each ref and +its current value. The stream MUST be sorted by name according to +the C locale ordering. + +If HEAD is a valid ref, HEAD MUST appear as the first advertised +ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the +advertisement list at all, but other refs may still appear. + +The stream MUST include capability declarations behind a NUL on the +first ref. The peeled value of a ref (that is "ref^{}") MUST be +immediately after the ref itself, if presented. A conforming server +MUST peel the ref if it's an annotated tag. 
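+
+As a sketch against the vendored go-git types added earlier in this diff (not
+part of the protocol text itself), the capability list hidden behind the NUL
+of the first advertised ref can be extracted like this; the capability string
+is shortened for illustration:
+
+----
+	package main
+
+	import (
+		"bytes"
+		"fmt"
+
+		"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
+	)
+
+	func main() {
+		// payload of the first pkt-line, with its length prefix already stripped
+		first := []byte("7217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\x00" +
+			"multi_ack thin-pack side-band-64k ofs-delta agent=git/1.8.3.1")
+
+		parts := bytes.SplitN(first, []byte{0}, 2)
+
+		caps := capability.NewList()
+		if err := caps.Decode(parts[1]); err != nil {
+			panic(err)
+		}
+
+		fmt.Println(caps.Supports(capability.Sideband64k)) // true
+		fmt.Println(caps.Get(capability.Agent))            // [git/1.8.3.1]
+	}
+----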
+ +---- + advertised-refs = (no-refs / list-of-refs) + *shallow + flush-pkt + + no-refs = PKT-LINE(zero-id SP "capabilities^{}" + NUL capability-list) + + list-of-refs = first-ref *other-ref + first-ref = PKT-LINE(obj-id SP refname + NUL capability-list) + + other-ref = PKT-LINE(other-tip / other-peeled) + other-tip = obj-id SP refname + other-peeled = obj-id SP refname "^{}" + + shallow = PKT-LINE("shallow" SP obj-id) + + capability-list = capability *(SP capability) + capability = 1*(LC_ALPHA / DIGIT / "-" / "_") + LC_ALPHA = %x61-7A +---- + +Server and client MUST use lowercase for obj-id, both MUST treat obj-id +as case-insensitive. + +See protocol-capabilities.txt for a list of allowed server capabilities +and descriptions. + +Packfile Negotiation +-------------------- +After reference and capabilities discovery, the client can decide to +terminate the connection by sending a flush-pkt, telling the server it can +now gracefully terminate, and disconnect, when it does not need any pack +data. This can happen with the ls-remote command, and also can happen when +the client already is up-to-date. + +Otherwise, it enters the negotiation phase, where the client and +server determine what the minimal packfile necessary for transport is, +by telling the server what objects it wants, its shallow objects +(if any), and the maximum commit depth it wants (if any). The client +will also send a list of the capabilities it wants to be in effect, +out of what the server said it could do with the first 'want' line. + +---- + upload-request = want-list + *shallow-line + *1depth-request + flush-pkt + + want-list = first-want + *additional-want + + shallow-line = PKT-LINE("shallow" SP obj-id) + + depth-request = PKT-LINE("deepen" SP depth) / + PKT-LINE("deepen-since" SP timestamp) / + PKT-LINE("deepen-not" SP ref) + + first-want = PKT-LINE("want" SP obj-id SP capability-list) + additional-want = PKT-LINE("want" SP obj-id) + + depth = 1*DIGIT +---- + +Clients MUST send all the obj-ids it wants from the reference +discovery phase as 'want' lines. Clients MUST send at least one +'want' command in the request body. Clients MUST NOT mention an +obj-id in a 'want' command which did not appear in the response +obtained through ref discovery. + +The client MUST write all obj-ids which it only has shallow copies +of (meaning that it does not have the parents of a commit) as +'shallow' lines so that the server is aware of the limitations of +the client's history. + +The client now sends the maximum commit history depth it wants for +this transaction, which is the number of commits it wants from the +tip of the history, if any, as a 'deepen' line. A depth of 0 is the +same as not making a depth request. The client does not want to receive +any commits beyond this depth, nor does it want objects needed only to +complete those commits. Commits whose parents are not received as a +result are defined as shallow and marked as such in the server. This +information is sent back to the client in the next step. + +Once all the 'want's and 'shallow's (and optional 'deepen') are +transferred, clients MUST send a flush-pkt, to tell the server side +that it is done sending the list. + +Otherwise, if the client sent a positive depth request, the server +will determine which commits will and will not be shallow and +send this information to the client. If the client did not request +a positive depth, this step is skipped. 
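+
+As a sketch against the vendored go-git types added later in this diff (not
+part of the protocol text itself), a client-side upload-request with one want
+and a depth of 1 could be assembled and checked like this; the hash is the one
+used in the examples further below:
+
+----
+	package main
+
+	import (
+		"gopkg.in/src-d/go-git.v4/plumbing"
+		"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
+		"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability"
+	)
+
+	func main() {
+		req := packp.NewUploadRequest()
+		req.Wants = append(req.Wants,
+			plumbing.NewHash("74730d410fcb6603ace96f1dc55ea6196122532d"))
+
+		// a positive depth request requires the 'shallow' capability
+		req.Depth = packp.DepthCommits(1)
+		if err := req.Capabilities.Set(capability.Shallow); err != nil {
+			panic(err)
+		}
+
+		if err := req.Validate(); err != nil {
+			panic(err)
+		}
+	}
+----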
+ +---- + shallow-update = *shallow-line + *unshallow-line + flush-pkt + + shallow-line = PKT-LINE("shallow" SP obj-id) + + unshallow-line = PKT-LINE("unshallow" SP obj-id) +---- + +If the client has requested a positive depth, the server will compute +the set of commits which are no deeper than the desired depth. The set +of commits start at the client's wants. + +The server writes 'shallow' lines for each +commit whose parents will not be sent as a result. The server writes +an 'unshallow' line for each commit which the client has indicated is +shallow, but is no longer shallow at the currently requested depth +(that is, its parents will now be sent). The server MUST NOT mark +as unshallow anything which the client has not indicated was shallow. + +Now the client will send a list of the obj-ids it has using 'have' +lines, so the server can make a packfile that only contains the objects +that the client needs. In multi_ack mode, the canonical implementation +will send up to 32 of these at a time, then will send a flush-pkt. The +canonical implementation will skip ahead and send the next 32 immediately, +so that there is always a block of 32 "in-flight on the wire" at a time. + +---- + upload-haves = have-list + compute-end + + have-list = *have-line + have-line = PKT-LINE("have" SP obj-id) + compute-end = flush-pkt / PKT-LINE("done") +---- + +If the server reads 'have' lines, it then will respond by ACKing any +of the obj-ids the client said it had that the server also has. The +server will ACK obj-ids differently depending on which ack mode is +chosen by the client. + +In multi_ack mode: + + * the server will respond with 'ACK obj-id continue' for any common + commits. + + * once the server has found an acceptable common base commit and is + ready to make a packfile, it will blindly ACK all 'have' obj-ids + back to the client. + + * the server will then send a 'NAK' and then wait for another response + from the client - either a 'done' or another list of 'have' lines. + +In multi_ack_detailed mode: + + * the server will differentiate the ACKs where it is signaling + that it is ready to send data with 'ACK obj-id ready' lines, and + signals the identified common commits with 'ACK obj-id common' lines. + +Without either multi_ack or multi_ack_detailed: + + * upload-pack sends "ACK obj-id" on the first common object it finds. + After that it says nothing until the client gives it a "done". + + * upload-pack sends "NAK" on a flush-pkt if no common object + has been found yet. If one has been found, and thus an ACK + was already sent, it's silent on the flush-pkt. + +After the client has gotten enough ACK responses that it can determine +that the server has enough information to send an efficient packfile +(in the canonical implementation, this is determined when it has received +enough ACKs that it can color everything left in the --date-order queue +as common with the server, or the --date-order queue is empty), or the +client determines that it wants to give up (in the canonical implementation, +this is determined when the client sends 256 'have' lines without getting +any of them ACKed by the server - meaning there is nothing in common and +the server should just send all of its objects), then the client will send +a 'done' command. The 'done' command signals to the server that the client +is ready to receive its packfile data. + +However, the 256 limit *only* turns on in the canonical client +implementation if we have received at least one "ACK %s continue" +during a prior round. 
This helps to ensure that at least one common +ancestor is found before we give up entirely. + +Once the 'done' line is read from the client, the server will either +send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object +name of the last commit determined to be common. The server only sends +ACK after 'done' if there is at least one common base and multi_ack or +multi_ack_detailed is enabled. The server always sends NAK after 'done' +if there is no common base found. + +Then the server will start sending its packfile data. + +---- + server-response = *ack_multi ack / nak + ack_multi = PKT-LINE("ACK" SP obj-id ack_status) + ack_status = "continue" / "common" / "ready" + ack = PKT-LINE("ACK" SP obj-id) + nak = PKT-LINE("NAK") +---- + +A simple clone may look like this (with no 'have' lines): + +---- + C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ + side-band-64k ofs-delta\n + C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n + C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n + C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n + C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n + C: 0000 + C: 0009done\n + + S: 0008NAK\n + S: [PACKFILE] +---- + +An incremental update (fetch) response might look like this: + +---- + C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \ + side-band-64k ofs-delta\n + C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n + C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n + C: 0000 + C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n + C: [30 more have lines] + C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n + C: 0000 + + S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n + S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n + S: 0008NAK\n + + C: 0009done\n + + S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n + S: [PACKFILE] +---- + + +Packfile Data +------------- + +Now that the client and server have finished negotiation about what +the minimal amount of data that needs to be sent to the client is, the server +will construct and send the required data in packfile format. + +See pack-format.txt for what the packfile itself actually looks like. + +If 'side-band' or 'side-band-64k' capabilities have been specified by +the client, the server will send the packfile data multiplexed. + +Each packet starting with the packet-line length of the amount of data +that follows, followed by a single byte specifying the sideband the +following data is coming in on. + +In 'side-band' mode, it will send up to 999 data bytes plus 1 control +code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k' +mode it will send up to 65519 data bytes plus 1 control code, for a +total of up to 65520 bytes in a pkt-line. + +The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain +packfile data, sideband '2' will be used for progress information that the +client will generally print to stderr and sideband '3' is used for error +information. + +If no 'side-band' capability was specified, the server will stream the +entire packfile without multiplexing. + + +Pushing Data To a Server +------------------------ + +Pushing data to a server will invoke the 'receive-pack' process on the +server, which will allow the client to tell it which references it should +update and then send all the data the server will need for those new +references to be complete. 
Once all the data is received and validated, +the server will then update its references to what the client specified. + +Authentication +-------------- + +The protocol itself contains no authentication mechanisms. That is to be +handled by the transport, such as SSH, before the 'receive-pack' process is +invoked. If 'receive-pack' is configured over the Git transport, those +repositories will be writable by anyone who can access that port (9418) as +that transport is unauthenticated. + +Reference Discovery +------------------- + +The reference discovery phase is done nearly the same way as it is in the +fetching protocol. Each reference obj-id and name on the server is sent +in packet-line format to the client, followed by a flush-pkt. The only +real difference is that the capability listing is different - the only +possible values are 'report-status', 'delete-refs', 'ofs-delta' and +'push-options'. + +Reference Update Request and Packfile Transfer +---------------------------------------------- + +Once the client knows what references the server is at, it can send a +list of reference update requests. For each reference on the server +that it wants to update, it sends a line listing the obj-id currently on +the server, the obj-id the client would like to update it to and the name +of the reference. + +This list is followed by a flush-pkt. Then the push options are transmitted +one per packet followed by another flush-pkt. After that the packfile that +should contain all the objects that the server will need to complete the new +references will be sent. + +---- + update-request = *shallow ( command-list | push-cert ) [packfile] + + shallow = PKT-LINE("shallow" SP obj-id) + + command-list = PKT-LINE(command NUL capability-list) + *PKT-LINE(command) + flush-pkt + + command = create / delete / update + create = zero-id SP new-id SP name + delete = old-id SP zero-id SP name + update = old-id SP new-id SP name + + old-id = obj-id + new-id = obj-id + + push-cert = PKT-LINE("push-cert" NUL capability-list LF) + PKT-LINE("certificate version 0.1" LF) + PKT-LINE("pusher" SP ident LF) + PKT-LINE("pushee" SP url LF) + PKT-LINE("nonce" SP nonce LF) + PKT-LINE(LF) + *PKT-LINE(command LF) + *PKT-LINE(gpg-signature-lines LF) + PKT-LINE("push-cert-end" LF) + + packfile = "PACK" 28*(OCTET) +---- + +If the receiving end does not support delete-refs, the sending end MUST +NOT ask for delete command. + +If the receiving end does not support push-cert, the sending end +MUST NOT send a push-cert command. When a push-cert command is +sent, command-list MUST NOT be sent; the commands recorded in the +push certificate is used instead. + +The packfile MUST NOT be sent if the only command used is 'delete'. + +A packfile MUST be sent if either create or update command is used, +even if the server already has all the necessary objects. In this +case the client MUST send an empty packfile. The only time this +is likely to happen is if the client is creating +a new branch or a tag that points to an existing obj-id. + +The server will receive the packfile, unpack it, then validate each +reference that is being updated that it hasn't changed while the request +was being processed (the obj-id is still the same as the old-id), and +it will run any update hooks to make sure that the update is acceptable. +If all of that is fine, the server will then update the references. + +Push Certificate +---------------- + +A push certificate begins with a set of header lines. 
After the +header and an empty line, the protocol commands follow, one per +line. Note that the trailing LF in push-cert PKT-LINEs is _not_ +optional; it must be present. + +Currently, the following header fields are defined: + +`pusher` ident:: + Identify the GPG key in "Human Readable Name <email@address>" + format. + +`pushee` url:: + The repository URL (anonymized, if the URL contains + authentication material) the user who ran `git push` + intended to push into. + +`nonce` nonce:: + The 'nonce' string the receiving repository asked the + pushing user to include in the certificate, to prevent + replay attacks. + +The GPG signature lines are a detached signature for the contents +recorded in the push certificate before the signature block begins. +The detached signature is used to certify that the commands were +given by the pusher, who must be the signer. + +Report Status +------------- + +After receiving the pack data from the sender, the receiver sends a +report if 'report-status' capability is in effect. +It is a short listing of what happened in that update. It will first +list the status of the packfile unpacking as either 'unpack ok' or +'unpack [error]'. Then it will list the status for each of the references +that it tried to update. Each line is either 'ok [refname]' if the +update was successful, or 'ng [refname] [error]' if the update was not. + +---- + report-status = unpack-status + 1*(command-status) + flush-pkt + + unpack-status = PKT-LINE("unpack" SP unpack-result) + unpack-result = "ok" / error-msg + + command-status = command-ok / command-fail + command-ok = PKT-LINE("ok" SP refname) + command-fail = PKT-LINE("ng" SP refname SP error-msg) + + error-msg = 1*(OCTECT) ; where not "ok" +---- + +Updates can be unsuccessful for a number of reasons. The reference can have +changed since the reference discovery phase was originally sent, meaning +someone pushed in the meantime. The reference being pushed could be a +non-fast-forward reference and the update hooks or configuration could be +set to not allow that, etc. Also, some references can be updated while others +can be rejected. 
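+
+As a sketch against the vendored go-git types added later in this diff (not
+part of the protocol text itself), the client side can decode such a report
+like this; the raw string is the pkt-line stream from the example below,
+terminated by a flush-pkt:
+
+----
+	package main
+
+	import (
+		"fmt"
+		"strings"
+
+		"gopkg.in/src-d/go-git.v4/plumbing/protocol/packp"
+	)
+
+	func main() {
+		raw := "000eunpack ok\n" +
+			"0018ok refs/heads/debug\n" +
+			"002ang refs/heads/master non-fast-forward\n" +
+			"0000"
+
+		rs := packp.NewReportStatus()
+		if err := rs.Decode(strings.NewReader(raw)); err != nil {
+			panic(err)
+		}
+
+		// first failed update, if any:
+		// "command error on refs/heads/master: non-fast-forward"
+		fmt.Println(rs.Error())
+	}
+----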
+ +An example client/server communication might look like this: + +---- + S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n + S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n + S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n + S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n + S: 0000 + + C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n + C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n + C: 0000 + C: [PACKDATA] + + S: 000eunpack ok\n + S: 0018ok refs/heads/debug\n + S: 002ang refs/heads/master non-fast-forward\n +---- +*/ diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go new file mode 100644 index 0000000000000000000000000000000000000000..29c1a4cd8630ac25cae5c80721a64dc41bc2339e --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go @@ -0,0 +1,165 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +const ( + ok = "ok" +) + +// ReportStatus is a report status message, as used in the git-receive-pack +// process whenever the 'report-status' capability is negotiated. +type ReportStatus struct { + UnpackStatus string + CommandStatuses []*CommandStatus +} + +// NewReportStatus creates a new ReportStatus message. +func NewReportStatus() *ReportStatus { + return &ReportStatus{} +} + +// Error returns the first error if any. +func (s *ReportStatus) Error() error { + if s.UnpackStatus != ok { + return fmt.Errorf("unpack error: %s", s.UnpackStatus) + } + + for _, s := range s.CommandStatuses { + if err := s.Error(); err != nil { + return err + } + } + + return nil +} + +// Encode writes the report status to a writer. +func (s *ReportStatus) Encode(w io.Writer) error { + e := pktline.NewEncoder(w) + if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil { + return err + } + + for _, cs := range s.CommandStatuses { + if err := cs.encode(w); err != nil { + return err + } + } + + return e.Flush() +} + +// Decode reads from the given reader and decodes a report-status message. It +// does not read more input than what is needed to fill the report status. 
+func (s *ReportStatus) Decode(r io.Reader) error { + scan := pktline.NewScanner(r) + if err := s.scanFirstLine(scan); err != nil { + return err + } + + if err := s.decodeReportStatus(scan.Bytes()); err != nil { + return err + } + + flushed := false + for scan.Scan() { + b := scan.Bytes() + if isFlush(b) { + flushed = true + break + } + + if err := s.decodeCommandStatus(b); err != nil { + return err + } + } + + if !flushed { + return fmt.Errorf("missing flush") + } + + return scan.Err() +} + +func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error { + if scan.Scan() { + return nil + } + + if scan.Err() != nil { + return scan.Err() + } + + return io.ErrUnexpectedEOF +} + +func (s *ReportStatus) decodeReportStatus(b []byte) error { + if isFlush(b) { + return fmt.Errorf("premature flush") + } + + b = bytes.TrimSuffix(b, eol) + + line := string(b) + fields := strings.SplitN(line, " ", 2) + if len(fields) != 2 || fields[0] != "unpack" { + return fmt.Errorf("malformed unpack status: %s", line) + } + + s.UnpackStatus = fields[1] + return nil +} + +func (s *ReportStatus) decodeCommandStatus(b []byte) error { + b = bytes.TrimSuffix(b, eol) + + line := string(b) + fields := strings.SplitN(line, " ", 3) + status := ok + if len(fields) == 3 && fields[0] == "ng" { + status = fields[2] + } else if len(fields) != 2 || fields[0] != "ok" { + return fmt.Errorf("malformed command status: %s", line) + } + + cs := &CommandStatus{ + ReferenceName: plumbing.ReferenceName(fields[1]), + Status: status, + } + s.CommandStatuses = append(s.CommandStatuses, cs) + return nil +} + +// CommandStatus is the status of a reference in a report status. +// See ReportStatus struct. +type CommandStatus struct { + ReferenceName plumbing.ReferenceName + Status string +} + +// Error returns the error, if any. 
+func (s *CommandStatus) Error() error { + if s.Status == ok { + return nil + } + + return fmt.Errorf("command error on %s: %s", + s.ReferenceName.String(), s.Status) +} + +func (s *CommandStatus) encode(w io.Writer) error { + e := pktline.NewEncoder(w) + if s.Error() == nil { + return e.Encodef("ok %s\n", s.ReferenceName.String()) + } + + return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go new file mode 100644 index 0000000000000000000000000000000000000000..40f58e88311e211b119acbc0e561cb7e405e67b2 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go @@ -0,0 +1,92 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +const ( + shallowLineLen = 48 + unshallowLineLen = 50 +) + +type ShallowUpdate struct { + Shallows []plumbing.Hash + Unshallows []plumbing.Hash +} + +func (r *ShallowUpdate) Decode(reader io.Reader) error { + s := pktline.NewScanner(reader) + + for s.Scan() { + line := s.Bytes() + line = bytes.TrimSpace(line) + + var err error + switch { + case bytes.HasPrefix(line, shallow): + err = r.decodeShallowLine(line) + case bytes.HasPrefix(line, unshallow): + err = r.decodeUnshallowLine(line) + case bytes.Compare(line, pktline.Flush) == 0: + return nil + } + + if err != nil { + return err + } + } + + return s.Err() +} + +func (r *ShallowUpdate) decodeShallowLine(line []byte) error { + hash, err := r.decodeLine(line, shallow, shallowLineLen) + if err != nil { + return err + } + + r.Shallows = append(r.Shallows, hash) + return nil +} + +func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error { + hash, err := r.decodeLine(line, unshallow, unshallowLineLen) + if err != nil { + return err + } + + r.Unshallows = append(r.Unshallows, hash) + return nil +} + +func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) { + if len(line) != expLen { + return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line) + } + + raw := string(line[expLen-40 : expLen]) + return plumbing.NewHash(raw), nil +} + +func (r *ShallowUpdate) Encode(w io.Writer) error { + e := pktline.NewEncoder(w) + + for _, h := range r.Shallows { + if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil { + return err + } + } + + for _, h := range r.Unshallows { + if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil { + return err + } + } + + return e.Flush() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go new file mode 100644 index 0000000000000000000000000000000000000000..de5001281fd50bd3f933d57793b4e0b3bdc7d46f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go @@ -0,0 +1,33 @@ +package sideband + +// Type sideband type "side-band" or "side-band-64k" +type Type int8 + +const ( + // Sideband legacy sideband type up to 1000-byte messages + Sideband Type = iota + // Sideband64k sideband type up to 65519-byte messages + Sideband64k Type = iota + + // MaxPackedSize for Sideband type + MaxPackedSize = 1000 + // MaxPackedSize64k for Sideband64k type + MaxPackedSize64k = 65520 +) + +// Channel sideband channel +type Channel byte + +// WithPayload encode the payload as a message +func (ch Channel) 
WithPayload(payload []byte) []byte { + return append([]byte{byte(ch)}, payload...) +} + +const ( + // PackData packfile content + PackData Channel = 1 + // ProgressMessage progress messages + ProgressMessage Channel = 2 + // ErrorMessage fatal error message just before stream aborts + ErrorMessage Channel = 3 +) diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go new file mode 100644 index 0000000000000000000000000000000000000000..352336dc68b9367b255624b4d05f97ba3bd68bef --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go @@ -0,0 +1,148 @@ +package sideband + +import ( + "errors" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +// ErrMaxPackedExceeded returned by Read, if the maximum packed size is exceeded +var ErrMaxPackedExceeded = errors.New("max. packed size exceeded") + +// Progress where the progress information is stored +type Progress interface { + io.Writer +} + +// Demuxer demultiplexes the progress reports and error info interleaved with the +// packfile itself. +// +// A sideband has three different channels the main one, called PackData, contains +// the packfile data; the ErrorMessage channel, that contains server errors; and +// the last one, ProgressMessage channel, containing information about the ongoing +// task happening in the server (optional, can be suppressed sending NoProgress +// or Quiet capabilities to the server) +// +// In order to demultiplex the data stream, method `Read` should be called to +// retrieve the PackData channel, the incoming data from the ProgressMessage is +// written at `Progress` (if any), if any message is retrieved from the +// ErrorMessage channel an error is returned and we can assume that the +// connection has been closed. +type Demuxer struct { + t Type + r io.Reader + s *pktline.Scanner + + max int + pending []byte + + // Progress is where the progress messages are stored + Progress Progress +} + +// NewDemuxer returns a new Demuxer for the given t and read from r +func NewDemuxer(t Type, r io.Reader) *Demuxer { + max := MaxPackedSize64k + if t == Sideband { + max = MaxPackedSize + } + + return &Demuxer{ + t: t, + r: r, + max: max, + s: pktline.NewScanner(r), + } +} + +// Read reads up to len(p) bytes from the PackData channel into p, an error can +// be return if an error happens when reading or if a message is sent in the +// ErrorMessage channel. 
+// +// When a ProgressMessage is read, is not copy to b, instead of this is written +// to the Progress +func (d *Demuxer) Read(b []byte) (n int, err error) { + var read, req int + + req = len(b) + for read < req { + n, err := d.doRead(b[read:req]) + read += n + + if err != nil { + return read, err + } + } + + return read, nil +} + +func (d *Demuxer) doRead(b []byte) (int, error) { + read, err := d.nextPackData() + size := len(read) + wanted := len(b) + + if size > wanted { + d.pending = read[wanted:] + } + + if wanted > size { + wanted = size + } + + size = copy(b, read[:wanted]) + return size, err +} + +func (d *Demuxer) nextPackData() ([]byte, error) { + content := d.getPending() + if len(content) != 0 { + return content, nil + } + + if !d.s.Scan() { + if err := d.s.Err(); err != nil { + return nil, err + } + + return nil, io.EOF + } + + content = d.s.Bytes() + + size := len(content) + if size == 0 { + return nil, nil + } else if size > d.max { + return nil, ErrMaxPackedExceeded + } + + switch Channel(content[0]) { + case PackData: + return content[1:], nil + case ProgressMessage: + if d.Progress != nil { + _, err := d.Progress.Write(content[1:]) + return nil, err + } + case ErrorMessage: + return nil, fmt.Errorf("unexpected error: %s", content[1:]) + default: + return nil, fmt.Errorf("unknown channel %s", content) + } + + return nil, nil +} + +func (d *Demuxer) getPending() (b []byte) { + if len(d.pending) == 0 { + return nil + } + + content := d.pending + d.pending = nil + + return content +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..c5d2429529164ef35f6f318586f93aa33a996ad8 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go @@ -0,0 +1,31 @@ +// Package sideband implements a sideband mutiplex/demultiplexer +package sideband + +// If 'side-band' or 'side-band-64k' capabilities have been specified by +// the client, the server will send the packfile data multiplexed. +// +// Either mode indicates that the packfile data will be streamed broken +// up into packets of up to either 1000 bytes in the case of 'side_band', +// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up +// of a leading 4-byte pkt-line length of how much data is in the packet, +// followed by a 1-byte stream code, followed by the actual data. +// +// The stream code can be one of: +// +// 1 - pack data +// 2 - progress messages +// 3 - fatal error message just before stream aborts +// +// The "side-band-64k" capability came about as a way for newer clients +// that can handle much larger packets to request packets that are +// actually crammed nearly full, while maintaining backward compatibility +// for the older clients. +// +// Further, with side-band and its up to 1000-byte messages, it's actually +// 999 bytes of payload and 1 byte for the stream code. With side-band-64k, +// same deal, you have up to 65519 bytes of data and 1 byte for the stream +// code. +// +// The client MUST send only maximum of one of "side-band" and "side- +// band-64k". Server MUST diagnose it as an error if client requests +// both. 
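+//
+// A minimal usage sketch (an assumption about typical wiring, not taken from
+// the upstream documentation): demultiplexing an upload-pack response whose
+// packfile is interleaved with progress messages on a side-band-64k stream.
+// Here conn is a placeholder io.Reader carrying the multiplexed pkt-line
+// stream and packfile is a placeholder io.Writer receiving the pack data:
+//
+//	demux := sideband.NewDemuxer(sideband.Sideband64k, conn)
+//	demux.Progress = os.Stderr // progress messages (channel 2) end up here
+//
+//	// Read yields only channel 1 (pack data); an ErrorMessage on channel 3
+//	// surfaces as an error from Read.
+//	if _, err := io.Copy(packfile, demux); err != nil {
+//		// handle server-side error or broken stream
+//	}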
diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go new file mode 100644 index 0000000000000000000000000000000000000000..45fecc2cbd2d0e3dd77fb46fcc8f0c6431ea01de --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go @@ -0,0 +1,65 @@ +package sideband + +import ( + "io" + + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +// Muxer multiplex the packfile along with the progress messages and the error +// information. The multiplex is perform using pktline format. +type Muxer struct { + max int + e *pktline.Encoder +} + +const chLen = 1 + +// NewMuxer returns a new Muxer for the given t that writes on w. +// +// If t is equal to `Sideband` the max pack size is set to MaxPackedSize, in any +// other value is given, max pack is set to MaxPackedSize64k, that is the +// maximum length of a line in pktline format. +func NewMuxer(t Type, w io.Writer) *Muxer { + max := MaxPackedSize64k + if t == Sideband { + max = MaxPackedSize + } + + return &Muxer{ + max: max - chLen, + e: pktline.NewEncoder(w), + } +} + +// Write writes p in the PackData channel +func (m *Muxer) Write(p []byte) (int, error) { + return m.WriteChannel(PackData, p) +} + +// WriteChannel writes p in the given channel. This method can be used with any +// channel, but is recommend use it only for the ProgressMessage and +// ErrorMessage channels and use Write for the PackData channel +func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) { + wrote := 0 + size := len(p) + for wrote < size { + n, err := m.doWrite(t, p[wrote:]) + wrote += n + + if err != nil { + return wrote, err + } + } + + return wrote, nil +} + +func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) { + sz := len(p) + if sz > m.max { + sz = m.max + } + + return sz, m.e.Encode(ch.WithPayload(p[:sz])) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go new file mode 100644 index 0000000000000000000000000000000000000000..0c89e47da93ed5f7b68c36cb9f662c5fe2f3465c --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go @@ -0,0 +1,84 @@ +package packp + +import ( + "bytes" + "errors" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +const ackLineLen = 44 + +// ServerResponse object acknowledgement from upload-pack service +type ServerResponse struct { + ACKs []plumbing.Hash +} + +// Decode decodes the response into the struct, isMultiACK should be true, if +// the request was done with multi_ack or multi_ack_detailed capabilities +func (r *ServerResponse) Decode(reader io.Reader, isMultiACK bool) error { + // TODO: implement support for multi_ack or multi_ack_detailed responses + if isMultiACK { + return errors.New("multi_ack and multi_ack_detailed are not supported") + } + + s := pktline.NewScanner(reader) + + for s.Scan() { + line := s.Bytes() + + if err := r.decodeLine(line); err != nil { + return err + } + + if !isMultiACK { + break + } + } + + return s.Err() +} + +func (r *ServerResponse) decodeLine(line []byte) error { + if len(line) == 0 { + return fmt.Errorf("unexpected flush") + } + + if bytes.Compare(line[0:3], ack) == 0 { + return r.decodeACKLine(line) + } + + if bytes.Compare(line[0:3], nak) == 0 { + return nil + } + + return fmt.Errorf("unexpected content %q", string(line)) +} + +func (r 
*ServerResponse) decodeACKLine(line []byte) error { + if len(line) < ackLineLen { + return fmt.Errorf("malformed ACK %q", line) + } + + sp := bytes.Index(line, []byte(" ")) + h := plumbing.NewHash(string(line[sp+1 : sp+41])) + r.ACKs = append(r.ACKs, h) + return nil +} + +// Encode encodes the ServerResponse into a writer. +func (r *ServerResponse) Encode(w io.Writer) error { + if len(r.ACKs) > 1 { + return errors.New("multi_ack and multi_ack_detailed are not supported") + } + + e := pktline.NewEncoder(w) + if len(r.ACKs) == 0 { + return e.Encodef("%s\n", nak) + } + + return e.Encodef("%s %s\n", ack, r.ACKs[0].String()) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go new file mode 100644 index 0000000000000000000000000000000000000000..7832007b195fd063c3415e0d21b43e18161b35d6 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go @@ -0,0 +1,168 @@ +package packp + +import ( + "fmt" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" +) + +// UploadRequest values represent the information transmitted on a +// upload-request message. Values from this type are not zero-value +// safe, use the New function instead. +// This is a low level type, use UploadPackRequest instead. +type UploadRequest struct { + Capabilities *capability.List + Wants []plumbing.Hash + Shallows []plumbing.Hash + Depth Depth +} + +// Depth values stores the desired depth of the requested packfile: see +// DepthCommit, DepthSince and DepthReference. +type Depth interface { + isDepth() + IsZero() bool +} + +// DepthCommits values stores the maximum number of requested commits in +// the packfile. Zero means infinite. A negative value will have +// undefined consecuences. +type DepthCommits int + +func (d DepthCommits) isDepth() {} + +func (d DepthCommits) IsZero() bool { + return d == 0 +} + +// DepthSince values requests only commits newer than the specified time. +type DepthSince time.Time + +func (d DepthSince) isDepth() {} + +func (d DepthSince) IsZero() bool { + return time.Time(d).IsZero() +} + +// DepthReference requests only commits not to found in the specified reference. +type DepthReference string + +func (d DepthReference) isDepth() {} + +func (d DepthReference) IsZero() bool { + return string(d) == "" +} + +// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be +// used. It has no capabilities, wants or shallows and an infinite depth. Please +// note that to encode an upload-request it has to have at least one wanted hash. +func NewUploadRequest() *UploadRequest { + return &UploadRequest{ + Capabilities: capability.NewList(), + Wants: []plumbing.Hash{}, + Shallows: []plumbing.Hash{}, + Depth: DepthCommits(0), + } +} + +// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest +// value, the request capabilities are filled with the most optiomal ones, based +// on the adv value (advertaised capabilities), the UploadRequest generated it +// has no wants or shallows and an infinite depth. 
+func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest { + r := NewUploadRequest() + + if adv.Supports(capability.MultiACKDetailed) { + r.Capabilities.Set(capability.MultiACKDetailed) + } else if adv.Supports(capability.MultiACK) { + r.Capabilities.Set(capability.MultiACK) + } + + if adv.Supports(capability.Sideband64k) { + r.Capabilities.Set(capability.Sideband64k) + } else if adv.Supports(capability.Sideband) { + r.Capabilities.Set(capability.Sideband) + } + + if adv.Supports(capability.ThinPack) { + r.Capabilities.Set(capability.ThinPack) + } + + if adv.Supports(capability.OFSDelta) { + r.Capabilities.Set(capability.OFSDelta) + } + + if adv.Supports(capability.Agent) { + r.Capabilities.Set(capability.Agent, capability.DefaultAgent) + } + + return r +} + +// Validate validates the content of UploadRequest, following the next rules: +// - Wants MUST have at least one reference +// - capability.Shallow MUST be present if Shallows is not empty +// - is a non-zero DepthCommits is given capability.Shallow MUST be present +// - is a DepthSince is given capability.Shallow MUST be present +// - is a DepthReference is given capability.DeepenNot MUST be present +// - MUST contain only maximum of one of capability.Sideband and capability.Sideband64k +// - MUST contain only maximum of one of capability.MultiACK and capability.MultiACKDetailed +func (r *UploadRequest) Validate() error { + if len(r.Wants) == 0 { + return fmt.Errorf("want can't be empty") + } + + if err := r.validateRequiredCapabilities(); err != nil { + return err + } + + if err := r.validateConflictCapabilities(); err != nil { + return err + } + + return nil +} + +func (r *UploadRequest) validateRequiredCapabilities() error { + msg := "missing capability %s" + + if len(r.Shallows) != 0 && !r.Capabilities.Supports(capability.Shallow) { + return fmt.Errorf(msg, capability.Shallow) + } + + switch r.Depth.(type) { + case DepthCommits: + if r.Depth != DepthCommits(0) { + if !r.Capabilities.Supports(capability.Shallow) { + return fmt.Errorf(msg, capability.Shallow) + } + } + case DepthSince: + if !r.Capabilities.Supports(capability.DeepenSince) { + return fmt.Errorf(msg, capability.DeepenSince) + } + case DepthReference: + if !r.Capabilities.Supports(capability.DeepenNot) { + return fmt.Errorf(msg, capability.DeepenNot) + } + } + + return nil +} + +func (r *UploadRequest) validateConflictCapabilities() error { + msg := "capabilities %s and %s are mutually exclusive" + if r.Capabilities.Supports(capability.Sideband) && + r.Capabilities.Supports(capability.Sideband64k) { + return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k) + } + + if r.Capabilities.Supports(capability.MultiACK) && + r.Capabilities.Supports(capability.MultiACKDetailed) { + return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed) + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go new file mode 100644 index 0000000000000000000000000000000000000000..bcd642db2a5b52c20421fb6fd5db698384eecf33 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go @@ -0,0 +1,257 @@ +package packp + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "strconv" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +// Decode reads the next upload-request form its input and +// stores it in the UploadRequest. 
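// Illustrative sketch, not part of the vendored diff: building an
// UploadRequest by hand so that it satisfies the Validate rules listed above
// (at least one want, and the shallow capability when a non-zero commit depth
// is requested). The hash is a placeholder; assumes imports of
// "gopkg.in/src-d/go-git.v4/plumbing" and the packp and capability packages.
func exampleUploadRequest() error {
	req := packp.NewUploadRequest()
	req.Wants = append(req.Wants, plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"))
	req.Capabilities.Set(capability.Sideband64k)
	req.Capabilities.Set(capability.Shallow) // required because a non-zero depth is set below
	req.Depth = packp.DepthCommits(1)

	return req.Validate()
}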
+func (u *UploadRequest) Decode(r io.Reader) error { + d := newUlReqDecoder(r) + return d.Decode(u) +} + +type ulReqDecoder struct { + s *pktline.Scanner // a pkt-line scanner from the input stream + line []byte // current pkt-line contents, use parser.nextLine() to make it advance + nLine int // current pkt-line number for debugging, begins at 1 + err error // sticky error, use the parser.error() method to fill this out + data *UploadRequest // parsed data is stored here +} + +func newUlReqDecoder(r io.Reader) *ulReqDecoder { + return &ulReqDecoder{ + s: pktline.NewScanner(r), + } +} + +func (d *ulReqDecoder) Decode(v *UploadRequest) error { + d.data = v + + for state := d.decodeFirstWant; state != nil; { + state = state() + } + + return d.err +} + +// fills out the parser stiky error +func (d *ulReqDecoder) error(format string, a ...interface{}) { + msg := fmt.Sprintf( + "pkt-line %d: %s", d.nLine, + fmt.Sprintf(format, a...), + ) + + d.err = NewErrUnexpectedData(msg, d.line) +} + +// Reads a new pkt-line from the scanner, makes its payload available as +// p.line and increments p.nLine. A successful invocation returns true, +// otherwise, false is returned and the sticky error is filled out +// accordingly. Trims eols at the end of the payloads. +func (d *ulReqDecoder) nextLine() bool { + d.nLine++ + + if !d.s.Scan() { + if d.err = d.s.Err(); d.err != nil { + return false + } + + d.error("EOF") + return false + } + + d.line = d.s.Bytes() + d.line = bytes.TrimSuffix(d.line, eol) + + return true +} + +// Expected format: want <hash>[ capabilities] +func (d *ulReqDecoder) decodeFirstWant() stateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if !bytes.HasPrefix(d.line, want) { + d.error("missing 'want ' prefix") + return nil + } + d.line = bytes.TrimPrefix(d.line, want) + + hash, ok := d.readHash() + if !ok { + return nil + } + d.data.Wants = append(d.data.Wants, hash) + + return d.decodeCaps +} + +func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) { + if len(d.line) < hashSize { + d.err = fmt.Errorf("malformed hash: %v", d.line) + return plumbing.ZeroHash, false + } + + var hash plumbing.Hash + if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil { + d.error("invalid hash text: %s", err) + return plumbing.ZeroHash, false + } + d.line = d.line[hashSize:] + + return hash, true +} + +// Expected format: sp cap1 sp cap2 sp cap3... 
+func (d *ulReqDecoder) decodeCaps() stateFn { + d.line = bytes.TrimPrefix(d.line, sp) + if err := d.data.Capabilities.Decode(d.line); err != nil { + d.error("invalid capabilities: %s", err) + } + + return d.decodeOtherWants +} + +// Expected format: want <hash> +func (d *ulReqDecoder) decodeOtherWants() stateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if bytes.HasPrefix(d.line, shallow) { + return d.decodeShallow + } + + if bytes.HasPrefix(d.line, deepen) { + return d.decodeDeepen + } + + if len(d.line) == 0 { + return nil + } + + if !bytes.HasPrefix(d.line, want) { + d.error("unexpected payload while expecting a want: %q", d.line) + return nil + } + d.line = bytes.TrimPrefix(d.line, want) + + hash, ok := d.readHash() + if !ok { + return nil + } + d.data.Wants = append(d.data.Wants, hash) + + return d.decodeOtherWants +} + +// Expected format: shallow <hash> +func (d *ulReqDecoder) decodeShallow() stateFn { + if bytes.HasPrefix(d.line, deepen) { + return d.decodeDeepen + } + + if len(d.line) == 0 { + return nil + } + + if !bytes.HasPrefix(d.line, shallow) { + d.error("unexpected payload while expecting a shallow: %q", d.line) + return nil + } + d.line = bytes.TrimPrefix(d.line, shallow) + + hash, ok := d.readHash() + if !ok { + return nil + } + d.data.Shallows = append(d.data.Shallows, hash) + + if ok := d.nextLine(); !ok { + return nil + } + + return d.decodeShallow +} + +// Expected format: deepen <n> / deepen-since <ul> / deepen-not <ref> +func (d *ulReqDecoder) decodeDeepen() stateFn { + if bytes.HasPrefix(d.line, deepenCommits) { + return d.decodeDeepenCommits + } + + if bytes.HasPrefix(d.line, deepenSince) { + return d.decodeDeepenSince + } + + if bytes.HasPrefix(d.line, deepenReference) { + return d.decodeDeepenReference + } + + if len(d.line) == 0 { + return nil + } + + d.error("unexpected deepen specification: %q", d.line) + return nil +} + +func (d *ulReqDecoder) decodeDeepenCommits() stateFn { + d.line = bytes.TrimPrefix(d.line, deepenCommits) + + var n int + if n, d.err = strconv.Atoi(string(d.line)); d.err != nil { + return nil + } + if n < 0 { + d.err = fmt.Errorf("negative depth") + return nil + } + d.data.Depth = DepthCommits(n) + + return d.decodeFlush +} + +func (d *ulReqDecoder) decodeDeepenSince() stateFn { + d.line = bytes.TrimPrefix(d.line, deepenSince) + + var secs int64 + secs, d.err = strconv.ParseInt(string(d.line), 10, 64) + if d.err != nil { + return nil + } + t := time.Unix(secs, 0).UTC() + d.data.Depth = DepthSince(t) + + return d.decodeFlush +} + +func (d *ulReqDecoder) decodeDeepenReference() stateFn { + d.line = bytes.TrimPrefix(d.line, deepenReference) + + d.data.Depth = DepthReference(string(d.line)) + + return d.decodeFlush +} + +func (d *ulReqDecoder) decodeFlush() stateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if len(d.line) != 0 { + d.err = fmt.Errorf("unexpected payload while expecting a flush-pkt: %q", d.line) + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go new file mode 100644 index 0000000000000000000000000000000000000000..4a26e74d573b490e3f0c61f4cd3e70c23cc60b5e --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go @@ -0,0 +1,145 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +// Encode writes the UlReq encoding of u to the stream. 
+// +// All the payloads will end with a newline character. Wants and +// shallows are sorted alphabetically. A depth of 0 means no depth +// request is sent. +func (u *UploadRequest) Encode(w io.Writer) error { + e := newUlReqEncoder(w) + return e.Encode(u) +} + +type ulReqEncoder struct { + pe *pktline.Encoder // where to write the encoded data + data *UploadRequest // the data to encode + err error // sticky error +} + +func newUlReqEncoder(w io.Writer) *ulReqEncoder { + return &ulReqEncoder{ + pe: pktline.NewEncoder(w), + } +} + +func (e *ulReqEncoder) Encode(v *UploadRequest) error { + e.data = v + + if len(v.Wants) == 0 { + return fmt.Errorf("empty wants provided") + } + + plumbing.HashesSort(e.data.Wants) + for state := e.encodeFirstWant; state != nil; { + state = state() + } + + return e.err +} + +func (e *ulReqEncoder) encodeFirstWant() stateFn { + var err error + if e.data.Capabilities.IsEmpty() { + err = e.pe.Encodef("want %s\n", e.data.Wants[0]) + } else { + err = e.pe.Encodef( + "want %s %s\n", + e.data.Wants[0], + e.data.Capabilities.String(), + ) + } + + if err != nil { + e.err = fmt.Errorf("encoding first want line: %s", err) + return nil + } + + return e.encodeAditionalWants +} + +func (e *ulReqEncoder) encodeAditionalWants() stateFn { + last := e.data.Wants[0] + for _, w := range e.data.Wants[1:] { + if bytes.Compare(last[:], w[:]) == 0 { + continue + } + + if err := e.pe.Encodef("want %s\n", w); err != nil { + e.err = fmt.Errorf("encoding want %q: %s", w, err) + return nil + } + + last = w + } + + return e.encodeShallows +} + +func (e *ulReqEncoder) encodeShallows() stateFn { + plumbing.HashesSort(e.data.Shallows) + + var last plumbing.Hash + for _, s := range e.data.Shallows { + if bytes.Compare(last[:], s[:]) == 0 { + continue + } + + if err := e.pe.Encodef("shallow %s\n", s); err != nil { + e.err = fmt.Errorf("encoding shallow %q: %s", s, err) + return nil + } + + last = s + } + + return e.encodeDepth +} + +func (e *ulReqEncoder) encodeDepth() stateFn { + switch depth := e.data.Depth.(type) { + case DepthCommits: + if depth != 0 { + commits := int(depth) + if err := e.pe.Encodef("deepen %d\n", commits); err != nil { + e.err = fmt.Errorf("encoding depth %d: %s", depth, err) + return nil + } + } + case DepthSince: + when := time.Time(depth).UTC() + if err := e.pe.Encodef("deepen-since %d\n", when.Unix()); err != nil { + e.err = fmt.Errorf("encoding depth %s: %s", when, err) + return nil + } + case DepthReference: + reference := string(depth) + if err := e.pe.Encodef("deepen-not %s\n", reference); err != nil { + e.err = fmt.Errorf("encoding depth %s: %s", reference, err) + return nil + } + default: + e.err = fmt.Errorf("unsupported depth type") + return nil + } + + return e.encodeFlush +} + +func (e *ulReqEncoder) encodeFlush() stateFn { + if err := e.pe.Flush(); err != nil { + e.err = fmt.Errorf("encoding flush-pkt: %s", err) + return nil + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go new file mode 100644 index 0000000000000000000000000000000000000000..0624930289468cc78ebf7fb3e6bbe37783c85f2d --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go @@ -0,0 +1,117 @@ +package packp + +import ( + "errors" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" +) + +var ( + ErrEmptyCommands = errors.New("commands cannot be empty") + ErrMalformedCommand = 
errors.New("malformed command") +) + +// ReferenceUpdateRequest values represent reference upload requests. +// Values from this type are not zero-value safe, use the New function instead. +type ReferenceUpdateRequest struct { + Capabilities *capability.List + Commands []*Command + Shallow *plumbing.Hash + // Packfile contains an optional packfile reader. + Packfile io.ReadCloser +} + +// New returns a pointer to a new ReferenceUpdateRequest value. +func NewReferenceUpdateRequest() *ReferenceUpdateRequest { + return &ReferenceUpdateRequest{ + // TODO: Add support for push-cert + Capabilities: capability.NewList(), + Commands: nil, + } +} + +// NewReferenceUpdateRequestFromCapabilities returns a pointer to a new +// ReferenceUpdateRequest value, the request capabilities are filled with the +// most optimal ones, based on the adv value (advertised capabilities), the +// ReferenceUpdateRequest contains no commands +// +// It does set the following capabilities: +// - agent +// - report-status +// - ofs-delta +// - ref-delta +// It leaves up to the user to add the following capabilities later: +// - atomic +// - ofs-delta +// - side-band +// - side-band-64k +// - quiet +// - push-cert +func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceUpdateRequest { + r := NewReferenceUpdateRequest() + + if adv.Supports(capability.Agent) { + r.Capabilities.Set(capability.Agent, capability.DefaultAgent) + } + + if adv.Supports(capability.ReportStatus) { + r.Capabilities.Set(capability.ReportStatus) + } + + return r +} + +func (r *ReferenceUpdateRequest) validate() error { + if len(r.Commands) == 0 { + return ErrEmptyCommands + } + + for _, c := range r.Commands { + if err := c.validate(); err != nil { + return err + } + } + + return nil +} + +type Action string + +const ( + Create Action = "create" + Update = "update" + Delete = "delete" + Invalid = "invalid" +) + +type Command struct { + Name plumbing.ReferenceName + Old plumbing.Hash + New plumbing.Hash +} + +func (c *Command) Action() Action { + if c.Old == plumbing.ZeroHash && c.New == plumbing.ZeroHash { + return Invalid + } + + if c.Old == plumbing.ZeroHash { + return Create + } + + if c.New == plumbing.ZeroHash { + return Delete + } + + return Update +} + +func (c *Command) validate() error { + if c.Action() == Invalid { + return ErrMalformedCommand + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go new file mode 100644 index 0000000000000000000000000000000000000000..c15d49cfb752bbc5e37271b2abb23dc6c4571d1c --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go @@ -0,0 +1,250 @@ +package packp + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" +) + +var ( + shallowLineLength = len(shallow) + hashSize + minCommandLength = hashSize*2 + 2 + 1 + minCommandAndCapsLenth = minCommandLength + 1 +) + +var ( + ErrEmpty = errors.New("empty update-request message") + errNoCommands = errors.New("unexpected EOF before any command") + errMissingCapabilitiesDelimiter = errors.New("capabilities delimiter not found") +) + +func errMalformedRequest(reason string) error { + return fmt.Errorf("malformed request: %s", reason) +} + +func errInvalidHashSize(got int) error { + return fmt.Errorf("invalid hash size: expected %d, got %d", + hashSize, got) +} + +func 
errInvalidHash(err error) error { + return fmt.Errorf("invalid hash: %s", err.Error()) +} + +func errInvalidShallowLineLength(got int) error { + return errMalformedRequest(fmt.Sprintf( + "invalid shallow line length: expected %d, got %d", + shallowLineLength, got)) +} + +func errInvalidCommandCapabilitiesLineLength(got int) error { + return errMalformedRequest(fmt.Sprintf( + "invalid command and capabilities line length: expected at least %d, got %d", + minCommandAndCapsLenth, got)) +} + +func errInvalidCommandLineLength(got int) error { + return errMalformedRequest(fmt.Sprintf( + "invalid command line length: expected at least %d, got %d", + minCommandLength, got)) +} + +func errInvalidShallowObjId(err error) error { + return errMalformedRequest( + fmt.Sprintf("invalid shallow object id: %s", err.Error())) +} + +func errInvalidOldObjId(err error) error { + return errMalformedRequest( + fmt.Sprintf("invalid old object id: %s", err.Error())) +} + +func errInvalidNewObjId(err error) error { + return errMalformedRequest( + fmt.Sprintf("invalid new object id: %s", err.Error())) +} + +func errMalformedCommand(err error) error { + return errMalformedRequest(fmt.Sprintf( + "malformed command: %s", err.Error())) +} + +// Decode reads the next update-request message form the reader and wr +func (req *ReferenceUpdateRequest) Decode(r io.Reader) error { + var rc io.ReadCloser + var ok bool + rc, ok = r.(io.ReadCloser) + if !ok { + rc = ioutil.NopCloser(r) + } + + d := &updReqDecoder{r: rc, s: pktline.NewScanner(r)} + return d.Decode(req) +} + +type updReqDecoder struct { + r io.ReadCloser + s *pktline.Scanner + req *ReferenceUpdateRequest +} + +func (d *updReqDecoder) Decode(req *ReferenceUpdateRequest) error { + d.req = req + funcs := []func() error{ + d.scanLine, + d.decodeShallow, + d.decodeCommandAndCapabilities, + d.decodeCommands, + d.setPackfile, + req.validate, + } + + for _, f := range funcs { + if err := f(); err != nil { + return err + } + } + + return nil +} + +func (d *updReqDecoder) scanLine() error { + if ok := d.s.Scan(); !ok { + return d.scanErrorOr(ErrEmpty) + } + + return nil +} + +func (d *updReqDecoder) decodeShallow() error { + b := d.s.Bytes() + + if !bytes.HasPrefix(b, shallowNoSp) { + return nil + } + + if len(b) != shallowLineLength { + return errInvalidShallowLineLength(len(b)) + } + + h, err := parseHash(string(b[len(shallow):])) + if err != nil { + return errInvalidShallowObjId(err) + } + + if ok := d.s.Scan(); !ok { + return d.scanErrorOr(errNoCommands) + } + + d.req.Shallow = &h + + return nil +} + +func (d *updReqDecoder) decodeCommands() error { + for { + b := d.s.Bytes() + if bytes.Equal(b, pktline.Flush) { + return nil + } + + c, err := parseCommand(b) + if err != nil { + return err + } + + d.req.Commands = append(d.req.Commands, c) + + if ok := d.s.Scan(); !ok { + return d.s.Err() + } + } +} + +func (d *updReqDecoder) decodeCommandAndCapabilities() error { + b := d.s.Bytes() + i := bytes.IndexByte(b, 0) + if i == -1 { + return errMissingCapabilitiesDelimiter + } + + if len(b) < minCommandAndCapsLenth { + return errInvalidCommandCapabilitiesLineLength(len(b)) + } + + cmd, err := parseCommand(b[:i]) + if err != nil { + return err + } + + d.req.Commands = append(d.req.Commands, cmd) + + if err := d.req.Capabilities.Decode(b[i+1:]); err != nil { + return err + } + + if err := d.scanLine(); err != nil { + return err + } + + return nil +} + +func (d *updReqDecoder) setPackfile() error { + d.req.Packfile = d.r + + return nil +} + +func parseCommand(b []byte) (*Command, 
error) { + if len(b) < minCommandLength { + return nil, errInvalidCommandLineLength(len(b)) + } + + var ( + os, ns string + n plumbing.ReferenceName + ) + if _, err := fmt.Sscanf(string(b), "%s %s %s", &os, &ns, &n); err != nil { + return nil, errMalformedCommand(err) + } + + oh, err := parseHash(os) + if err != nil { + return nil, errInvalidOldObjId(err) + } + + nh, err := parseHash(ns) + if err != nil { + return nil, errInvalidNewObjId(err) + } + + return &Command{Old: oh, New: nh, Name: plumbing.ReferenceName(n)}, nil +} + +func parseHash(s string) (plumbing.Hash, error) { + if len(s) != hashSize { + return plumbing.ZeroHash, errInvalidHashSize(len(s)) + } + + if _, err := hex.DecodeString(s); err != nil { + return plumbing.ZeroHash, errInvalidHash(err) + } + + h := plumbing.NewHash(s) + return h, nil +} + +func (d *updReqDecoder) scanErrorOr(origErr error) error { + if err := d.s.Err(); err != nil { + return err + } + + return origErr +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go new file mode 100644 index 0000000000000000000000000000000000000000..44c05739d8db776eef79811c3d2f0e9914d00514 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go @@ -0,0 +1,75 @@ +package packp + +import ( + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" +) + +var ( + zeroHashString = plumbing.ZeroHash.String() +) + +// Encode writes the ReferenceUpdateRequest encoding to the stream. +func (r *ReferenceUpdateRequest) Encode(w io.Writer) error { + if err := r.validate(); err != nil { + return err + } + + e := pktline.NewEncoder(w) + + if err := r.encodeShallow(e, r.Shallow); err != nil { + return err + } + + if err := r.encodeCommands(e, r.Commands, r.Capabilities); err != nil { + return err + } + + if r.Packfile != nil { + if _, err := io.Copy(w, r.Packfile); err != nil { + return err + } + + return r.Packfile.Close() + } + + return nil +} + +func (r *ReferenceUpdateRequest) encodeShallow(e *pktline.Encoder, + h *plumbing.Hash) error { + + if h == nil { + return nil + } + + objId := []byte(h.String()) + return e.Encodef("%s%s", shallow, objId) +} + +func (r *ReferenceUpdateRequest) encodeCommands(e *pktline.Encoder, + cmds []*Command, cap *capability.List) error { + + if err := e.Encodef("%s\x00%s", + formatCommand(cmds[0]), cap.String()); err != nil { + return err + } + + for _, cmd := range cmds[1:] { + if err := e.Encodef(formatCommand(cmd)); err != nil { + return err + } + } + + return e.Flush() +} + +func formatCommand(cmd *Command) string { + o := cmd.Old.String() + n := cmd.New.String() + return fmt.Sprintf("%s %s %s", o, n, cmd.Name) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go new file mode 100644 index 0000000000000000000000000000000000000000..4bb22d0a066d2c5b31c78d2700e01ad3937e5b41 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go @@ -0,0 +1,98 @@ +package packp + +import ( + "bytes" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" +) + +// UploadPackRequest represents a upload-pack request. +// Zero-value is not safe, use NewUploadPackRequest instead. 
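// Illustrative sketch, not part of the vendored diff: encoding a minimal
// ReferenceUpdateRequest with a single create command, which is the shape the
// Encode/encodeCommands path above expects. The hash and reference name are
// placeholders; assumes imports of "bytes", "gopkg.in/src-d/go-git.v4/plumbing"
// and the packp package.
func examplePush() error {
	req := packp.NewReferenceUpdateRequest()
	req.Commands = append(req.Commands, &packp.Command{
		Name: plumbing.ReferenceName("refs/heads/master"),
		Old:  plumbing.ZeroHash, // zero old hash plus non-zero new hash means Create
		New:  plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5"),
	})

	var buf bytes.Buffer
	return req.Encode(&buf) // validates the commands, then writes the pkt-lines
}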
+type UploadPackRequest struct { + UploadRequest + UploadHaves +} + +// NewUploadPackRequest creates a new UploadPackRequest and returns a pointer. +func NewUploadPackRequest() *UploadPackRequest { + ur := NewUploadRequest() + return &UploadPackRequest{ + UploadHaves: UploadHaves{}, + UploadRequest: *ur, + } +} + +// NewUploadPackRequestFromCapabilities creates a new UploadPackRequest and +// returns a pointer. The request capabilities are filled with the most optiomal +// ones, based on the adv value (advertaised capabilities), the UploadPackRequest +// it has no wants, haves or shallows and an infinite depth +func NewUploadPackRequestFromCapabilities(adv *capability.List) *UploadPackRequest { + ur := NewUploadRequestFromCapabilities(adv) + return &UploadPackRequest{ + UploadHaves: UploadHaves{}, + UploadRequest: *ur, + } +} + +// IsEmpty a request if empty if Haves are contained in the Wants, or if Wants +// length is zero +func (r *UploadPackRequest) IsEmpty() bool { + return isSubset(r.Wants, r.Haves) +} + +func isSubset(needle []plumbing.Hash, haystack []plumbing.Hash) bool { + for _, h := range needle { + found := false + for _, oh := range haystack { + if h == oh { + found = true + break + } + } + + if !found { + return false + } + } + + return true +} + +// UploadHaves is a message to signal the references that a client has in a +// upload-pack. Do not use this directly. Use UploadPackRequest request instead. +type UploadHaves struct { + Haves []plumbing.Hash +} + +// Encode encodes the UploadHaves into the Writer. If flush is true, a flush +// command will be encoded at the end of the writer content. +func (u *UploadHaves) Encode(w io.Writer, flush bool) error { + e := pktline.NewEncoder(w) + + plumbing.HashesSort(u.Haves) + + var last plumbing.Hash + for _, have := range u.Haves { + if bytes.Compare(last[:], have[:]) == 0 { + continue + } + + if err := e.Encodef("have %s\n", have); err != nil { + return fmt.Errorf("sending haves for %q: %s", have, err) + } + + last = have + } + + if flush && len(u.Haves) != 0 { + if err := e.Flush(); err != nil { + return fmt.Errorf("sending flush-pkt after haves: %s", err) + } + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go new file mode 100644 index 0000000000000000000000000000000000000000..ac456f3dc34f421157fce154dae1304491f4910f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go @@ -0,0 +1,105 @@ +package packp + +import ( + "errors" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// ErrUploadPackResponseNotDecoded is returned if Read is called without +// decoding first +var ErrUploadPackResponseNotDecoded = errors.New("upload-pack-response should be decoded") + +// UploadPackResponse contains all the information responded by the upload-pack +// service, the response implements io.ReadCloser that allows to read the +// packfile directly from it. +type UploadPackResponse struct { + ShallowUpdate + ServerResponse + + r io.ReadCloser + isShallow bool + isMultiACK bool + isOk bool +} + +// NewUploadPackResponse create a new UploadPackResponse instance, the request +// being responded by the response is required. 
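// Illustrative sketch, not part of the vendored diff: a fetch negotiation
// where every want is already contained in the haves, so IsEmpty reports that
// no packfile needs to be requested. The hash is a placeholder; assumes
// imports of "gopkg.in/src-d/go-git.v4/plumbing" and the packp package.
func exampleNegotiation() bool {
	want := plumbing.NewHash("6ecf0ef2c2dffb796033e5a02219af86ec6584e5")

	req := packp.NewUploadPackRequest()
	req.Wants = append(req.Wants, want)
	req.Haves = append(req.Haves, want) // the client already has this object

	return req.IsEmpty() // true: the wants are a subset of the haves
}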
+func NewUploadPackResponse(req *UploadPackRequest) *UploadPackResponse { + isShallow := !req.Depth.IsZero() + isMultiACK := req.Capabilities.Supports(capability.MultiACK) || + req.Capabilities.Supports(capability.MultiACKDetailed) + + return &UploadPackResponse{ + isShallow: isShallow, + isMultiACK: isMultiACK, + } +} + +// NewUploadPackResponseWithPackfile creates a new UploadPackResponse instance, +// and sets its packfile reader. +func NewUploadPackResponseWithPackfile(req *UploadPackRequest, + pf io.ReadCloser) *UploadPackResponse { + + r := NewUploadPackResponse(req) + r.r = pf + return r +} + +// Decode decodes all the responses sent by upload-pack service into the struct +// and prepares it to read the packfile using the Read method +func (r *UploadPackResponse) Decode(reader io.ReadCloser) error { + if r.isShallow { + if err := r.ShallowUpdate.Decode(reader); err != nil { + return err + } + } + + if err := r.ServerResponse.Decode(reader, r.isMultiACK); err != nil { + return err + } + + // now the reader is ready to read the packfile content + r.r = reader + + return nil +} + +// Encode encodes an UploadPackResponse. +func (r *UploadPackResponse) Encode(w io.Writer) (err error) { + if r.isShallow { + if err := r.ShallowUpdate.Encode(w); err != nil { + return err + } + } + + if err := r.ServerResponse.Encode(w); err != nil { + return err + } + + defer ioutil.CheckClose(r.r, &err) + _, err = io.Copy(w, r.r) + return err +} + +// Read reads the packfile data, if the request was done with any Sideband +// capability the content read should be demultiplexed. If the methods wasn't +// called before the ErrUploadPackResponseNotDecoded will be return +func (r *UploadPackResponse) Read(p []byte) (int, error) { + if r.r == nil { + return 0, ErrUploadPackResponseNotDecoded + } + + return r.r.Read(p) +} + +// Close the underlying reader, if any +func (r *UploadPackResponse) Close() error { + if r.r == nil { + return nil + } + + return r.r.Close() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go new file mode 100644 index 0000000000000000000000000000000000000000..8fa103efb0243542ebec4d07f69f55e00f141059 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go @@ -0,0 +1,179 @@ +package plumbing + +import ( + "errors" + "fmt" + "strings" +) + +const ( + refPrefix = "refs/" + refHeadPrefix = refPrefix + "heads/" + refTagPrefix = refPrefix + "tags/" + refRemotePrefix = refPrefix + "remotes/" + refNotePrefix = refPrefix + "notes/" + symrefPrefix = "ref: " +) + +var ( + refPrefixes = []string{ + refHeadPrefix, + refTagPrefix, + refRemotePrefix, + refNotePrefix, + refPrefix, + } +) + +var ( + ErrReferenceNotFound = errors.New("reference not found") +) + +// ReferenceType reference type's +type ReferenceType int8 + +const ( + InvalidReference ReferenceType = 0 + HashReference ReferenceType = 1 + SymbolicReference ReferenceType = 2 +) + +func (r ReferenceType) String() string { + switch r { + case InvalidReference: + return "invalid-reference" + case HashReference: + return "hash-reference" + case SymbolicReference: + return "symbolic-reference" + } + + return "" +} + +// ReferenceName reference name's +type ReferenceName string + +func (r ReferenceName) String() string { + return string(r) +} + +// Short returns the short name of a ReferenceName +func (r ReferenceName) Short() string { + return r.removeRefPrefix() +} + +// Instead of hardcoding a number of components, we should remove the prefixes +// 
refHeadPrefix, refTagPrefix, refRemotePrefix, refNotePrefix and refPrefix +func (r ReferenceName) removeRefPrefix() string { + s := string(r) + for _, prefix := range refPrefixes { + s = strings.TrimPrefix(s, prefix) + } + return s +} + +const ( + HEAD ReferenceName = "HEAD" + Master ReferenceName = "refs/heads/master" +) + +// Reference is a representation of git reference +type Reference struct { + t ReferenceType + n ReferenceName + h Hash + target ReferenceName +} + +// NewReferenceFromStrings creates a reference from name and target as string, +// the resulting reference can be a SymbolicReference or a HashReference base +// on the target provided +func NewReferenceFromStrings(name, target string) *Reference { + n := ReferenceName(name) + + if strings.HasPrefix(target, symrefPrefix) { + target := ReferenceName(target[len(symrefPrefix):]) + return NewSymbolicReference(n, target) + } + + return NewHashReference(n, NewHash(target)) +} + +// NewSymbolicReference creates a new SymbolicReference reference +func NewSymbolicReference(n, target ReferenceName) *Reference { + return &Reference{ + t: SymbolicReference, + n: n, + target: target, + } +} + +// NewHashReference creates a new HashReference reference +func NewHashReference(n ReferenceName, h Hash) *Reference { + return &Reference{ + t: HashReference, + n: n, + h: h, + } +} + +// Type return the type of a reference +func (r *Reference) Type() ReferenceType { + return r.t +} + +// Name return the name of a reference +func (r *Reference) Name() ReferenceName { + return r.n +} + +// Hash return the hash of a hash reference +func (r *Reference) Hash() Hash { + return r.h +} + +// Target return the target of a symbolic reference +func (r *Reference) Target() ReferenceName { + return r.target +} + +// IsBranch check if a reference is a branch +func (r *Reference) IsBranch() bool { + return strings.HasPrefix(string(r.n), refHeadPrefix) +} + +// IsNote check if a reference is a note +func (r *Reference) IsNote() bool { + return strings.HasPrefix(string(r.n), refNotePrefix) +} + +// IsRemote check if a reference is a remote +func (r *Reference) IsRemote() bool { + return strings.HasPrefix(string(r.n), refRemotePrefix) +} + +// IsTag check if a reference is a tag +func (r *Reference) IsTag() bool { + return strings.HasPrefix(string(r.n), refTagPrefix) +} + +// Strings dump a reference as a [2]string +func (r *Reference) Strings() [2]string { + var o [2]string + o[0] = r.Name().String() + + switch r.Type() { + case HashReference: + o[1] = r.Hash().String() + case SymbolicReference: + o[1] = symrefPrefix + r.Target().String() + } + + return o +} + +func (r *Reference) String() string { + s := r.Strings() + return fmt.Sprintf("%s %s", s[1], s[0]) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go new file mode 100644 index 0000000000000000000000000000000000000000..5f053b200c0b25787aa6ff330b6134977e2d0db6 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go @@ -0,0 +1,11 @@ +package plumbing + +// Revision represents a git revision +// to get more details about git revisions +// please check git manual page : +// https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html +type Revision string + +func (r Revision) String() string { + return string(r) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go new file mode 100644 index 
0000000000000000000000000000000000000000..eb9afaf59ffc2ec9ba19fd7ea7e80405a1cc1146 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go @@ -0,0 +1,149 @@ +// Package revlist provides support to access the ancestors of commits, in a +// similar way as the git-rev-list command. +package revlist + +import ( + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/object" + "gopkg.in/src-d/go-git.v4/plumbing/storer" +) + +// Objects applies a complementary set. It gets all the hashes from all +// the reachable objects from the given objects. Ignore param are object hashes +// that we want to ignore on the result. All that objects must be accessible +// from the object storer. +func Objects( + s storer.EncodedObjectStorer, + objects []plumbing.Hash, + ignore []plumbing.Hash) ([]plumbing.Hash, error) { + + seen := hashListToSet(ignore) + result := make(map[plumbing.Hash]bool) + + walkerFunc := func(h plumbing.Hash) { + if !seen[h] { + result[h] = true + seen[h] = true + } + } + + for _, h := range objects { + if err := processObject(s, h, seen, walkerFunc); err != nil { + return nil, err + } + } + + return hashSetToList(result), nil +} + +// processObject obtains the object using the hash an process it depending of its type +func processObject( + s storer.EncodedObjectStorer, + h plumbing.Hash, + seen map[plumbing.Hash]bool, + walkerFunc func(h plumbing.Hash), +) error { + o, err := s.EncodedObject(plumbing.AnyObject, h) + if err != nil { + return err + } + + do, err := object.DecodeObject(s, o) + if err != nil { + return err + } + + switch do := do.(type) { + case *object.Commit: + return reachableObjects(do, seen, walkerFunc) + case *object.Tree: + return iterateCommitTrees(seen, do, walkerFunc) + case *object.Tag: + walkerFunc(do.Hash) + return processObject(s, do.Target, seen, walkerFunc) + case *object.Blob: + walkerFunc(do.Hash) + default: + return fmt.Errorf("object type not valid: %s. "+ + "Object reference: %s", o.Type(), o.Hash()) + } + + return nil +} + +// reachableObjects returns, using the callback function, all the reachable +// objects from the specified commit. To avoid to iterate over seen commits, +// if a commit hash is into the 'seen' set, we will not iterate all his trees +// and blobs objects. 
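// Illustrative sketch, not part of the vendored diff: using revlist.Objects
// to collect every object reachable from a commit, leaving out the hashes
// listed in ignore, as the Objects and processObject functions above describe.
// The storer and hashes are assumed to come from an open repository; assumes
// imports of "gopkg.in/src-d/go-git.v4/plumbing", ".../plumbing/storer" and
// ".../plumbing/revlist".
func reachableFrom(s storer.EncodedObjectStorer, head plumbing.Hash,
	ignore []plumbing.Hash) ([]plumbing.Hash, error) {

	// Returns the commits, trees, blobs and tags reachable from head, with
	// the ignored hashes excluded from the result.
	return revlist.Objects(s, []plumbing.Hash{head}, ignore)
}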
+func reachableObjects( + commit *object.Commit, + seen map[plumbing.Hash]bool, + cb func(h plumbing.Hash)) error { + return object.WalkCommitHistory(commit, func(commit *object.Commit) error { + if seen[commit.Hash] { + return nil + } + + cb(commit.Hash) + + tree, err := commit.Tree() + if err != nil { + return err + } + + return iterateCommitTrees(seen, tree, cb) + }) +} + +// iterateCommitTrees iterate all reachable trees from the given commit +func iterateCommitTrees( + seen map[plumbing.Hash]bool, + tree *object.Tree, + cb func(h plumbing.Hash)) error { + if seen[tree.Hash] { + return nil + } + + cb(tree.Hash) + + treeWalker := object.NewTreeWalker(tree, true) + + for { + _, e, err := treeWalker.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + if seen[e.Hash] { + continue + } + + cb(e.Hash) + } + + return nil +} + +func hashSetToList(hashes map[plumbing.Hash]bool) []plumbing.Hash { + var result []plumbing.Hash + for key := range hashes { + result = append(result, key) + } + + return result +} + +func hashListToSet(hashes []plumbing.Hash) map[plumbing.Hash]bool { + result := make(map[plumbing.Hash]bool) + for _, h := range hashes { + result[h] = true + } + + return result +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..4d4f179c61839f9e3504f901d366f00fb8e1ad61 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go @@ -0,0 +1,2 @@ +// Package storer defines the interfaces to store objects, references, etc. +package storer diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go new file mode 100644 index 0000000000000000000000000000000000000000..e087296ec9dbd34210e76868b7caf79f682489d7 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go @@ -0,0 +1,9 @@ +package storer + +import "gopkg.in/src-d/go-git.v4/plumbing/format/index" + +// IndexStorer generic storage of index.Index +type IndexStorer interface { + SetIndex(*index.Index) error + Index() (*index.Index, error) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go new file mode 100644 index 0000000000000000000000000000000000000000..a733ee6f05f07492d5bbc94cc01d6fb34b8fa6cc --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go @@ -0,0 +1,248 @@ +package storer + +import ( + "errors" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +var ( + //ErrStop is used to stop a ForEach function in an Iter + ErrStop = errors.New("stop iter") +) + +// EncodedObjectStorer generic storage of objects +type EncodedObjectStorer interface { + // NewEncodedObject returns a new plumbing.EncodedObject, the real type + // of the object can be a custom implementation or the default one, + // plumbing.MemoryObject. + NewEncodedObject() plumbing.EncodedObject + // SetEncodedObject saves an object into the storage, the object should + // be create with the NewEncodedObject, method, and file if the type is + // not supported. + SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error) + // EncodedObject gets an object by hash with the given + // plumbing.ObjectType. Implementors should return + // (nil, plumbing.ErrObjectNotFound) if an object doesn't exist with + // both the given hash and object type. 
+ // + // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject, + // TreeObject and AnyObject. If plumbing.AnyObject is given, the object must + // be looked up regardless of its type. + EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) + // IterObjects returns a custom EncodedObjectStorer over all the object + // on the storage. + // + // Valid plumbing.ObjectType values are CommitObject, BlobObject, TagObject, + IterEncodedObjects(plumbing.ObjectType) (EncodedObjectIter, error) +} + +// Transactioner is a optional method for ObjectStorer, it enable transaction +// base write and read operations in the storage +type Transactioner interface { + // Begin starts a transaction. + Begin() Transaction +} + +// PackfileWriter is a optional method for ObjectStorer, it enable direct write +// of packfile to the storage +type PackfileWriter interface { + // PackfileWriter returns a writer for writing a packfile to the storage + // + // If the Storer not implements PackfileWriter the objects should be written + // using the Set method. + PackfileWriter() (io.WriteCloser, error) +} + +// EncodedObjectIter is a generic closable interface for iterating over objects. +type EncodedObjectIter interface { + Next() (plumbing.EncodedObject, error) + ForEach(func(plumbing.EncodedObject) error) error + Close() +} + +// Transaction is an in-progress storage transaction. A transaction must end +// with a call to Commit or Rollback. +type Transaction interface { + SetEncodedObject(plumbing.EncodedObject) (plumbing.Hash, error) + EncodedObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error) + Commit() error + Rollback() error +} + +// EncodedObjectLookupIter implements EncodedObjectIter. It iterates over a +// series of object hashes and yields their associated objects by retrieving +// each one from object storage. The retrievals are lazy and only occur when the +// iterator moves forward with a call to Next(). +// +// The EncodedObjectLookupIter must be closed with a call to Close() when it is +// no longer needed. +type EncodedObjectLookupIter struct { + storage EncodedObjectStorer + series []plumbing.Hash + t plumbing.ObjectType + pos int +} + +// NewEncodedObjectLookupIter returns an object iterator given an object storage +// and a slice of object hashes. +func NewEncodedObjectLookupIter( + storage EncodedObjectStorer, t plumbing.ObjectType, series []plumbing.Hash) *EncodedObjectLookupIter { + return &EncodedObjectLookupIter{ + storage: storage, + series: series, + t: t, + } +} + +// Next returns the next object from the iterator. If the iterator has reached +// the end it will return io.EOF as an error. If the object can't be found in +// the object storage, it will return plumbing.ErrObjectNotFound as an error. +// If the object is retreieved successfully error will be nil. +func (iter *EncodedObjectLookupIter) Next() (plumbing.EncodedObject, error) { + if iter.pos >= len(iter.series) { + return nil, io.EOF + } + + hash := iter.series[iter.pos] + obj, err := iter.storage.EncodedObject(iter.t, hash) + if err == nil { + iter.pos++ + } + + return obj, err +} + +// ForEach call the cb function for each object contained on this iter until +// an error happends or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. 
+func (iter *EncodedObjectLookupIter) ForEach(cb func(plumbing.EncodedObject) error) error { + return ForEachIterator(iter, cb) +} + +// Close releases any resources used by the iterator. +func (iter *EncodedObjectLookupIter) Close() { + iter.pos = len(iter.series) +} + +// EncodedObjectSliceIter implements EncodedObjectIter. It iterates over a +// series of objects stored in a slice and yields each one in turn when Next() +// is called. +// +// The EncodedObjectSliceIter must be closed with a call to Close() when it is +// no longer needed. +type EncodedObjectSliceIter struct { + series []plumbing.EncodedObject + pos int +} + +// NewEncodedObjectSliceIter returns an object iterator for the given slice of +// objects. +func NewEncodedObjectSliceIter(series []plumbing.EncodedObject) *EncodedObjectSliceIter { + return &EncodedObjectSliceIter{ + series: series, + } +} + +// Next returns the next object from the iterator. If the iterator has reached +// the end it will return io.EOF as an error. If the object is retreieved +// successfully error will be nil. +func (iter *EncodedObjectSliceIter) Next() (plumbing.EncodedObject, error) { + if len(iter.series) == 0 { + return nil, io.EOF + } + + obj := iter.series[0] + iter.series = iter.series[1:] + + return obj, nil +} + +// ForEach call the cb function for each object contained on this iter until +// an error happends or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *EncodedObjectSliceIter) ForEach(cb func(plumbing.EncodedObject) error) error { + return ForEachIterator(iter, cb) +} + +// Close releases any resources used by the iterator. +func (iter *EncodedObjectSliceIter) Close() { + iter.series = []plumbing.EncodedObject{} +} + +// MultiEncodedObjectIter implements EncodedObjectIter. It iterates over several +// EncodedObjectIter, +// +// The MultiObjectIter must be closed with a call to Close() when it is no +// longer needed. +type MultiEncodedObjectIter struct { + iters []EncodedObjectIter + pos int +} + +// NewMultiEncodedObjectIter returns an object iterator for the given slice of +// objects. +func NewMultiEncodedObjectIter(iters []EncodedObjectIter) EncodedObjectIter { + return &MultiEncodedObjectIter{iters: iters} +} + +// Next returns the next object from the iterator, if one iterator reach io.EOF +// is removed and the next one is used. +func (iter *MultiEncodedObjectIter) Next() (plumbing.EncodedObject, error) { + if len(iter.iters) == 0 { + return nil, io.EOF + } + + obj, err := iter.iters[0].Next() + if err == io.EOF { + iter.iters[0].Close() + iter.iters = iter.iters[1:] + return iter.Next() + } + + return obj, err +} + +// ForEach call the cb function for each object contained on this iter until +// an error happends or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *MultiEncodedObjectIter) ForEach(cb func(plumbing.EncodedObject) error) error { + return ForEachIterator(iter, cb) +} + +// Close releases any resources used by the iterator. +func (iter *MultiEncodedObjectIter) Close() { + for _, i := range iter.iters { + i.Close() + } +} + +type bareIterator interface { + Next() (plumbing.EncodedObject, error) + Close() +} + +// ForEachIterator is a helper function to build iterators without need to +// rewrite the same ForEach function each time. 
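// Illustrative sketch, not part of the vendored diff: the ErrStop convention
// used by the iterators above. Returning storer.ErrStop from the callback
// ends the iteration early without ForEach reporting an error, and the
// iterator is closed for the caller. Assumes imports of
// "gopkg.in/src-d/go-git.v4/plumbing" and ".../plumbing/storer".
func firstCommitObject(s storer.EncodedObjectStorer) (plumbing.EncodedObject, error) {
	iter, err := s.IterEncodedObjects(plumbing.CommitObject)
	if err != nil {
		return nil, err
	}

	var found plumbing.EncodedObject
	err = iter.ForEach(func(o plumbing.EncodedObject) error {
		found = o
		return storer.ErrStop // stop after the first object; not treated as an error
	})

	return found, err
}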
+func ForEachIterator(iter bareIterator, cb func(plumbing.EncodedObject) error) error { + defer iter.Close() + for { + obj, err := iter.Next() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + if err := cb(obj); err != nil { + if err == ErrStop { + return nil + } + + return err + } + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go new file mode 100644 index 0000000000000000000000000000000000000000..988c78466787d6a5d5bfca38a1cde74ececf2404 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go @@ -0,0 +1,171 @@ +package storer + +import ( + "errors" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +const MaxResolveRecursion = 1024 + +// ErrMaxResolveRecursion is returned by ResolveReference is MaxResolveRecursion +// is exceeded +var ErrMaxResolveRecursion = errors.New("max. recursion level reached") + +// ReferenceStorer is a generic storage of references. +type ReferenceStorer interface { + SetReference(*plumbing.Reference) error + Reference(plumbing.ReferenceName) (*plumbing.Reference, error) + IterReferences() (ReferenceIter, error) + RemoveReference(plumbing.ReferenceName) error +} + +// ReferenceIter is a generic closable interface for iterating over references. +type ReferenceIter interface { + Next() (*plumbing.Reference, error) + ForEach(func(*plumbing.Reference) error) error + Close() +} + +type referenceFilteredIter struct { + ff func(r *plumbing.Reference) bool + iter ReferenceIter +} + +// NewReferenceFilteredIter returns a reference iterator for the given reference +// Iterator. This iterator will iterate only references that accomplish the +// provided function. +func NewReferenceFilteredIter( + ff func(r *plumbing.Reference) bool, iter ReferenceIter) ReferenceIter { + return &referenceFilteredIter{ff, iter} +} + +// Next returns the next reference from the iterator. If the iterator has reached +// the end it will return io.EOF as an error. +func (iter *referenceFilteredIter) Next() (*plumbing.Reference, error) { + for { + r, err := iter.iter.Next() + if err != nil { + return nil, err + } + + if iter.ff(r) { + return r, nil + } + + continue + } +} + +// ForEach call the cb function for each reference contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stopped but no error is returned. The iterator is closed. +func (iter *referenceFilteredIter) ForEach(cb func(*plumbing.Reference) error) error { + defer iter.Close() + for { + r, err := iter.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + if err := cb(r); err != nil { + if err == ErrStop { + break + } + + return err + } + } + + return nil +} + +// Close releases any resources used by the iterator. +func (iter *referenceFilteredIter) Close() { + iter.iter.Close() +} + +// ReferenceSliceIter implements ReferenceIter. It iterates over a series of +// references stored in a slice and yields each one in turn when Next() is +// called. +// +// The ReferenceSliceIter must be closed with a call to Close() when it is no +// longer needed. +type ReferenceSliceIter struct { + series []*plumbing.Reference + pos int +} + +// NewReferenceSliceIter returns a reference iterator for the given slice of +// objects. 
+func NewReferenceSliceIter(series []*plumbing.Reference) ReferenceIter { + return &ReferenceSliceIter{ + series: series, + } +} + +// Next returns the next reference from the iterator. If the iterator has +// reached the end it will return io.EOF as an error. +func (iter *ReferenceSliceIter) Next() (*plumbing.Reference, error) { + if iter.pos >= len(iter.series) { + return nil, io.EOF + } + + obj := iter.series[iter.pos] + iter.pos++ + return obj, nil +} + +// ForEach call the cb function for each reference contained on this iter until +// an error happends or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *ReferenceSliceIter) ForEach(cb func(*plumbing.Reference) error) error { + defer iter.Close() + for _, r := range iter.series { + if err := cb(r); err != nil { + if err == ErrStop { + return nil + } + + return err + } + } + + return nil +} + +// Close releases any resources used by the iterator. +func (iter *ReferenceSliceIter) Close() { + iter.pos = len(iter.series) +} + +// ResolveReference resolves a SymbolicReference to a HashReference. +func ResolveReference(s ReferenceStorer, n plumbing.ReferenceName) (*plumbing.Reference, error) { + r, err := s.Reference(n) + if err != nil || r == nil { + return r, err + } + return resolveReference(s, r, 0) +} + +func resolveReference(s ReferenceStorer, r *plumbing.Reference, recursion int) (*plumbing.Reference, error) { + if r.Type() != plumbing.SymbolicReference { + return r, nil + } + + if recursion > MaxResolveRecursion { + return nil, ErrMaxResolveRecursion + } + + t, err := s.Reference(r.Target()) + if err != nil { + return nil, err + } + + recursion++ + return resolveReference(s, t, recursion) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go new file mode 100644 index 0000000000000000000000000000000000000000..39aaaa540d8af6cf31c68b9757dda99ba5c9485a --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go @@ -0,0 +1,10 @@ +package storer + +import "gopkg.in/src-d/go-git.v4/plumbing" + +// ShallowStorer is a storage of references to shallow commits by hash, +// meaning that these commits have missing parents because of a shallow fetch. +type ShallowStorer interface { + SetShallow([]plumbing.Hash) error + Shallow() ([]plumbing.Hash, error) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go new file mode 100644 index 0000000000000000000000000000000000000000..0b96c0ee1c9f29c64afd5a8552cda88a14b6183e --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go @@ -0,0 +1,7 @@ +package storer + +// Storer is a basic storer for encoded objects and references. +type Storer interface { + EncodedObjectStorer + ReferenceStorer +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go new file mode 100644 index 0000000000000000000000000000000000000000..a398a743f6b0ab544a307652e543ee64970e43c9 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go @@ -0,0 +1,48 @@ +// Package client contains helper function to deal with the different client +// protocols. 
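// Illustrative sketch, not part of the vendored diff: resolving HEAD (a
// symbolic reference) down to the hash reference it ultimately points to,
// using the ResolveReference helper above. Assumes imports of
// "gopkg.in/src-d/go-git.v4/plumbing" and ".../plumbing/storer".
func headHash(s storer.ReferenceStorer) (plumbing.Hash, error) {
	ref, err := storer.ResolveReference(s, plumbing.HEAD)
	if err != nil || ref == nil {
		return plumbing.ZeroHash, err
	}

	return ref.Hash(), nil
}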
+package client + +import ( + "fmt" + + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/plumbing/transport/file" + "gopkg.in/src-d/go-git.v4/plumbing/transport/git" + "gopkg.in/src-d/go-git.v4/plumbing/transport/http" + "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh" +) + +// Protocols are the protocols supported by default. +var Protocols = map[string]transport.Transport{ + "http": http.DefaultClient, + "https": http.DefaultClient, + "ssh": ssh.DefaultClient, + "git": git.DefaultClient, + "file": file.DefaultClient, +} + +// InstallProtocol adds or modifies an existing protocol. +func InstallProtocol(scheme string, c transport.Transport) { + if c == nil { + delete(Protocols, scheme) + return + } + + Protocols[scheme] = c +} + +// NewClient returns the appropriate client among of the set of known protocols: +// http://, https://, ssh:// and file://. +// See `InstallProtocol` to add or modify protocols. +func NewClient(endpoint transport.Endpoint) (transport.Transport, error) { + f, ok := Protocols[endpoint.Scheme] + if !ok { + return nil, fmt.Errorf("unsupported scheme %q", endpoint.Scheme) + } + + if f == nil { + return nil, fmt.Errorf("malformed client for scheme %q, client is defined as nil", endpoint.Scheme) + } + + return f, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go new file mode 100644 index 0000000000000000000000000000000000000000..3fcdef24d55e027a980678e6c2e24c4f56c02556 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go @@ -0,0 +1,143 @@ +// Package transport includes the implementation for different transport +// protocols. +// +// `Client` can be used to fetch and send packfiles to a git server. +// The `client` package provides higher level functions to instantiate the +// appropriate `Client` based on the repository URL. +// +// go-git supports HTTP and SSH (see `Protocols`), but you can also install +// your own protocols (see the `client` package). +// +// Each protocol has its own implementation of `Client`, but you should +// generally not use them directly, use `client.NewClient` instead. +package transport + +import ( + "errors" + "fmt" + "io" + "net/url" + "regexp" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" +) + +var ( + ErrRepositoryNotFound = errors.New("repository not found") + ErrEmptyRemoteRepository = errors.New("remote repository is empty") + ErrAuthorizationRequired = errors.New("authorization required") + ErrEmptyUploadPackRequest = errors.New("empty git-upload-pack given") + ErrInvalidAuthMethod = errors.New("invalid auth method") + ErrAlreadyConnected = errors.New("session already established") +) + +const ( + UploadPackServiceName = "git-upload-pack" + ReceivePackServiceName = "git-receive-pack" +) + +// Transport can initiate git-upload-pack and git-receive-pack processes. +// It is implemented both by the client and the server, making this a RPC. +type Transport interface { + // NewUploadPackSession starts a git-upload-pack session for an endpoint. + NewUploadPackSession(Endpoint, AuthMethod) (UploadPackSession, error) + // NewReceivePackSession starts a git-receive-pack session for an endpoint. + NewReceivePackSession(Endpoint, AuthMethod) (ReceivePackSession, error) +} + +type Session interface { + // AdvertisedReferences retrieves the advertised references for a + // repository. 
+ // If the repository does not exist, returns ErrRepositoryNotFound. + // If the repository exists, but is empty, returns ErrEmptyRemoteRepository. + AdvertisedReferences() (*packp.AdvRefs, error) + io.Closer +} + +type AuthMethod interface { + fmt.Stringer + Name() string +} + +// UploadPackSession represents a git-upload-pack session. +// A git-upload-pack session has two steps: reference discovery +// (AdvertisedReferences) and uploading pack (UploadPack). +type UploadPackSession interface { + Session + // UploadPack takes a git-upload-pack request and returns a response, + // including a packfile. Don't be confused by terminology, the client + // side of a git-upload-pack is called git-fetch-pack, although here + // the same interface is used to make it RPC-like. + UploadPack(*packp.UploadPackRequest) (*packp.UploadPackResponse, error) +} + +// ReceivePackSession represents a git-receive-pack session. +// A git-receive-pack session has two steps: reference discovery +// (AdvertisedReferences) and receiving pack (ReceivePack). +// In that order. +type ReceivePackSession interface { + Session + // ReceivePack sends an update references request and a packfile + // reader and returns a ReportStatus and error. Don't be confused by + // terminology, the client side of a git-receive-pack is called + // git-send-pack, although here the same interface is used to make it + // RPC-like. + ReceivePack(*packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) +} + +type Endpoint url.URL + +var ( + isSchemeRegExp = regexp.MustCompile("^[^:]+://") + scpLikeUrlRegExp = regexp.MustCompile("^(?P<user>[^@]+@)?(?P<host>[^:]+):/?(?P<path>.+)$") +) + +func NewEndpoint(endpoint string) (Endpoint, error) { + endpoint = transformSCPLikeIfNeeded(endpoint) + + u, err := url.Parse(endpoint) + if err != nil { + return Endpoint{}, plumbing.NewPermanentError(err) + } + + if !u.IsAbs() { + return Endpoint{}, plumbing.NewPermanentError(fmt.Errorf( + "invalid endpoint: %s", endpoint, + )) + } + + return Endpoint(*u), nil +} + +func (e *Endpoint) String() string { + u := url.URL(*e) + return u.String() +} + +func transformSCPLikeIfNeeded(endpoint string) string { + if !isSchemeRegExp.MatchString(endpoint) && scpLikeUrlRegExp.MatchString(endpoint) { + m := scpLikeUrlRegExp.FindStringSubmatch(endpoint) + return fmt.Sprintf("ssh://%s%s/%s", m[1], m[2], m[3]) + } + + return endpoint +} + +// UnsupportedCapabilities are the capabilities not supported by any client +// implementation +var UnsupportedCapabilities = []capability.Capability{ + capability.MultiACK, + capability.MultiACKDetailed, + capability.ThinPack, +} + +// FilterUnsupportedCapabilities it filter out all the UnsupportedCapabilities +// from a capability.List, the intended usage is on the client implementation +// to filter the capabilities from an AdvRefs message. +func FilterUnsupportedCapabilities(list *capability.List) { + for _, c := range UnsupportedCapabilities { + list.Delete(c) + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go new file mode 100644 index 0000000000000000000000000000000000000000..58e60dbc3c82a84c03cca26e75c950abd3b13c9b --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go @@ -0,0 +1,80 @@ +// Package file implements the file transport protocol. 
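+//
+// It shells out to the local git-upload-pack and git-receive-pack binaries
+// against a repository on the filesystem. A minimal fetch-side sketch
+// (error handling elided, the path is purely illustrative):
+//
+//    ep, _ := transport.NewEndpoint("file:///path/to/repo.git")
+//    s, _ := file.DefaultClient.NewUploadPackSession(ep, nil)
+//    refs, _ := s.AdvertisedReferences()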
+package file + +import ( + "io" + "os/exec" + + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" +) + +// DefaultClient is the default local client. +var DefaultClient = NewClient( + transport.UploadPackServiceName, + transport.ReceivePackServiceName, +) + +type runner struct { + UploadPackBin string + ReceivePackBin string +} + +// NewClient returns a new local client using the given git-upload-pack and +// git-receive-pack binaries. +func NewClient(uploadPackBin, receivePackBin string) transport.Transport { + return common.NewClient(&runner{ + UploadPackBin: uploadPackBin, + ReceivePackBin: receivePackBin, + }) +} + +func (r *runner) Command(cmd string, ep transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { + switch cmd { + case transport.UploadPackServiceName: + cmd = r.UploadPackBin + case transport.ReceivePackServiceName: + cmd = r.ReceivePackBin + } + + if _, err := exec.LookPath(cmd); err != nil { + return nil, err + } + + return &command{cmd: exec.Command(cmd, ep.Path)}, nil +} + +type command struct { + cmd *exec.Cmd + closed bool +} + +func (c *command) Start() error { + return c.cmd.Start() +} + +func (c *command) StderrPipe() (io.Reader, error) { + return c.cmd.StderrPipe() +} + +func (c *command) StdinPipe() (io.WriteCloser, error) { + return c.cmd.StdinPipe() +} + +func (c *command) StdoutPipe() (io.Reader, error) { + return c.cmd.StdoutPipe() +} + +// Close waits for the command to exit. +func (c *command) Close() error { + if c.closed { + return nil + } + + return c.cmd.Process.Kill() +} + +func (c *command) Wait() error { + defer func() { c.closed = true }() + return c.cmd.Wait() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go new file mode 100644 index 0000000000000000000000000000000000000000..74085c276a6a4e1e65b5fa4bac64ffaa890c8a4c --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go @@ -0,0 +1,53 @@ +package file + +import ( + "fmt" + "os" + + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" + "gopkg.in/src-d/go-git.v4/plumbing/transport/server" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// ServeUploadPack serves a git-upload-pack request using standard output, input +// and error. This is meant to be used when implementing a git-upload-pack +// command. +func ServeUploadPack(path string) error { + ep, err := transport.NewEndpoint(fmt.Sprintf("file://%s", path)) + if err != nil { + return err + } + + // TODO: define and implement a server-side AuthMethod + s, err := server.DefaultServer.NewUploadPackSession(ep, nil) + if err != nil { + return fmt.Errorf("error creating session: %s", err) + } + + return common.ServeUploadPack(srvCmd, s) +} + +// ServeReceivePack serves a git-receive-pack request using standard output, +// input and error. This is meant to be used when implementing a +// git-receive-pack command. 
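+//
+// A minimal sketch of such a command, assuming the repository path is passed
+// as the first argument (the use of os.Args and log here is illustrative
+// only):
+//
+//    func main() {
+//        if err := file.ServeReceivePack(os.Args[1]); err != nil {
+//            log.Fatal(err)
+//        }
+//    }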
+func ServeReceivePack(path string) error { + ep, err := transport.NewEndpoint(fmt.Sprintf("file://%s", path)) + if err != nil { + return err + } + + // TODO: define and implement a server-side AuthMethod + s, err := server.DefaultServer.NewReceivePackSession(ep, nil) + if err != nil { + return fmt.Errorf("error creating session: %s", err) + } + + return common.ServeReceivePack(srvCmd, s) +} + +var srvCmd = common.ServerCommand{ + Stdin: os.Stdin, + Stdout: ioutil.WriteNopCloser(os.Stdout), + Stderr: os.Stderr, +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go new file mode 100644 index 0000000000000000000000000000000000000000..24134c1b295a930a3ee0ec2dcaf06bd5436dcb88 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go @@ -0,0 +1,107 @@ +// Package git implements the git transport protocol. +package git + +import ( + "fmt" + "io" + "net" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// DefaultClient is the default git client. +var DefaultClient = common.NewClient(&runner{}) + +type runner struct{} + +// Command returns a new Command for the given cmd in the given Endpoint +func (r *runner) Command(cmd string, ep transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { + // auth not allowed since git protocol doesn't support authentication + if auth != nil { + return nil, transport.ErrInvalidAuthMethod + } + c := &command{command: cmd, endpoint: ep} + if err := c.connect(); err != nil { + return nil, err + } + return c, nil +} + +type command struct { + conn net.Conn + connected bool + command string + endpoint transport.Endpoint +} + +// Start executes the command sending the required message to the TCP connection +func (c *command) Start() error { + cmd := endpointToCommand(c.command, c.endpoint) + + e := pktline.NewEncoder(c.conn) + return e.Encode([]byte(cmd)) +} + +func (c *command) connect() error { + if c.connected { + return transport.ErrAlreadyConnected + } + + var err error + c.conn, err = net.Dial("tcp", c.getHostWithPort()) + if err != nil { + return err + } + + c.connected = true + return nil +} + +func (c *command) getHostWithPort() string { + host := c.endpoint.Host + if strings.Index(c.endpoint.Host, ":") == -1 { + host += ":9418" + } + + return host +} + +// StderrPipe git protocol doesn't have any dedicated error channel +func (c *command) StderrPipe() (io.Reader, error) { + return nil, nil +} + +// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent +// call to the Close function from the connection, a command execution in git +// protocol can't be closed or killed +func (c *command) StdinPipe() (io.WriteCloser, error) { + return ioutil.WriteNopCloser(c.conn), nil +} + +// StdoutPipe return the underlying connection as Reader +func (c *command) StdoutPipe() (io.Reader, error) { + return c.conn, nil +} + +func endpointToCommand(cmd string, ep transport.Endpoint) string { + return fmt.Sprintf("%s %s%chost=%s%c", cmd, ep.Path, 0, ep.Host, 0) +} + +// Wait no-op function, required by the interface +func (c *command) Wait() error { + return nil +} + +// Close closes the TCP connection and connection. 
+func (c *command) Close() error { + if !c.connected { + return nil + } + + c.connected = false + return c.conn.Close() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go new file mode 100644 index 0000000000000000000000000000000000000000..baad12ec6707552476a5d3ea5548a05c4cd8e08e --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go @@ -0,0 +1,152 @@ +// Package http implements the HTTP transport protocol. +package http + +import ( + "fmt" + "net/http" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/transport" +) + +type client struct { + c *http.Client +} + +// DefaultClient is the default HTTP client, which uses `http.DefaultClient`. +var DefaultClient = NewClient(nil) + +// NewClient creates a new client with a custom net/http client. +// See `InstallProtocol` to install and override default http client. +// Unless a properly initialized client is given, it will fall back into +// `http.DefaultClient`. +// +// Note that for HTTP client cannot distinguist between private repositories and +// unexistent repositories on GitHub. So it returns `ErrAuthorizationRequired` +// for both. +func NewClient(c *http.Client) transport.Transport { + if c == nil { + return &client{http.DefaultClient} + } + + return &client{ + c: c, + } +} + +func (c *client) NewUploadPackSession(ep transport.Endpoint, auth transport.AuthMethod) ( + transport.UploadPackSession, error) { + + return newUploadPackSession(c.c, ep, auth) +} + +func (c *client) NewReceivePackSession(ep transport.Endpoint, auth transport.AuthMethod) ( + transport.ReceivePackSession, error) { + + return newReceivePackSession(c.c, ep, auth) +} + +type session struct { + auth AuthMethod + client *http.Client + endpoint transport.Endpoint + advRefs *packp.AdvRefs +} + +func (*session) Close() error { + return nil +} + +func (s *session) applyAuthToRequest(req *http.Request) { + if s.auth == nil { + return + } + + s.auth.setAuth(req) +} + +// AuthMethod is concrete implementation of common.AuthMethod for HTTP services +type AuthMethod interface { + transport.AuthMethod + setAuth(r *http.Request) +} + +func basicAuthFromEndpoint(ep transport.Endpoint) *BasicAuth { + info := ep.User + if info == nil { + return nil + } + + p, ok := info.Password() + if !ok { + return nil + } + + u := info.Username() + return NewBasicAuth(u, p) +} + +// BasicAuth represent a HTTP basic auth +type BasicAuth struct { + username, password string +} + +// NewBasicAuth returns a basicAuth base on the given user and password +func NewBasicAuth(username, password string) *BasicAuth { + return &BasicAuth{username, password} +} + +func (a *BasicAuth) setAuth(r *http.Request) { + if a == nil { + return + } + + r.SetBasicAuth(a.username, a.password) +} + +// Name is name of the auth +func (a *BasicAuth) Name() string { + return "http-basic-auth" +} + +func (a *BasicAuth) String() string { + masked := "*******" + if a.password == "" { + masked = "<empty>" + } + + return fmt.Sprintf("%s - %s:%s", a.Name(), a.username, masked) +} + +// Err is a dedicated error to return errors based on status code +type Err struct { + Response *http.Response +} + +// NewErr returns a new Err based on a http response +func NewErr(r *http.Response) error { + if r.StatusCode >= http.StatusOK && r.StatusCode < http.StatusMultipleChoices { + return nil + } + + switch r.StatusCode { + case 
http.StatusUnauthorized: + return transport.ErrAuthorizationRequired + case http.StatusNotFound: + return transport.ErrRepositoryNotFound + } + + return plumbing.NewUnexpectedError(&Err{r}) +} + +// StatusCode returns the status code of the response +func (e *Err) StatusCode() int { + return e.Response.StatusCode +} + +func (e *Err) Error() string { + return fmt.Sprintf("unexpected requesting %q status code: %d", + e.Response.Request.URL, e.Response.StatusCode, + ) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go new file mode 100644 index 0000000000000000000000000000000000000000..7a3704927400d959008bdfddf9efc4a0c9d56247 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go @@ -0,0 +1,30 @@ +package http + +import ( + "errors" + "net/http" + + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/transport" +) + +var errReceivePackNotSupported = errors.New("receive-pack not supported yet") + +type rpSession struct { + *session +} + +func newReceivePackSession(c *http.Client, ep transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { + return &rpSession{&session{}}, nil +} + +func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { + + return nil, errReceivePackNotSupported +} + +func (s *rpSession) ReceivePack(*packp.ReferenceUpdateRequest) ( + *packp.ReportStatus, error) { + + return nil, errReceivePackNotSupported +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go new file mode 100644 index 0000000000000000000000000000000000000000..fd1787c95783126d6367ec172cb4c622af3bab71 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go @@ -0,0 +1,183 @@ +package http + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +type upSession struct { + *session +} + +func newUploadPackSession(c *http.Client, ep transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { + s := &session{ + auth: basicAuthFromEndpoint(ep), + client: c, + endpoint: ep, + } + if auth != nil { + a, ok := auth.(AuthMethod) + if !ok { + return nil, transport.ErrInvalidAuthMethod + } + + s.auth = a + } + + return &upSession{session: s}, nil +} + +func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { + if s.advRefs != nil { + return s.advRefs, nil + } + + url := fmt.Sprintf( + "%s/info/refs?service=%s", + s.endpoint.String(), transport.UploadPackServiceName, + ) + + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, err + } + + s.applyAuthToRequest(req) + s.applyHeadersToRequest(req, nil) + res, err := s.client.Do(req) + if err != nil { + return nil, err + } + + defer res.Body.Close() + + if res.StatusCode == http.StatusUnauthorized { + return nil, transport.ErrAuthorizationRequired + } + + ar := packp.NewAdvRefs() + if err := ar.Decode(res.Body); err != nil { + if err == packp.ErrEmptyAdvRefs { + err = transport.ErrEmptyRemoteRepository + } + + return nil, err + } + + 
transport.FilterUnsupportedCapabilities(ar.Capabilities) + s.advRefs = ar + return ar, nil +} + +func (s *upSession) UploadPack(req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { + if req.IsEmpty() { + return nil, transport.ErrEmptyUploadPackRequest + } + + if err := req.Validate(); err != nil { + return nil, err + } + + url := fmt.Sprintf( + "%s/%s", + s.endpoint.String(), transport.UploadPackServiceName, + ) + + content, err := uploadPackRequestToReader(req) + if err != nil { + return nil, err + } + + res, err := s.doRequest(http.MethodPost, url, content) + if err != nil { + return nil, err + } + + r, err := ioutil.NonEmptyReader(res.Body) + if err != nil { + if err == ioutil.ErrEmptyReader || err == io.ErrUnexpectedEOF { + return nil, transport.ErrEmptyUploadPackRequest + } + + return nil, err + } + + rc := ioutil.NewReadCloser(r, res.Body) + return common.DecodeUploadPackResponse(rc, req) +} + +// Close does nothing. +func (s *upSession) Close() error { + return nil +} + +func (s *upSession) doRequest(method, url string, content *bytes.Buffer) (*http.Response, error) { + var body io.Reader + if content != nil { + body = content + } + + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, plumbing.NewPermanentError(err) + } + + s.applyHeadersToRequest(req, content) + s.applyAuthToRequest(req) + + res, err := s.client.Do(req) + if err != nil { + return nil, plumbing.NewUnexpectedError(err) + } + + if err := NewErr(res); err != nil { + _ = res.Body.Close() + return nil, err + } + + return res, nil +} + +// it requires a bytes.Buffer, because we need to know the length +func (s *upSession) applyHeadersToRequest(req *http.Request, content *bytes.Buffer) { + req.Header.Add("User-Agent", "git/1.0") + req.Header.Add("Host", s.endpoint.Host) + + if content == nil { + req.Header.Add("Accept", "*/*") + return + } + + req.Header.Add("Accept", "application/x-git-upload-pack-result") + req.Header.Add("Content-Type", "application/x-git-upload-pack-request") + req.Header.Add("Content-Length", strconv.Itoa(content.Len())) +} + +func uploadPackRequestToReader(req *packp.UploadPackRequest) (*bytes.Buffer, error) { + buf := bytes.NewBuffer(nil) + e := pktline.NewEncoder(buf) + + if err := req.UploadRequest.Encode(buf); err != nil { + return nil, fmt.Errorf("sending upload-req message: %s", err) + } + + if err := req.UploadHaves.Encode(buf, false); err != nil { + return nil, fmt.Errorf("sending haves message: %s", err) + } + + if err := e.EncodeString("done\n"); err != nil { + return nil, err + } + + return buf, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go new file mode 100644 index 0000000000000000000000000000000000000000..f67ae29c5ba96450e931ea0a47c92af4461bc89b --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go @@ -0,0 +1,414 @@ +// Package common implements the git pack protocol with a pluggable transport. +// This is a low-level package to implement new transports. Use a concrete +// implementation instead (e.g. http, file, ssh). +// +// A simple example of usage can be found in the file package. 
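+//
+// In short, a transport only needs to supply a Commander able to run
+// git-upload-pack and git-receive-pack against an endpoint; NewClient then
+// wraps it into a transport.Transport. A sketch, where "myCommander" is a
+// hypothetical Commander implementation and "ep" a transport.Endpoint:
+//
+//    var t transport.Transport = common.NewClient(myCommander{})
+//    session, _ := t.NewUploadPackSession(ep, nil)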
+package common + +import ( + "bufio" + "errors" + "fmt" + "io" + "strings" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing/format/pktline" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +const ( + readErrorSecondsTimeout = 10 + errLinesBuffer = 1000 +) + +var ( + ErrTimeoutExceeded = errors.New("timeout exceeded") +) + +// Commander creates Command instances. This is the main entry point for +// transport implementations. +type Commander interface { + // Command creates a new Command for the given git command and + // endpoint. cmd can be git-upload-pack or git-receive-pack. An + // error should be returned if the endpoint is not supported or the + // command cannot be created (e.g. binary does not exist, connection + // cannot be established). + Command(cmd string, ep transport.Endpoint, auth transport.AuthMethod) (Command, error) +} + +// Command is used for a single command execution. +// This interface is modeled after exec.Cmd and ssh.Session in the standard +// library. +type Command interface { + // StderrPipe returns a pipe that will be connected to the command's + // standard error when the command starts. It should not be called after + // Start. + StderrPipe() (io.Reader, error) + // StdinPipe returns a pipe that will be connected to the command's + // standard input when the command starts. It should not be called after + // Start. The pipe should be closed when no more input is expected. + StdinPipe() (io.WriteCloser, error) + // StdoutPipe returns a pipe that will be connected to the command's + // standard output when the command starts. It should not be called after + // Start. + StdoutPipe() (io.Reader, error) + // Start starts the specified command. It does not wait for it to + // complete. + Start() error + // Wait waits for the command to exit. It must have been started by + // Start. The returned error is nil if the command runs, has no + // problems copying stdin, stdout, and stderr, and exits with a zero + // exit status. + Wait() error + // Close closes the command and releases any resources used by it. It + // can be called to forcibly finish the command without calling to Wait + // or to release resources after calling Wait. + Close() error +} + +type client struct { + cmdr Commander +} + +// NewClient creates a new client using the given Commander. +func NewClient(runner Commander) transport.Transport { + return &client{runner} +} + +// NewUploadPackSession creates a new UploadPackSession. +func (c *client) NewUploadPackSession(ep transport.Endpoint, auth transport.AuthMethod) ( + transport.UploadPackSession, error) { + + return c.newSession(transport.UploadPackServiceName, ep, auth) +} + +// NewReceivePackSession creates a new ReceivePackSession. 
+func (c *client) NewReceivePackSession(ep transport.Endpoint, auth transport.AuthMethod) ( + transport.ReceivePackSession, error) { + + return c.newSession(transport.ReceivePackServiceName, ep, auth) +} + +type session struct { + Stdin io.WriteCloser + Stdout io.Reader + Command Command + + isReceivePack bool + advRefs *packp.AdvRefs + packRun bool + finished bool + errLines chan string +} + +func (c *client) newSession(s string, ep transport.Endpoint, auth transport.AuthMethod) (*session, error) { + cmd, err := c.cmdr.Command(s, ep, auth) + if err != nil { + return nil, err + } + + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, err + } + + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + + stderr, err := cmd.StderrPipe() + if err != nil { + return nil, err + } + + if err := cmd.Start(); err != nil { + return nil, err + } + + return &session{ + Stdin: stdin, + Stdout: stdout, + Command: cmd, + errLines: c.listenErrors(stderr), + isReceivePack: s == transport.ReceivePackServiceName, + }, nil +} + +func (c *client) listenErrors(r io.Reader) chan string { + if r == nil { + return nil + } + + errLines := make(chan string, errLinesBuffer) + go func() { + s := bufio.NewScanner(r) + for s.Scan() { + line := string(s.Bytes()) + errLines <- line + } + }() + + return errLines +} + +// AdvertisedReferences retrieves the advertised references from the server. +func (s *session) AdvertisedReferences() (*packp.AdvRefs, error) { + if s.advRefs != nil { + return s.advRefs, nil + } + + ar := packp.NewAdvRefs() + if err := ar.Decode(s.Stdout); err != nil { + if err := s.handleAdvRefDecodeError(err); err != nil { + return nil, err + } + } + + transport.FilterUnsupportedCapabilities(ar.Capabilities) + s.advRefs = ar + return ar, nil +} + +func (s *session) handleAdvRefDecodeError(err error) error { + // If repository is not found, we get empty stdout and server writes an + // error to stderr. + if err == packp.ErrEmptyInput { + if err := s.checkNotFoundError(); err != nil { + return err + } + + return io.ErrUnexpectedEOF + } + + // For empty (but existing) repositories, we get empty advertised-references + // message. But valid. That is, it includes at least a flush. + if err == packp.ErrEmptyAdvRefs { + // Empty repositories are valid for git-receive-pack. + if s.isReceivePack { + return nil + } + + if err := s.finish(); err != nil { + return err + } + + return transport.ErrEmptyRemoteRepository + } + + // Some server sends the errors as normal content (git protocol), so when + // we try to decode it fails, we need to check the content of it, to detect + // not found errors + if uerr, ok := err.(*packp.ErrUnexpectedData); ok { + if isRepoNotFoundError(string(uerr.Data)) { + return transport.ErrRepositoryNotFound + } + } + + return err +} + +// UploadPack performs a request to the server to fetch a packfile. A reader is +// returned with the packfile content. The reader must be closed after reading. 
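+//
+// A sketch of the usual sequence ("want" stands for a plumbing.Hash taken
+// from the advertised references; error handling elided):
+//
+//    req := packp.NewUploadPackRequest()
+//    req.Wants = append(req.Wants, want)
+//    res, _ := s.UploadPack(req)
+//    // read the packfile from res and close it when done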
+func (s *session) UploadPack(req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { + if req.IsEmpty() { + return nil, transport.ErrEmptyUploadPackRequest + } + + if err := req.Validate(); err != nil { + return nil, err + } + + if _, err := s.AdvertisedReferences(); err != nil { + return nil, err + } + + s.packRun = true + + if err := uploadPack(s.Stdin, s.Stdout, req); err != nil { + return nil, err + } + + r, err := ioutil.NonEmptyReader(s.Stdout) + if err == ioutil.ErrEmptyReader { + if c, ok := s.Stdout.(io.Closer); ok { + _ = c.Close() + } + + return nil, transport.ErrEmptyUploadPackRequest + } + + if err != nil { + return nil, err + } + + wc := &waitCloser{s.Command} + rc := ioutil.NewReadCloser(r, wc) + + return DecodeUploadPackResponse(rc, req) +} + +func (s *session) ReceivePack(req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { + if _, err := s.AdvertisedReferences(); err != nil { + return nil, err + } + + s.packRun = true + + if err := req.Encode(s.Stdin); err != nil { + return nil, err + } + + if !req.Capabilities.Supports(capability.ReportStatus) { + // If we have neither report-status or sideband, we can only + // check return value error. + return nil, s.Command.Wait() + } + + report := packp.NewReportStatus() + if err := report.Decode(s.Stdout); err != nil { + return nil, err + } + + if err := report.Error(); err != nil { + return report, err + } + + return report, s.Command.Wait() +} + +func (s *session) finish() error { + if s.finished { + return nil + } + + s.finished = true + + // If we did not run a upload/receive-pack, we close the connection + // gracefully by sending a flush packet to the server. If the server + // operates correctly, it will exit with status 0. + if !s.packRun { + _, err := s.Stdin.Write(pktline.FlushPkt) + return err + } + + return nil +} + +func (s *session) Close() error { + if err := s.finish(); err != nil { + _ = s.Command.Close() + return nil + } + + return s.Command.Close() +} + +func (s *session) checkNotFoundError() error { + t := time.NewTicker(time.Second * readErrorSecondsTimeout) + defer t.Stop() + + select { + case <-t.C: + return ErrTimeoutExceeded + case line, ok := <-s.errLines: + if !ok { + return nil + } + + if isRepoNotFoundError(line) { + return transport.ErrRepositoryNotFound + } + + return fmt.Errorf("unknown error: %s", line) + } +} + +var ( + githubRepoNotFoundErr = "ERROR: Repository not found." + bitbucketRepoNotFoundErr = "conq: repository does not exist." + localRepoNotFoundErr = "does not appear to be a git repository" + gitProtocolNotFoundErr = "ERR \n Repository not found." +) + +func isRepoNotFoundError(s string) bool { + if strings.HasPrefix(s, githubRepoNotFoundErr) { + return true + } + + if strings.HasPrefix(s, bitbucketRepoNotFoundErr) { + return true + } + + if strings.HasSuffix(s, localRepoNotFoundErr) { + return true + } + + if strings.HasPrefix(s, gitProtocolNotFoundErr) { + return true + } + + return false +} + +var ( + nak = []byte("NAK") + eol = []byte("\n") +) + +// uploadPack implements the git-upload-pack protocol. 
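+// It writes the negotiation to the command's standard input in order: the
+// upload-request message, the have lines and a "done" pkt-line, then closes
+// the writer so the server can start streaming the packfile back.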
+func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error { + // TODO support multi_ack mode + // TODO support multi_ack_detailed mode + // TODO support acks for common objects + // TODO build a proper state machine for all these processing options + + if err := req.UploadRequest.Encode(w); err != nil { + return fmt.Errorf("sending upload-req message: %s", err) + } + + if err := req.UploadHaves.Encode(w, true); err != nil { + return fmt.Errorf("sending haves message: %s", err) + } + + if err := sendDone(w); err != nil { + return fmt.Errorf("sending done message: %s", err) + } + + if err := w.Close(); err != nil { + return fmt.Errorf("closing input: %s", err) + } + + return nil +} + +func sendDone(w io.Writer) error { + e := pktline.NewEncoder(w) + + return e.Encodef("done\n") +} + +// DecodeUploadPackResponse decodes r into a new packp.UploadPackResponse +func DecodeUploadPackResponse(r io.ReadCloser, req *packp.UploadPackRequest) ( + *packp.UploadPackResponse, error, +) { + res := packp.NewUploadPackResponse(req) + if err := res.Decode(r); err != nil { + return nil, fmt.Errorf("error decoding upload-pack response: %s", err) + } + + return res, nil +} + +type waitCloser struct { + Command Command +} + +// Close waits until the command exits and returns error, if any. +func (c *waitCloser) Close() error { + return c.Command.Wait() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go new file mode 100644 index 0000000000000000000000000000000000000000..dd6cfbea308a35b2a0df2aa07c699d63d2e30129 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go @@ -0,0 +1,72 @@ +package common + +import ( + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +// ServerCommand is used for a single server command execution. 
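+//
+// The file transport, for instance, wires it to the process streams roughly
+// like this (sketch only; "session" would come from server.DefaultServer, and
+// ioutil here is go-git's utils/ioutil package):
+//
+//    cmd := common.ServerCommand{
+//        Stdin:  os.Stdin,
+//        Stdout: ioutil.WriteNopCloser(os.Stdout),
+//        Stderr: os.Stderr,
+//    }
+//    err := common.ServeUploadPack(cmd, session)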
+type ServerCommand struct { + Stderr io.Writer + Stdout io.WriteCloser + Stdin io.Reader +} + +func ServeUploadPack(cmd ServerCommand, s transport.UploadPackSession) (err error) { + ioutil.CheckClose(cmd.Stdout, &err) + + ar, err := s.AdvertisedReferences() + if err != nil { + return err + } + + if err := ar.Encode(cmd.Stdout); err != nil { + return err + } + + req := packp.NewUploadPackRequest() + if err := req.Decode(cmd.Stdin); err != nil { + return err + } + + var resp *packp.UploadPackResponse + resp, err = s.UploadPack(req) + if err != nil { + return err + } + + return resp.Encode(cmd.Stdout) +} + +func ServeReceivePack(cmd ServerCommand, s transport.ReceivePackSession) error { + ar, err := s.AdvertisedReferences() + if err != nil { + return fmt.Errorf("internal error in advertised references: %s", err) + } + + if err := ar.Encode(cmd.Stdout); err != nil { + return fmt.Errorf("error in advertised references encoding: %s", err) + } + + req := packp.NewReferenceUpdateRequest() + if err := req.Decode(cmd.Stdin); err != nil { + return fmt.Errorf("error decoding: %s", err) + } + + rs, err := s.ReceivePack(req) + if rs != nil { + if err := rs.Encode(cmd.Stdout); err != nil { + return fmt.Errorf("error in encoding report status %s", err) + } + } + + if err != nil { + return fmt.Errorf("error in receive pack: %s", err) + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go new file mode 100644 index 0000000000000000000000000000000000000000..cfe9f041b13d7bffa07e9b9c684981e1e22482ce --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go @@ -0,0 +1,59 @@ +package server + +import ( + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/storage/filesystem" + + "gopkg.in/src-d/go-billy.v2" + "gopkg.in/src-d/go-billy.v2/osfs" +) + +// DefaultLoader is a filesystem loader ignoring host and resolving paths to /. +var DefaultLoader = NewFilesystemLoader(osfs.New("/")) + +// Loader loads repository's storer.Storer based on an optional host and a path. +type Loader interface { + // Load loads a storer.Storer given a transport.Endpoint. + // Returns transport.ErrRepositoryNotFound if the repository does not + // exist. + Load(ep transport.Endpoint) (storer.Storer, error) +} + +type fsLoader struct { + base billy.Filesystem +} + +// NewFilesystemLoader creates a Loader that ignores host and resolves paths +// with a given base filesystem. +func NewFilesystemLoader(base billy.Filesystem) Loader { + return &fsLoader{base} +} + +// Load looks up the endpoint's path in the base file system and returns a +// storer for it. Returns transport.ErrRepositoryNotFound if a repository does +// not exist in the given path. +func (l *fsLoader) Load(ep transport.Endpoint) (storer.Storer, error) { + fs := l.base.Dir(ep.Path) + if _, err := fs.Stat("config"); err != nil { + return nil, transport.ErrRepositoryNotFound + } + + return filesystem.NewStorage(fs) +} + +// MapLoader is a Loader that uses a lookup map of storer.Storer by +// transport.Endpoint. +type MapLoader map[transport.Endpoint]storer.Storer + +// Load returns a storer.Storer for given a transport.Endpoint by looking it up +// in the map. Returns transport.ErrRepositoryNotFound if the endpoint does not +// exist. 
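+//
+// A sketch of using it, e.g. to serve an in-memory fixture in tests (assumes
+// the storage/memory package; error handling elided):
+//
+//    ep, _ := transport.NewEndpoint("file:///fixture.git")
+//    srv := server.NewServer(server.MapLoader{ep: memory.NewStorage()})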
+func (l MapLoader) Load(ep transport.Endpoint) (storer.Storer, error) { + s, ok := l[ep] + if !ok { + return nil, transport.ErrRepositoryNotFound + } + + return s, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go new file mode 100644 index 0000000000000000000000000000000000000000..89fce5f6584de5237764a7735a4fe40683c609ae --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go @@ -0,0 +1,433 @@ +// Package server implements the git server protocol. For most use cases, the +// transport-specific implementations should be used. +package server + +import ( + "errors" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" + "gopkg.in/src-d/go-git.v4/plumbing/revlist" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/plumbing/transport" +) + +var DefaultServer = NewServer(DefaultLoader) + +type server struct { + loader Loader + handler *handler +} + +// NewServer returns a transport.Transport implementing a git server, +// independent of transport. Each transport must wrap this. +func NewServer(loader Loader) transport.Transport { + return &server{loader, &handler{}} +} + +func (s *server) NewUploadPackSession(ep transport.Endpoint, auth transport.AuthMethod) (transport.UploadPackSession, error) { + sto, err := s.loader.Load(ep) + if err != nil { + return nil, err + } + + return s.handler.NewUploadPackSession(sto) +} + +func (s *server) NewReceivePackSession(ep transport.Endpoint, auth transport.AuthMethod) (transport.ReceivePackSession, error) { + sto, err := s.loader.Load(ep) + if err != nil { + return nil, err + } + + return s.handler.NewReceivePackSession(sto) +} + +type handler struct{} + +func (h *handler) NewUploadPackSession(s storer.Storer) (transport.UploadPackSession, error) { + return &upSession{ + session: session{storer: s}, + }, nil +} + +func (h *handler) NewReceivePackSession(s storer.Storer) (transport.ReceivePackSession, error) { + return &rpSession{ + session: session{storer: s}, + cmdStatus: map[plumbing.ReferenceName]error{}, + }, nil +} + +type session struct { + storer storer.Storer + caps *capability.List +} + +func (s *session) Close() error { + return nil +} + +func (s *session) SetAuth(transport.AuthMethod) error { + //TODO: deprecate + return nil +} + +func (s *session) checkSupportedCapabilities(cl *capability.List) error { + for _, c := range cl.All() { + if !s.caps.Supports(c) { + return fmt.Errorf("unsupported capability: %s", c) + } + } + + return nil +} + +type upSession struct { + session +} + +func (s *upSession) AdvertisedReferences() (*packp.AdvRefs, error) { + ar := packp.NewAdvRefs() + + if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { + return nil, err + } + + s.caps = ar.Capabilities + + if err := setReferences(s.storer, ar); err != nil { + return nil, err + } + + if err := setHEAD(s.storer, ar); err != nil { + return nil, err + } + + return ar, nil +} + +func (s *upSession) UploadPack(req *packp.UploadPackRequest) (*packp.UploadPackResponse, error) { + if req.IsEmpty() { + return nil, transport.ErrEmptyUploadPackRequest + } + + if err := req.Validate(); err != nil { + return nil, err + } + + if s.caps == nil { + s.caps = capability.NewList() + if err := s.setSupportedCapabilities(s.caps); err != nil { + 
return nil, err + } + } + + if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { + return nil, err + } + + s.caps = req.Capabilities + + if len(req.Shallows) > 0 { + return nil, fmt.Errorf("shallow not supported") + } + + objs, err := s.objectsToUpload(req) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + e := packfile.NewEncoder(pw, s.storer, false) + go func() { + _, err := e.Encode(objs) + pw.CloseWithError(err) + }() + + return packp.NewUploadPackResponseWithPackfile(req, pr), nil +} + +func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Hash, error) { + haves, err := revlist.Objects(s.storer, req.Haves, nil) + if err != nil { + return nil, err + } + + return revlist.Objects(s.storer, req.Wants, haves) +} + +func (*upSession) setSupportedCapabilities(c *capability.List) error { + if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { + return err + } + + if err := c.Set(capability.OFSDelta); err != nil { + return err + } + + return nil +} + +type rpSession struct { + session + cmdStatus map[plumbing.ReferenceName]error + firstErr error + unpackErr error +} + +func (s *rpSession) AdvertisedReferences() (*packp.AdvRefs, error) { + ar := packp.NewAdvRefs() + + if err := s.setSupportedCapabilities(ar.Capabilities); err != nil { + return nil, err + } + + s.caps = ar.Capabilities + + if err := setReferences(s.storer, ar); err != nil { + return nil, err + } + + if err := setHEAD(s.storer, ar); err != nil { + return nil, err + } + + return ar, nil +} + +var ( + ErrUpdateReference = errors.New("failed to update ref") +) + +func (s *rpSession) ReceivePack(req *packp.ReferenceUpdateRequest) (*packp.ReportStatus, error) { + if s.caps == nil { + s.caps = capability.NewList() + if err := s.setSupportedCapabilities(s.caps); err != nil { + return nil, err + } + } + + if err := s.checkSupportedCapabilities(req.Capabilities); err != nil { + return nil, err + } + + s.caps = req.Capabilities + + //TODO: Implement 'atomic' update of references. + + if err := s.writePackfile(req.Packfile); err != nil { + s.unpackErr = err + s.firstErr = err + return s.reportStatus(), err + } + + updatedRefs := s.updatedReferences(req) + + if s.caps.Supports(capability.Atomic) && s.firstErr != nil { + //TODO: add support for 'atomic' once we have reference + // transactions, currently we do not announce it. + rs := s.reportStatus() + for _, cs := range rs.CommandStatuses { + if cs.Error() == nil { + cs.Status = "" + } + } + } + + for name, ref := range updatedRefs { + //TODO: add support for 'delete-refs' once we can delete + // references, currently we do not announce it. 
+ err := s.storer.SetReference(ref) + s.setStatus(name, err) + } + + return s.reportStatus(), s.firstErr +} + +func (s *rpSession) updatedReferences(req *packp.ReferenceUpdateRequest) map[plumbing.ReferenceName]*plumbing.Reference { + refs := map[plumbing.ReferenceName]*plumbing.Reference{} + for _, cmd := range req.Commands { + exists, err := referenceExists(s.storer, cmd.Name) + if err != nil { + s.setStatus(cmd.Name, err) + continue + } + + switch cmd.Action() { + case packp.Create: + if exists { + s.setStatus(cmd.Name, ErrUpdateReference) + continue + } + + ref := plumbing.NewHashReference(cmd.Name, cmd.New) + refs[ref.Name()] = ref + case packp.Delete: + if !exists { + s.setStatus(cmd.Name, ErrUpdateReference) + continue + } + + if !s.caps.Supports(capability.DeleteRefs) { + s.setStatus(cmd.Name, fmt.Errorf("delete not supported")) + continue + } + + refs[cmd.Name] = nil + case packp.Update: + if !exists { + s.setStatus(cmd.Name, ErrUpdateReference) + continue + } + + if err != nil { + s.setStatus(cmd.Name, err) + continue + } + + ref := plumbing.NewHashReference(cmd.Name, cmd.New) + refs[ref.Name()] = ref + } + } + + return refs +} + +func (s *rpSession) failAtomicUpdate() (*packp.ReportStatus, error) { + rs := s.reportStatus() + for _, cs := range rs.CommandStatuses { + if cs.Error() == nil { + cs.Status = "atomic updated" + } + } + + return rs, s.firstErr +} + +func (s *rpSession) writePackfile(r io.ReadCloser) error { + if r == nil { + return nil + } + + if err := packfile.UpdateObjectStorage(s.storer, r); err != nil { + _ = r.Close() + return err + } + + return r.Close() +} + +func (s *rpSession) setStatus(ref plumbing.ReferenceName, err error) { + s.cmdStatus[ref] = err + if s.firstErr == nil && err != nil { + s.firstErr = err + } +} + +func (s *rpSession) reportStatus() *packp.ReportStatus { + if !s.caps.Supports(capability.ReportStatus) { + return nil + } + + rs := packp.NewReportStatus() + rs.UnpackStatus = "ok" + + if s.unpackErr != nil { + rs.UnpackStatus = s.unpackErr.Error() + } + + if s.cmdStatus == nil { + return rs + } + + for ref, err := range s.cmdStatus { + msg := "ok" + if err != nil { + msg = err.Error() + } + status := &packp.CommandStatus{ + ReferenceName: ref, + Status: msg, + } + rs.CommandStatuses = append(rs.CommandStatuses, status) + } + + return rs +} + +func (*rpSession) setSupportedCapabilities(c *capability.List) error { + if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil { + return err + } + + if err := c.Set(capability.OFSDelta); err != nil { + return err + } + + return c.Set(capability.ReportStatus) +} + +func setHEAD(s storer.Storer, ar *packp.AdvRefs) error { + ref, err := s.Reference(plumbing.HEAD) + if err == plumbing.ErrReferenceNotFound { + return nil + } + + if err != nil { + return err + } + + if ref.Type() == plumbing.SymbolicReference { + if err := ar.AddReference(ref); err != nil { + return nil + } + + ref, err = storer.ResolveReference(s, ref.Target()) + if err == plumbing.ErrReferenceNotFound { + return nil + } + + if err != nil { + return err + } + } + + if ref.Type() != plumbing.HashReference { + return plumbing.ErrInvalidType + } + + h := ref.Hash() + ar.Head = &h + + return nil +} + +func setReferences(s storer.Storer, ar *packp.AdvRefs) error { + //TODO: add peeled references. 
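+	// Walk every reference in the storer; only hash references are advertised
+	// here, symbolic references are skipped by the callback below (HEAD is
+	// handled separately in setHEAD).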
+ iter, err := s.IterReferences() + if err != nil { + return err + } + + return iter.ForEach(func(ref *plumbing.Reference) error { + if ref.Type() != plumbing.HashReference { + return nil + } + + ar.References[ref.Name().String()] = ref.Hash() + return nil + }) +} + +func referenceExists(s storer.ReferenceStorer, n plumbing.ReferenceName) (bool, error) { + _, err := s.Reference(n) + if err == plumbing.ErrReferenceNotFound { + return false, nil + } + + return err == nil, err +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go new file mode 100644 index 0000000000000000000000000000000000000000..f53e510e5b41a3a466fb6886512b73bf106f90ba --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go @@ -0,0 +1,165 @@ +package ssh + +import ( + "errors" + "fmt" + "net" + "os" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +var ErrEmptySSHAgentAddr = errors.New("SSH_AUTH_SOCK env variable is required") + +// AuthMethod is the interface all auth methods for the ssh client +// must implement. The clientConfig method returns the ssh client +// configuration needed to establish an ssh connection. +type AuthMethod interface { + clientConfig() *ssh.ClientConfig +} + +// The names of the AuthMethod implementations. To be returned by the +// Name() method. Most git servers only allow PublicKeysName and +// PublicKeysCallbackName. +const ( + KeyboardInteractiveName = "ssh-keyboard-interactive" + PasswordName = "ssh-password" + PasswordCallbackName = "ssh-password-callback" + PublicKeysName = "ssh-public-keys" + PublicKeysCallbackName = "ssh-public-key-callback" +) + +// KeyboardInteractive implements AuthMethod by using a +// prompt/response sequence controlled by the server. +type KeyboardInteractive struct { + User string + Challenge ssh.KeyboardInteractiveChallenge +} + +func (a *KeyboardInteractive) Name() string { + return KeyboardInteractiveName +} + +func (a *KeyboardInteractive) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *KeyboardInteractive) clientConfig() *ssh.ClientConfig { + return &ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.KeyboardInteractiveChallenge(a.Challenge)}, + } +} + +// Password implements AuthMethod by using the given password. +type Password struct { + User string + Pass string +} + +func (a *Password) Name() string { + return PasswordName +} + +func (a *Password) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *Password) clientConfig() *ssh.ClientConfig { + return &ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.Password(a.Pass)}, + } +} + +// PasswordCallback implements AuthMethod by using a callback +// to fetch the password. +type PasswordCallback struct { + User string + Callback func() (pass string, err error) +} + +func (a *PasswordCallback) Name() string { + return PasswordCallbackName +} + +func (a *PasswordCallback) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *PasswordCallback) clientConfig() *ssh.ClientConfig { + return &ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)}, + } +} + +// PublicKeys implements AuthMethod by using the given +// key pairs. 
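+//
+// An illustrative sketch of building it from a PEM-encoded private key
+// ("pemBytes" holds the key material; error handling elided):
+//
+//    signer, _ := ssh.ParsePrivateKey(pemBytes)
+//    auth := &PublicKeys{User: "git", Signer: signer}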
+type PublicKeys struct { + User string + Signer ssh.Signer +} + +func (a *PublicKeys) Name() string { + return PublicKeysName +} + +func (a *PublicKeys) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *PublicKeys) clientConfig() *ssh.ClientConfig { + return &ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)}, + } +} + +// PublicKeysCallback implements AuthMethod by asking a +// ssh.agent.Agent to act as a signer. +type PublicKeysCallback struct { + User string + Callback func() (signers []ssh.Signer, err error) +} + +func (a *PublicKeysCallback) Name() string { + return PublicKeysCallbackName +} + +func (a *PublicKeysCallback) String() string { + return fmt.Sprintf("user: %s, name: %s", a.User, a.Name()) +} + +func (a *PublicKeysCallback) clientConfig() *ssh.ClientConfig { + return &ssh.ClientConfig{ + User: a.User, + Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)}, + } +} + +const DefaultSSHUsername = "git" + +// NewSSHAgentAuth opens a pipe with the SSH agent and uses the pipe +// as the implementer of the public key callback function. +func NewSSHAgentAuth(user string) (*PublicKeysCallback, error) { + if user == "" { + user = DefaultSSHUsername + } + + sshAgentAddr := os.Getenv("SSH_AUTH_SOCK") + if sshAgentAddr == "" { + return nil, ErrEmptySSHAgentAddr + } + + pipe, err := net.Dial("unix", sshAgentAddr) + if err != nil { + return nil, fmt.Errorf("error connecting to SSH agent: %q", err) + } + + return &PublicKeysCallback{ + User: user, + Callback: agent.NewClient(pipe).Signers, + }, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go new file mode 100644 index 0000000000000000000000000000000000000000..5ed64d464fbfc5e4d7417cd4ec25e5cae67b20ea --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go @@ -0,0 +1,122 @@ +// Package ssh implements the SSH transport protocol. +package ssh + +import ( + "fmt" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common" + + "golang.org/x/crypto/ssh" +) + +// DefaultClient is the default SSH client. +var DefaultClient = common.NewClient(&runner{}) + +type runner struct{} + +func (r *runner) Command(cmd string, ep transport.Endpoint, auth transport.AuthMethod) (common.Command, error) { + c := &command{command: cmd, endpoint: ep} + if auth != nil { + c.setAuth(auth) + } + + if err := c.connect(); err != nil { + return nil, err + } + return c, nil +} + +type command struct { + *ssh.Session + connected bool + command string + endpoint transport.Endpoint + client *ssh.Client + auth AuthMethod +} + +func (c *command) setAuth(auth transport.AuthMethod) error { + a, ok := auth.(AuthMethod) + if !ok { + return transport.ErrInvalidAuthMethod + } + + c.auth = a + return nil +} + +func (c *command) Start() error { + return c.Session.Start(endpointToCommand(c.command, c.endpoint)) +} + +// Close closes the SSH session and connection. +func (c *command) Close() error { + if !c.connected { + return nil + } + + c.connected = false + + //XXX: If did read the full packfile, then the session might be already + // closed. 
+ _ = c.Session.Close() + + return c.client.Close() +} + +// connect connects to the SSH server, unless a AuthMethod was set with +// SetAuth method, by default uses an auth method based on PublicKeysCallback, +// it connects to a SSH agent, using the address stored in the SSH_AUTH_SOCK +// environment var. +func (c *command) connect() error { + if c.connected { + return transport.ErrAlreadyConnected + } + + if c.auth == nil { + if err := c.setAuthFromEndpoint(); err != nil { + return err + } + } + + var err error + c.client, err = ssh.Dial("tcp", c.getHostWithPort(), c.auth.clientConfig()) + if err != nil { + return err + } + + c.Session, err = c.client.NewSession() + if err != nil { + _ = c.client.Close() + return err + } + + c.connected = true + return nil +} + +func (c *command) getHostWithPort() string { + host := c.endpoint.Host + if strings.Index(c.endpoint.Host, ":") == -1 { + host += ":22" + } + + return host +} + +func (c *command) setAuthFromEndpoint() error { + var u string + if info := c.endpoint.User; info != nil { + u = info.Username() + } + + var err error + c.auth, err = NewSSHAgentAuth(u) + return err +} + +func endpointToCommand(cmd string, ep transport.Endpoint) string { + return fmt.Sprintf("%s '%s'", cmd, ep.Path) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/references.go b/vendor/gopkg.in/src-d/go-git.v4/references.go new file mode 100644 index 0000000000000000000000000000000000000000..957d7410d4c87f8631ec339d3819282a586500ee --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/references.go @@ -0,0 +1,236 @@ +package git + +import ( + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/object" + "gopkg.in/src-d/go-git.v4/utils/diff" + + "github.com/sergi/go-diff/diffmatchpatch" +) + +// References returns a slice of Commits for the file at "path", starting from +// the commit provided that contains the file from the provided path. The last +// commit into the returned slice is the commit where the file was created. +// If the provided commit does not contains the specified path, a nil slice is +// returned. The commits are sorted in commit order, newer to older. +// +// Caveats: +// +// - Moves and copies are not currently supported. +// +// - Cherry-picks are not detected unless there are no commits between them and +// therefore can appear repeated in the list. (see git path-id for hints on how +// to fix this). +func References(c *object.Commit, path string) ([]*object.Commit, error) { + var result []*object.Commit + seen := make(map[plumbing.Hash]struct{}, 0) + if err := walkGraph(&result, &seen, c, path); err != nil { + return nil, err + } + + object.SortCommits(result) + + // for merges of identical cherry-picks + return removeComp(path, result, equivalent) +} + +// Recursive traversal of the commit graph, generating a linear history of the +// path. +func walkGraph(result *[]*object.Commit, seen *map[plumbing.Hash]struct{}, current *object.Commit, path string) error { + // check and update seen + if _, ok := (*seen)[current.Hash]; ok { + return nil + } + (*seen)[current.Hash] = struct{}{} + + // if the path is not in the current commit, stop searching. + if _, err := current.File(path); err != nil { + return nil + } + + // optimization: don't traverse branches that does not + // contain the path. + parents := parentsContainingPath(path, current) + + switch len(parents) { + // if the path is not found in any of its parents, the path was + // created by this commit; we must add it to the revisions list and + // stop searching. 
This includes the case when current is the + // initial commit. + case 0: + *result = append(*result, current) + return nil + case 1: // only one parent contains the path + // if the file contents has change, add the current commit + different, err := differentContents(path, current, parents) + if err != nil { + return err + } + if len(different) == 1 { + *result = append(*result, current) + } + // in any case, walk the parent + return walkGraph(result, seen, parents[0], path) + default: // more than one parent contains the path + // TODO: detect merges that had a conflict, because they must be + // included in the result here. + for _, p := range parents { + err := walkGraph(result, seen, p, path) + if err != nil { + return err + } + } + } + return nil +} + +func parentsContainingPath(path string, c *object.Commit) []*object.Commit { + // TODO: benchmark this method making git.object.Commit.parent public instead of using + // an iterator + var result []*object.Commit + iter := c.Parents() + for { + parent, err := iter.Next() + if err != nil { + if err == io.EOF { + return result + } + panic("unreachable") + } + if _, err := parent.File(path); err == nil { + result = append(result, parent) + } + } +} + +// Returns an slice of the commits in "cs" that has the file "path", but with different +// contents than what can be found in "c". +func differentContents(path string, c *object.Commit, cs []*object.Commit) ([]*object.Commit, error) { + result := make([]*object.Commit, 0, len(cs)) + h, found := blobHash(path, c) + if !found { + return nil, object.ErrFileNotFound + } + for _, cx := range cs { + if hx, found := blobHash(path, cx); found && h != hx { + result = append(result, cx) + } + } + return result, nil +} + +// blobHash returns the hash of a path in a commit +func blobHash(path string, commit *object.Commit) (hash plumbing.Hash, found bool) { + file, err := commit.File(path) + if err != nil { + var empty plumbing.Hash + return empty, found + } + return file.Hash, true +} + +type contentsComparatorFn func(path string, a, b *object.Commit) (bool, error) + +// Returns a new slice of commits, with duplicates removed. Expects a +// sorted commit list. Duplication is defined according to "comp". It +// will always keep the first commit of a series of duplicated commits. +func removeComp(path string, cs []*object.Commit, comp contentsComparatorFn) ([]*object.Commit, error) { + result := make([]*object.Commit, 0, len(cs)) + if len(cs) == 0 { + return result, nil + } + result = append(result, cs[0]) + for i := 1; i < len(cs); i++ { + equals, err := comp(path, cs[i], cs[i-1]) + if err != nil { + return nil, err + } + if !equals { + result = append(result, cs[i]) + } + } + return result, nil +} + +// Equivalent commits are commits whose patch is the same. 
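+// Concretely, the diff of the file at "path" against each commit's first
+// parent is computed and compared, which is what lets removeComp collapse
+// adjacent cherry-picks of the same change in the sorted result.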
+func equivalent(path string, a, b *object.Commit) (bool, error) { + numParentsA := a.NumParents() + numParentsB := b.NumParents() + + // the first commit is not equivalent to anyone + // and "I think" merges can not be equivalent to anything + if numParentsA != 1 || numParentsB != 1 { + return false, nil + } + + diffsA, err := patch(a, path) + if err != nil { + return false, err + } + diffsB, err := patch(b, path) + if err != nil { + return false, err + } + + return sameDiffs(diffsA, diffsB), nil +} + +func patch(c *object.Commit, path string) ([]diffmatchpatch.Diff, error) { + // get contents of the file in the commit + file, err := c.File(path) + if err != nil { + return nil, err + } + content, err := file.Contents() + if err != nil { + return nil, err + } + + // get contents of the file in the first parent of the commit + var contentParent string + iter := c.Parents() + parent, err := iter.Next() + if err != nil { + return nil, err + } + file, err = parent.File(path) + if err != nil { + contentParent = "" + } else { + contentParent, err = file.Contents() + if err != nil { + return nil, err + } + } + + // compare the contents of parent and child + return diff.Do(content, contentParent), nil +} + +func sameDiffs(a, b []diffmatchpatch.Diff) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if !sameDiff(a[i], b[i]) { + return false + } + } + return true +} + +func sameDiff(a, b diffmatchpatch.Diff) bool { + if a.Type != b.Type { + return false + } + switch a.Type { + case 0: + return countLines(a.Text) == countLines(b.Text) + case 1, -1: + return a.Text == b.Text + default: + panic("unreachable") + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/remote.go b/vendor/gopkg.in/src-d/go-git.v4/remote.go new file mode 100644 index 0000000000000000000000000000000000000000..ca6b33eeabf7bb2e9ca718a2d7e3da98f3b95867 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/remote.go @@ -0,0 +1,547 @@ +package git + +import ( + "errors" + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability" + "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband" + "gopkg.in/src-d/go-git.v4/plumbing/revlist" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/plumbing/transport" + "gopkg.in/src-d/go-git.v4/plumbing/transport/client" + "gopkg.in/src-d/go-git.v4/storage" + "gopkg.in/src-d/go-git.v4/storage/memory" + "gopkg.in/src-d/go-git.v4/utils/ioutil" +) + +var NoErrAlreadyUpToDate = errors.New("already up-to-date") + +// Remote represents a connection to a remote repository. +type Remote struct { + c *config.RemoteConfig + s storage.Storer +} + +func newRemote(s storage.Storer, c *config.RemoteConfig) *Remote { + return &Remote{s: s, c: c} +} + +// Config returns the RemoteConfig object used to instantiate this Remote. +func (r *Remote) Config() *config.RemoteConfig { + return r.c +} + +func (r *Remote) String() string { + fetch := r.c.URL + push := r.c.URL + + return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%s (push)", r.c.Name, fetch, push) +} + +// Fetch fetches references from the remote to the local repository. +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. +func (r *Remote) Fetch(o *FetchOptions) error { + _, err := r.fetch(o) + return err +} + +// Push performs a push to the remote. 
Returns NoErrAlreadyUpToDate if the +// remote was already up-to-date. +func (r *Remote) Push(o *PushOptions) (err error) { + // TODO: Support deletes. + // TODO: Support pushing tags. + // TODO: Check if force update is given, otherwise reject non-fast forward. + // TODO: Sideband suppor + + if o.RemoteName == "" { + o.RemoteName = r.c.Name + } + + if err := o.Validate(); err != nil { + return err + } + + if o.RemoteName != r.c.Name { + return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name) + } + + s, err := newSendPackSession(r.c.URL, o.Auth) + if err != nil { + return err + } + + ar, err := s.AdvertisedReferences() + if err != nil { + return err + } + + remoteRefs, err := ar.AllReferences() + if err != nil { + return err + } + + req := packp.NewReferenceUpdateRequestFromCapabilities(ar.Capabilities) + if err := r.addReferencesToUpdate(o.RefSpecs, remoteRefs, req); err != nil { + return err + } + + if len(req.Commands) == 0 { + return NoErrAlreadyUpToDate + } + + objects, err := objectsToPush(req.Commands) + if err != nil { + return err + } + + haves, err := referencesToHashes(remoteRefs) + if err != nil { + return err + } + + hashesToPush, err := revlist.Objects(r.s, objects, haves) + if err != nil { + return err + } + + rs, err := pushHashes(s, r.s, req, hashesToPush) + if err != nil { + return err + } + + return rs.Error() +} + +func (r *Remote) fetch(o *FetchOptions) (refs storer.ReferenceStorer, err error) { + if o.RemoteName == "" { + o.RemoteName = r.c.Name + } + + if err := o.Validate(); err != nil { + return nil, err + } + + if len(o.RefSpecs) == 0 { + o.RefSpecs = r.c.Fetch + } + + s, err := newUploadPackSession(r.c.URL, o.Auth) + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(s, &err) + + ar, err := s.AdvertisedReferences() + if err != nil { + return nil, err + } + + req, err := r.newUploadPackRequest(o, ar) + if err != nil { + return nil, err + } + + remoteRefs, err := ar.AllReferences() + if err != nil { + return nil, err + } + + req.Wants, err = getWants(o.RefSpecs, r.s, remoteRefs) + if len(req.Wants) == 0 { + return remoteRefs, NoErrAlreadyUpToDate + } + + req.Haves, err = getHaves(r.s) + if err != nil { + return nil, err + } + + if err := r.fetchPack(o, s, req); err != nil { + return nil, err + } + + if err := r.updateLocalReferenceStorage(o.RefSpecs, remoteRefs); err != nil { + return nil, err + } + + return remoteRefs, err +} + +func newUploadPackSession(url string, auth transport.AuthMethod) (transport.UploadPackSession, error) { + c, ep, err := newClient(url) + if err != nil { + return nil, err + } + + return c.NewUploadPackSession(ep, auth) +} + +func newSendPackSession(url string, auth transport.AuthMethod) (transport.ReceivePackSession, error) { + c, ep, err := newClient(url) + if err != nil { + return nil, err + } + + return c.NewReceivePackSession(ep, auth) +} + +func newClient(url string) (transport.Transport, transport.Endpoint, error) { + ep, err := transport.NewEndpoint(url) + if err != nil { + return nil, transport.Endpoint{}, err + } + + c, err := client.NewClient(ep) + if err != nil { + return nil, transport.Endpoint{}, err + } + + return c, ep, err +} + +func (r *Remote) fetchPack(o *FetchOptions, s transport.UploadPackSession, + req *packp.UploadPackRequest) (err error) { + + reader, err := s.UploadPack(req) + if err != nil { + return err + } + + defer ioutil.CheckClose(reader, &err) + + if err := r.updateShallow(o, reader); err != nil { + return err + } + + if err = packfile.UpdateObjectStorage(r.s, + 
buildSidebandIfSupported(req.Capabilities, reader, o.Progress), + ); err != nil { + return err + } + + return err +} + +func (r *Remote) addReferencesToUpdate(refspecs []config.RefSpec, + remoteRefs storer.ReferenceStorer, + req *packp.ReferenceUpdateRequest) error { + + for _, rs := range refspecs { + iter, err := r.s.IterReferences() + if err != nil { + return err + } + + err = iter.ForEach(func(ref *plumbing.Reference) error { + return r.addReferenceIfRefSpecMatches( + rs, remoteRefs, ref, req, + ) + }) + if err != nil { + return err + } + } + + return nil +} + +func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec, + remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference, + req *packp.ReferenceUpdateRequest) error { + + if localRef.Type() != plumbing.HashReference { + return nil + } + + if !rs.Match(localRef.Name()) { + return nil + } + + dstName := rs.Dst(localRef.Name()) + oldHash := plumbing.ZeroHash + newHash := localRef.Hash() + + iter, err := remoteRefs.IterReferences() + if err != nil { + return err + } + + err = iter.ForEach(func(remoteRef *plumbing.Reference) error { + if remoteRef.Type() != plumbing.HashReference { + return nil + } + + if dstName != remoteRef.Name() { + return nil + } + + oldHash = remoteRef.Hash() + return nil + }) + + if oldHash == newHash { + return nil + } + + req.Commands = append(req.Commands, &packp.Command{ + Name: dstName, + Old: oldHash, + New: newHash, + }) + return nil +} + +func getHaves(localRefs storer.ReferenceStorer) ([]plumbing.Hash, error) { + iter, err := localRefs.IterReferences() + if err != nil { + return nil, err + } + + var haves []plumbing.Hash + err = iter.ForEach(func(ref *plumbing.Reference) error { + if ref.Type() != plumbing.HashReference { + return nil + } + + haves = append(haves, ref.Hash()) + return nil + }) + if err != nil { + return nil, err + } + + return haves, nil +} + +func getWants( + spec []config.RefSpec, localStorer storage.Storer, remoteRefs storer.ReferenceStorer, +) ([]plumbing.Hash, error) { + wantTags := true + for _, s := range spec { + if !s.IsWildcard() { + wantTags = false + break + } + } + + iter, err := remoteRefs.IterReferences() + if err != nil { + return nil, err + } + + wants := map[plumbing.Hash]bool{} + err = iter.ForEach(func(ref *plumbing.Reference) error { + if !config.MatchAny(spec, ref.Name()) { + if !ref.IsTag() || !wantTags { + return nil + } + } + + if ref.Type() == plumbing.SymbolicReference { + ref, err = storer.ResolveReference(remoteRefs, ref.Name()) + if err != nil { + return err + } + } + + if ref.Type() != plumbing.HashReference { + return nil + } + + hash := ref.Hash() + + exists, err := objectExists(localStorer, hash) + if err != nil { + return err + } + + if !exists { + wants[hash] = true + } + + return nil + }) + if err != nil { + return nil, err + } + + var result []plumbing.Hash + for h := range wants { + result = append(result, h) + } + + return result, nil +} + +func objectExists(s storer.EncodedObjectStorer, h plumbing.Hash) (bool, error) { + _, err := s.EncodedObject(plumbing.AnyObject, h) + if err == plumbing.ErrObjectNotFound { + return false, nil + } + + return true, err +} + +func (r *Remote) newUploadPackRequest(o *FetchOptions, + ar *packp.AdvRefs) (*packp.UploadPackRequest, error) { + + req := packp.NewUploadPackRequestFromCapabilities(ar.Capabilities) + + if o.Depth != 0 { + req.Depth = packp.DepthCommits(o.Depth) + if err := req.Capabilities.Set(capability.Shallow); err != nil { + return nil, err + } + } + + if o.Progress == nil && 
ar.Capabilities.Supports(capability.NoProgress) { + if err := req.Capabilities.Set(capability.NoProgress); err != nil { + return nil, err + } + } + + return req, nil +} + +func buildSidebandIfSupported(l *capability.List, reader io.Reader, p sideband.Progress) io.Reader { + var t sideband.Type + + switch { + case l.Supports(capability.Sideband): + t = sideband.Sideband + case l.Supports(capability.Sideband64k): + t = sideband.Sideband64k + default: + return reader + } + + d := sideband.NewDemuxer(t, reader) + d.Progress = p + + return d +} + +func (r *Remote) updateLocalReferenceStorage(specs []config.RefSpec, refs memory.ReferenceStorage) error { + for _, spec := range specs { + for _, ref := range refs { + if !spec.Match(ref.Name()) { + continue + } + + if ref.Type() != plumbing.HashReference { + continue + } + + name := spec.Dst(ref.Name()) + n := plumbing.NewHashReference(name, ref.Hash()) + if err := r.s.SetReference(n); err != nil { + return err + } + } + } + + return r.buildFetchedTags(refs) +} + +func (r *Remote) buildFetchedTags(refs storer.ReferenceStorer) error { + iter, err := refs.IterReferences() + if err != nil { + return err + } + + return iter.ForEach(func(ref *plumbing.Reference) error { + if !ref.IsTag() { + return nil + } + + _, err := r.s.EncodedObject(plumbing.AnyObject, ref.Hash()) + if err == plumbing.ErrObjectNotFound { + return nil + } + + if err != nil { + return err + } + + return r.s.SetReference(ref) + }) +} + +func objectsToPush(commands []*packp.Command) ([]plumbing.Hash, error) { + var objects []plumbing.Hash + for _, cmd := range commands { + if cmd.New == plumbing.ZeroHash { + continue + } + + objects = append(objects, cmd.New) + } + + return objects, nil +} + +func referencesToHashes(refs storer.ReferenceStorer) ([]plumbing.Hash, error) { + iter, err := refs.IterReferences() + if err != nil { + return nil, err + } + + var hs []plumbing.Hash + err = iter.ForEach(func(ref *plumbing.Reference) error { + if ref.Type() != plumbing.HashReference { + return nil + } + + hs = append(hs, ref.Hash()) + return nil + }) + if err != nil { + return nil, err + } + + return hs, nil +} + +func pushHashes(sess transport.ReceivePackSession, sto storer.EncodedObjectStorer, + req *packp.ReferenceUpdateRequest, hs []plumbing.Hash) (*packp.ReportStatus, error) { + + rd, wr := io.Pipe() + req.Packfile = rd + done := make(chan error) + go func() { + e := packfile.NewEncoder(wr, sto, false) + if _, err := e.Encode(hs); err != nil { + done <- wr.CloseWithError(err) + return + } + + done <- wr.Close() + }() + + rs, err := sess.ReceivePack(req) + if err != nil { + return nil, err + } + + if err := <-done; err != nil { + return nil, err + } + + return rs, nil +} + +func (r *Remote) updateShallow(o *FetchOptions, resp *packp.UploadPackResponse) error { + if o.Depth == 0 { + return nil + } + + return r.s.SetShallow(resp.Shallows) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/repository.go b/vendor/gopkg.in/src-d/go-git.v4/repository.go new file mode 100644 index 0000000000000000000000000000000000000000..9e325e4d44e0b4e0d6594e23f12ddb01449da6f9 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/repository.go @@ -0,0 +1,898 @@ +package git + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/internal/revision" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/object" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/storage" + 
"gopkg.in/src-d/go-git.v4/storage/filesystem" + + "gopkg.in/src-d/go-billy.v2" + "gopkg.in/src-d/go-billy.v2/osfs" +) + +var ( + ErrObjectNotFound = errors.New("object not found") + ErrInvalidReference = errors.New("invalid reference, should be a tag or a branch") + ErrRepositoryNotExists = errors.New("repository not exists") + ErrRepositoryAlreadyExists = errors.New("repository already exists") + ErrRemoteNotFound = errors.New("remote not found") + ErrRemoteExists = errors.New("remote already exists") + ErrWorktreeNotProvided = errors.New("worktree should be provided") + ErrIsBareRepository = errors.New("worktree not available in a bare repository") +) + +// Repository represents a git repository +type Repository struct { + Storer storage.Storer + + r map[string]*Remote + wt billy.Filesystem +} + +// Init creates an empty git repository, based on the given Storer and worktree. +// The worktree Filesystem is optional, if nil a bare repository is created. If +// the given storer is not empty ErrRepositoryAlreadyExists is returned +func Init(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { + r := newRepository(s, worktree) + _, err := r.Reference(plumbing.HEAD, false) + switch err { + case plumbing.ErrReferenceNotFound: + case nil: + return nil, ErrRepositoryAlreadyExists + default: + return nil, err + } + + h := plumbing.NewSymbolicReference(plumbing.HEAD, plumbing.Master) + if err := s.SetReference(h); err != nil { + return nil, err + } + + if worktree == nil { + r.setIsBare(true) + return r, nil + } + + return r, setWorktreeAndStoragePaths(r, worktree) +} + +func setWorktreeAndStoragePaths(r *Repository, worktree billy.Filesystem) error { + type fsBased interface { + Filesystem() billy.Filesystem + } + + // .git file is only created if the storage is file based and the file + // system is osfs.OS + fs, isFSBased := r.Storer.(fsBased) + if !isFSBased { + return nil + } + + _, isOS := fs.Filesystem().(*osfs.OS) + if !isOS { + return nil + } + + if err := createDotGitFile(worktree, fs.Filesystem()); err != nil { + return err + } + + return setConfigWorktree(r, worktree, fs.Filesystem()) +} + +func createDotGitFile(worktree, storage billy.Filesystem) error { + path, err := filepath.Rel(worktree.Base(), storage.Base()) + if err != nil { + path = storage.Base() + } + + if path == ".git" { + // not needed, since the folder is the default place + return nil + } + + f, err := worktree.Create(".git") + if err != nil { + return err + } + + defer f.Close() + _, err = fmt.Fprintf(f, "gitdir: %s\n", path) + return err +} + +func setConfigWorktree(r *Repository, worktree, storage billy.Filesystem) error { + path, err := filepath.Rel(storage.Base(), worktree.Base()) + if err != nil { + path = worktree.Base() + } + + if path == ".." { + // not needed, since the folder is the default place + return nil + } + + cfg, err := r.Storer.Config() + if err != nil { + return err + } + + cfg.Core.Worktree = path + return r.Storer.SetConfig(cfg) +} + +// Open opens a git repository using the given Storer and worktree filesystem, +// if the given storer is complete empty ErrRepositoryNotExists is returned. 
+// The worktree can be nil when the repository being opened is bare, if the +// repository is a normal one (not bare) and worktree is nil the err +// ErrWorktreeNotProvided is returned +func Open(s storage.Storer, worktree billy.Filesystem) (*Repository, error) { + _, err := s.Reference(plumbing.HEAD) + if err == plumbing.ErrReferenceNotFound { + return nil, ErrRepositoryNotExists + } + + if err != nil { + return nil, err + } + + cfg, err := s.Config() + if err != nil { + return nil, err + } + + if !cfg.Core.IsBare && worktree == nil { + return nil, ErrWorktreeNotProvided + } + + return newRepository(s, worktree), nil +} + +// Clone a repository into the given Storer and worktree Filesystem with the +// given options, if worktree is nil a bare repository is created. If the given +// storer is not empty ErrRepositoryAlreadyExists is returned +func Clone(s storage.Storer, worktree billy.Filesystem, o *CloneOptions) (*Repository, error) { + r, err := Init(s, worktree) + if err != nil { + return nil, err + } + + return r, r.clone(o) +} + +// PlainInit create an empty git repository at the given path. isBare defines +// if the repository will have worktree (non-bare) or not (bare), if the path +// is not empty ErrRepositoryAlreadyExists is returned +func PlainInit(path string, isBare bool) (*Repository, error) { + var wt, dot billy.Filesystem + + if isBare { + dot = osfs.New(path) + } else { + wt = osfs.New(path) + dot = wt.Dir(".git") + } + + s, err := filesystem.NewStorage(dot) + if err != nil { + return nil, err + } + + return Init(s, wt) +} + +// PlainOpen opens a git repository from the given path. It detects if the +// repository is bare or a normal one. If the path doesn't contain a valid +// repository ErrRepositoryNotExists is returned +func PlainOpen(path string) (*Repository, error) { + var wt, dot billy.Filesystem + + fs := osfs.New(path) + if _, err := fs.Stat(".git"); err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + dot = fs + } else { + wt = fs + dot = fs.Dir(".git") + } + + s, err := filesystem.NewStorage(dot) + if err != nil { + return nil, err + } + + return Open(s, wt) +} + +// PlainClone a repository into the path with the given options, isBare defines +// if the new repository will be bare or normal. 
If the path is not empty +// ErrRepositoryAlreadyExists is returned +func PlainClone(path string, isBare bool, o *CloneOptions) (*Repository, error) { + r, err := PlainInit(path, isBare) + if err != nil { + return nil, err + } + + return r, r.clone(o) +} + +func newRepository(s storage.Storer, worktree billy.Filesystem) *Repository { + return &Repository{ + Storer: s, + wt: worktree, + r: make(map[string]*Remote, 0), + } +} + +// Config return the repository config +func (r *Repository) Config() (*config.Config, error) { + return r.Storer.Config() +} + +// Remote return a remote if exists +func (r *Repository) Remote(name string) (*Remote, error) { + cfg, err := r.Storer.Config() + if err != nil { + return nil, err + } + + c, ok := cfg.Remotes[name] + if !ok { + return nil, ErrRemoteNotFound + } + + return newRemote(r.Storer, c), nil +} + +// Remotes returns a list with all the remotes +func (r *Repository) Remotes() ([]*Remote, error) { + cfg, err := r.Storer.Config() + if err != nil { + return nil, err + } + + remotes := make([]*Remote, len(cfg.Remotes)) + + var i int + for _, c := range cfg.Remotes { + remotes[i] = newRemote(r.Storer, c) + i++ + } + + return remotes, nil +} + +// CreateRemote creates a new remote +func (r *Repository) CreateRemote(c *config.RemoteConfig) (*Remote, error) { + if err := c.Validate(); err != nil { + return nil, err + } + + remote := newRemote(r.Storer, c) + + cfg, err := r.Storer.Config() + if err != nil { + return nil, err + } + + if _, ok := cfg.Remotes[c.Name]; ok { + return nil, ErrRemoteExists + } + + cfg.Remotes[c.Name] = c + return remote, r.Storer.SetConfig(cfg) +} + +// DeleteRemote delete a remote from the repository and delete the config +func (r *Repository) DeleteRemote(name string) error { + cfg, err := r.Storer.Config() + if err != nil { + return err + } + + if _, ok := cfg.Remotes[name]; !ok { + return ErrRemoteNotFound + } + + delete(cfg.Remotes, name) + return r.Storer.SetConfig(cfg) +} + +// Clone clones a remote repository +func (r *Repository) clone(o *CloneOptions) error { + if err := o.Validate(); err != nil { + return err + } + + c := &config.RemoteConfig{ + Name: o.RemoteName, + URL: o.URL, + } + + remote, err := r.CreateRemote(c) + if err != nil { + return err + } + + remoteRefs, err := remote.fetch(&FetchOptions{ + RefSpecs: r.cloneRefSpec(o, c), + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + }) + if err != nil { + return err + } + + head, err := storer.ResolveReference(remoteRefs, o.ReferenceName) + if err != nil { + return err + } + + if _, err := r.updateReferences(c.Fetch, o.ReferenceName, head); err != nil { + return err + } + + if err := r.updateWorktree(); err != nil { + return err + } + + if o.RecurseSubmodules != NoRecurseSubmodules && r.wt != nil { + if err := r.updateSubmodules(o.RecurseSubmodules); err != nil { + return err + } + } + + return r.updateRemoteConfig(remote, o, c, head) +} + +func (r *Repository) updateSubmodules(recursion SubmoduleRescursivity) error { + w, err := r.Worktree() + if err != nil { + return err + } + + s, err := w.Submodules() + if err != nil { + return err + } + + return s.Update(&SubmoduleUpdateOptions{ + Init: true, + RecurseSubmodules: recursion, + }) +} + +func (r *Repository) cloneRefSpec(o *CloneOptions, + c *config.RemoteConfig) []config.RefSpec { + + if !o.SingleBranch { + return c.Fetch + } + + var rs string + + if o.ReferenceName == plumbing.HEAD { + rs = fmt.Sprintf(refspecSingleBranchHEAD, c.Name) + } else { + rs = fmt.Sprintf(refspecSingleBranch, + 
o.ReferenceName.Short(), c.Name) + } + + return []config.RefSpec{config.RefSpec(rs)} +} + +func (r *Repository) setIsBare(isBare bool) error { + cfg, err := r.Storer.Config() + if err != nil { + return err + } + + cfg.Core.IsBare = isBare + return r.Storer.SetConfig(cfg) +} + +const ( + refspecSingleBranch = "+refs/heads/%s:refs/remotes/%s/%[1]s" + refspecSingleBranchHEAD = "+HEAD:refs/remotes/%s/HEAD" +) + +func (r *Repository) updateRemoteConfig(remote *Remote, o *CloneOptions, + c *config.RemoteConfig, head *plumbing.Reference) error { + + if !o.SingleBranch { + return nil + } + + c.Fetch = []config.RefSpec{config.RefSpec(fmt.Sprintf( + refspecSingleBranch, head.Name().Short(), c.Name, + ))} + + cfg, err := r.Storer.Config() + if err != nil { + return err + } + + cfg.Remotes[c.Name] = c + return r.Storer.SetConfig(cfg) +} + +func (r *Repository) updateReferences(spec []config.RefSpec, + headName plumbing.ReferenceName, resolvedHead *plumbing.Reference) (updated bool, err error) { + + if !resolvedHead.IsBranch() { + // Detached HEAD mode + head := plumbing.NewHashReference(plumbing.HEAD, resolvedHead.Hash()) + return updateReferenceStorerIfNeeded(r.Storer, head) + } + + refs := []*plumbing.Reference{ + // Create local reference for the resolved head + resolvedHead, + // Create local symbolic HEAD + plumbing.NewSymbolicReference(plumbing.HEAD, resolvedHead.Name()), + } + + refs = append(refs, r.calculateRemoteHeadReference(spec, resolvedHead)...) + + for _, ref := range refs { + u, err := updateReferenceStorerIfNeeded(r.Storer, ref) + if err != nil { + return updated, err + } + + if u { + updated = true + } + } + + return +} + +func (r *Repository) calculateRemoteHeadReference(spec []config.RefSpec, + resolvedHead *plumbing.Reference) []*plumbing.Reference { + + var refs []*plumbing.Reference + + // Create resolved HEAD reference with remote prefix if it does not + // exist. This is needed when using single branch and HEAD. + for _, rs := range spec { + name := resolvedHead.Name() + if !rs.Match(name) { + continue + } + + name = rs.Dst(name) + _, err := r.Storer.Reference(name) + if err == plumbing.ErrReferenceNotFound { + refs = append(refs, plumbing.NewHashReference(name, resolvedHead.Hash())) + } + } + + return refs +} + +func updateReferenceStorerIfNeeded( + s storer.ReferenceStorer, r *plumbing.Reference) (updated bool, err error) { + + p, err := s.Reference(r.Name()) + if err != nil && err != plumbing.ErrReferenceNotFound { + return false, err + } + + // we use the string method to compare references, is the easiest way + if err == plumbing.ErrReferenceNotFound || r.String() != p.String() { + if err := s.SetReference(r); err != nil { + return false, err + } + + return true, nil + } + + return false, nil +} + +// Pull incorporates changes from a remote repository into the current branch. +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. 
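+// An illustrative sketch of a pull from a consumer's point of view (not part
+// of the original documentation); the *Repository value r and the "origin"
+// remote are assumed to exist already:
+//
+//	err := r.Pull(&git.PullOptions{RemoteName: "origin"})
+//	switch err {
+//	case nil:
+//		// references and the worktree were updated
+//	case git.NoErrAlreadyUpToDate:
+//		// nothing new to fetch
+//	default:
+//		// handle the error
+//	}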
+func (r *Repository) Pull(o *PullOptions) error { + if err := o.Validate(); err != nil { + return err + } + + remote, err := r.Remote(o.RemoteName) + if err != nil { + return err + } + + remoteRefs, err := remote.fetch(&FetchOptions{ + Depth: o.Depth, + Auth: o.Auth, + Progress: o.Progress, + }) + + updated := true + if err == NoErrAlreadyUpToDate { + updated = false + } else if err != nil { + return err + } + + head, err := storer.ResolveReference(remoteRefs, o.ReferenceName) + if err != nil { + return err + } + + refsUpdated, err := r.updateReferences(remote.c.Fetch, o.ReferenceName, head) + if err != nil { + return err + } + + if refsUpdated { + updated = refsUpdated + } + + if !updated { + return NoErrAlreadyUpToDate + } + + if err := r.updateWorktree(); err != nil { + return err + } + + if o.RecurseSubmodules != NoRecurseSubmodules && r.wt != nil { + if err := r.updateSubmodules(o.RecurseSubmodules); err != nil { + return err + } + } + + return nil +} + +func (r *Repository) updateWorktree() error { + if r.wt == nil { + return nil + } + + w, err := r.Worktree() + if err != nil { + return err + } + + h, err := r.Head() + if err != nil { + return err + } + + return w.Checkout(h.Hash()) +} + +// Fetch fetches changes from a remote repository. +// Returns nil if the operation is successful, NoErrAlreadyUpToDate if there are +// no changes to be fetched, or an error. +func (r *Repository) Fetch(o *FetchOptions) error { + if err := o.Validate(); err != nil { + return err + } + + remote, err := r.Remote(o.RemoteName) + if err != nil { + return err + } + + return remote.Fetch(o) +} + +// Push pushes changes to a remote. +func (r *Repository) Push(o *PushOptions) error { + if err := o.Validate(); err != nil { + return err + } + + remote, err := r.Remote(o.RemoteName) + if err != nil { + return err + } + + return remote.Push(o) +} + +// Tags returns all the References from Tags. This method returns all the tag +// types, lightweight, and annotated ones. +func (r *Repository) Tags() (storer.ReferenceIter, error) { + refIter, err := r.Storer.IterReferences() + if err != nil { + return nil, err + } + + return storer.NewReferenceFilteredIter( + func(r *plumbing.Reference) bool { + return r.IsTag() + }, refIter), nil +} + +// Branches returns all the References that are Branches. +func (r *Repository) Branches() (storer.ReferenceIter, error) { + refIter, err := r.Storer.IterReferences() + if err != nil { + return nil, err + } + + return storer.NewReferenceFilteredIter( + func(r *plumbing.Reference) bool { + return r.IsBranch() + }, refIter), nil +} + +// Notes returns all the References that are Branches. +func (r *Repository) Notes() (storer.ReferenceIter, error) { + refIter, err := r.Storer.IterReferences() + if err != nil { + return nil, err + } + + return storer.NewReferenceFilteredIter( + func(r *plumbing.Reference) bool { + return r.IsNote() + }, refIter), nil +} + +// TreeObject return a Tree with the given hash. If not found +// plumbing.ErrObjectNotFound is returned +func (r *Repository) TreeObject(h plumbing.Hash) (*object.Tree, error) { + return object.GetTree(r.Storer, h) +} + +// TreeObjects returns an unsorted TreeIter with all the trees in the repository +func (r *Repository) TreeObjects() (*object.TreeIter, error) { + iter, err := r.Storer.IterEncodedObjects(plumbing.TreeObject) + if err != nil { + return nil, err + } + + return object.NewTreeIter(r.Storer, iter), nil +} + +// CommitObject return a Commit with the given hash. If not found +// plumbing.ErrObjectNotFound is returned. 
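+// For example (illustrative only; the hash below is a placeholder):
+//
+//	c, err := r.CommitObject(plumbing.NewHash("0123456789abcdef0123456789abcdef01234567"))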
+func (r *Repository) CommitObject(h plumbing.Hash) (*object.Commit, error) { + return object.GetCommit(r.Storer, h) +} + +// CommitObjects returns an unsorted CommitIter with all the commits in the repository. +func (r *Repository) CommitObjects() (*object.CommitIter, error) { + iter, err := r.Storer.IterEncodedObjects(plumbing.CommitObject) + if err != nil { + return nil, err + } + + return object.NewCommitIter(r.Storer, iter), nil +} + +// BlobObject returns a Blob with the given hash. If not found +// plumbing.ErrObjectNotFound is returned. +func (r *Repository) BlobObject(h plumbing.Hash) (*object.Blob, error) { + return object.GetBlob(r.Storer, h) +} + +// BlobObjects returns an unsorted BlobIter with all the blobs in the repository. +func (r *Repository) BlobObjects() (*object.BlobIter, error) { + iter, err := r.Storer.IterEncodedObjects(plumbing.BlobObject) + if err != nil { + return nil, err + } + + return object.NewBlobIter(r.Storer, iter), nil +} + +// TagObject returns a Tag with the given hash. If not found +// plumbing.ErrObjectNotFound is returned. This method only returns +// annotated Tags, no lightweight Tags. +func (r *Repository) TagObject(h plumbing.Hash) (*object.Tag, error) { + return object.GetTag(r.Storer, h) +} + +// TagObjects returns a unsorted TagIter that can step through all of the annotated +// tags in the repository. +func (r *Repository) TagObjects() (*object.TagIter, error) { + iter, err := r.Storer.IterEncodedObjects(plumbing.TagObject) + if err != nil { + return nil, err + } + + return object.NewTagIter(r.Storer, iter), nil +} + +// Object returns an Object with the given hash. If not found +// plumbing.ErrObjectNotFound is returned. +func (r *Repository) Object(t plumbing.ObjectType, h plumbing.Hash) (object.Object, error) { + obj, err := r.Storer.EncodedObject(t, h) + if err != nil { + if err == plumbing.ErrObjectNotFound { + return nil, ErrObjectNotFound + } + + return nil, err + } + + return object.DecodeObject(r.Storer, obj) +} + +// Objects returns an unsorted ObjectIter with all the objects in the repository. +func (r *Repository) Objects() (*object.ObjectIter, error) { + iter, err := r.Storer.IterEncodedObjects(plumbing.AnyObject) + if err != nil { + return nil, err + } + + return object.NewObjectIter(r.Storer, iter), nil +} + +// Head returns the reference where HEAD is pointing to. +func (r *Repository) Head() (*plumbing.Reference, error) { + return storer.ResolveReference(r.Storer, plumbing.HEAD) +} + +// Reference returns the reference for a given reference name. If resolved is +// true, any symbolic reference will be resolved. +func (r *Repository) Reference(name plumbing.ReferenceName, resolved bool) ( + *plumbing.Reference, error) { + + if resolved { + return storer.ResolveReference(r.Storer, name) + } + + return r.Storer.Reference(name) +} + +// References returns an unsorted ReferenceIter for all references. +func (r *Repository) References() (storer.ReferenceIter, error) { + return r.Storer.IterReferences() +} + +// Worktree returns a worktree based on the given fs, if nil the default +// worktree will be used. +func (r *Repository) Worktree() (*Worktree, error) { + if r.wt == nil { + return nil, ErrIsBareRepository + } + + return &Worktree{r: r, fs: r.wt}, nil +} + +// ResolveRevision resolves revision to corresponding hash. 
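+// The switch below handles reference names (revision.Ref), caret paths
+// (revision.CaretPath), tilde paths (revision.TildePath), commit-message
+// regexps (revision.CaretReg) and dates (revision.AtDate). The examples here
+// are only illustrative; the exact accepted syntax is defined by the
+// internal/revision parser:
+//
+//	HEAD, refs/heads/master      reference name
+//	HEAD^, HEAD^2                caret path
+//	HEAD~3                       tilde path
+//	HEAD^{/fix bug}              commit-message regexp
+//	HEAD@{2015-03-26}            date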
+func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, error) { + p := revision.NewParserFromString(string(rev)) + + items, err := p.Parse() + + if err != nil { + return nil, err + } + + var commit *object.Commit + + for _, item := range items { + switch item.(type) { + case revision.Ref: + ref, err := storer.ResolveReference(r.Storer, plumbing.ReferenceName(item.(revision.Ref))) + + if err != nil { + return &plumbing.ZeroHash, err + } + + h := ref.Hash() + + commit, err = r.CommitObject(h) + + if err != nil { + return &plumbing.ZeroHash, err + } + case revision.CaretPath: + depth := item.(revision.CaretPath).Depth + + if depth == 0 { + break + } + + iter := commit.Parents() + + c, err := iter.Next() + + if err != nil { + return &plumbing.ZeroHash, err + } + + if depth == 1 { + commit = c + + break + } + + c, err = iter.Next() + + if err != nil { + return &plumbing.ZeroHash, err + } + + commit = c + case revision.TildePath: + for i := 0; i < item.(revision.TildePath).Depth; i++ { + c, err := commit.Parents().Next() + + if err != nil { + return &plumbing.ZeroHash, err + } + + commit = c + } + case revision.CaretReg: + history, err := commit.History() + + if err != nil { + return &plumbing.ZeroHash, err + } + + re := item.(revision.CaretReg).Regexp + negate := item.(revision.CaretReg).Negate + + var c *object.Commit + + for i := 0; i < len(history); i++ { + if !negate && re.MatchString(history[i].Message) { + c = history[i] + + break + } + + if negate && !re.MatchString(history[i].Message) { + c = history[i] + + break + } + } + + if c == nil { + return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String()) + } + + commit = c + case revision.AtDate: + history, err := commit.History() + + if err != nil { + return &plumbing.ZeroHash, err + } + + date := item.(revision.AtDate).Date + var c *object.Commit + + for i := 0; i < len(history); i++ { + if date.Equal(history[i].Committer.When.UTC()) || history[i].Committer.When.UTC().Before(date) { + c = history[i] + + break + } + } + + if c == nil { + return &plumbing.ZeroHash, fmt.Errorf(`No commit exists prior to date "%s"`, date.String()) + } + + commit = c + } + } + + return &commit.Hash, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go new file mode 100644 index 0000000000000000000000000000000000000000..cad698a4a41595240012292fb535f15c767cd938 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go @@ -0,0 +1,60 @@ +package filesystem + +import ( + "io/ioutil" + "os" + + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" +) + +type ConfigStorage struct { + dir *dotgit.DotGit +} + +func (c *ConfigStorage) Config() (*config.Config, error) { + cfg := config.NewConfig() + + f, err := c.dir.Config() + if err != nil { + if os.IsNotExist(err) { + return cfg, nil + } + + return nil, err + } + + defer f.Close() + + b, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if err := cfg.Unmarshal(b); err != nil { + return nil, err + } + + return cfg, nil +} + +func (c *ConfigStorage) SetConfig(cfg *config.Config) error { + if err := cfg.Validate(); err != nil { + return err + } + + f, err := c.dir.ConfigWriter() + if err != nil { + return err + } + + defer f.Close() + + b, err := cfg.Marshal() + if err != nil { + return err + } + + _, err = f.Write(b) + return err +} diff --git 
a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go new file mode 100644 index 0000000000000000000000000000000000000000..456ef0b89814acc8d498b66e543285b177598e94 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go @@ -0,0 +1,44 @@ +package filesystem + +import ( + "os" + + "gopkg.in/src-d/go-git.v4/plumbing/format/index" + "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" +) + +type IndexStorage struct { + dir *dotgit.DotGit +} + +func (s *IndexStorage) SetIndex(idx *index.Index) error { + f, err := s.dir.IndexWriter() + if err != nil { + return err + } + + defer f.Close() + + e := index.NewEncoder(f) + return e.Encode(idx) +} + +func (s *IndexStorage) Index() (*index.Index, error) { + idx := &index.Index{ + Version: 2, + } + + f, err := s.dir.Index() + if err != nil { + if os.IsNotExist(err) { + return idx, nil + } + + return nil, err + } + + defer f.Close() + + d := index.NewDecoder(f) + return idx, d.Decode(idx) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit/dotgit.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit/dotgit.go new file mode 100644 index 0000000000000000000000000000000000000000..fdfcea73552e1a49d81f767dff84819db34b921f --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit/dotgit.go @@ -0,0 +1,483 @@ +// https://github.com/git/git/blob/master/Documentation/gitrepository-layout.txt +package dotgit + +import ( + "bufio" + "errors" + "fmt" + stdioutil "io/ioutil" + "os" + "strings" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/utils/ioutil" + + "gopkg.in/src-d/go-billy.v2" +) + +const ( + suffix = ".git" + packedRefsPath = "packed-refs" + configPath = "config" + indexPath = "index" + shallowPath = "shallow" + modulePath = "module" + objectsPath = "objects" + packPath = "pack" + refsPath = "refs" + + tmpPackedRefsPrefix = "._packed-refs" + + packExt = ".pack" + idxExt = ".idx" +) + +var ( + // ErrNotFound is returned by New when the path is not found. + ErrNotFound = errors.New("path not found") + // ErrIdxNotFound is returned by Idxfile when the idx file is not found + ErrIdxNotFound = errors.New("idx file not found") + // ErrPackfileNotFound is returned by Packfile when the packfile is not found + ErrPackfileNotFound = errors.New("packfile not found") + // ErrConfigNotFound is returned by Config when the config is not found + ErrConfigNotFound = errors.New("config file not found") + // ErrPackedRefsDuplicatedRef is returned when a duplicated reference is + // found in the packed-ref file. This is usually the case for corrupted git + // repositories. + ErrPackedRefsDuplicatedRef = errors.New("duplicated ref found in packed-ref file") + // ErrPackedRefsBadFormat is returned when the packed-ref file corrupt. + ErrPackedRefsBadFormat = errors.New("malformed packed-ref") + // ErrSymRefTargetNotFound is returned when a symbolic reference is + // targeting a non-existing object. This usually means the repository + // is corrupt. + ErrSymRefTargetNotFound = errors.New("symbolic reference target not found") +) + +// The DotGit type represents a local git repository on disk. This +// type is not zero-value-safe, use the New function to initialize it. +type DotGit struct { + fs billy.Filesystem +} + +// New returns a DotGit value ready to be used. The path argument must +// be the absolute path of a git repository directory (e.g. +// "/foo/bar/.git"). 
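+// A sketch of the intended use (illustrative only; dotgit is an internal
+// package, so this flow is normally driven by the filesystem storage, and the
+// path below is hypothetical):
+//
+//	fs := osfs.New("/foo/bar/.git")
+//	dot := dotgit.New(fs)
+//	refs, err := dot.Refs()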
+func New(fs billy.Filesystem) *DotGit { + return &DotGit{fs: fs} +} + +// ConfigWriter returns a file pointer for write to the config file +func (d *DotGit) ConfigWriter() (billy.File, error) { + return d.fs.Create(configPath) +} + +// Config returns a file pointer for read to the config file +func (d *DotGit) Config() (billy.File, error) { + return d.fs.Open(configPath) +} + +// IndexWriter returns a file pointer for write to the index file +func (d *DotGit) IndexWriter() (billy.File, error) { + return d.fs.Create(indexPath) +} + +// Index returns a file pointer for read to the index file +func (d *DotGit) Index() (billy.File, error) { + return d.fs.Open(indexPath) +} + +// ShallowWriter returns a file pointer for write to the shallow file +func (d *DotGit) ShallowWriter() (billy.File, error) { + return d.fs.Create(shallowPath) +} + +// Shallow returns a file pointer for read to the shallow file +func (d *DotGit) Shallow() (billy.File, error) { + f, err := d.fs.Open(shallowPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + return f, nil +} + +// NewObjectPack return a writer for a new packfile, it saves the packfile to +// disk and also generates and save the index for the given packfile. +func (d *DotGit) NewObjectPack() (*PackWriter, error) { + return newPackWrite(d.fs) +} + +// ObjectPacks returns the list of availables packfiles +func (d *DotGit) ObjectPacks() ([]plumbing.Hash, error) { + packDir := d.fs.Join(objectsPath, packPath) + files, err := d.fs.ReadDir(packDir) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + var packs []plumbing.Hash + for _, f := range files { + if !strings.HasSuffix(f.Name(), packExt) { + continue + } + + n := f.Name() + h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack + packs = append(packs, h) + + } + + return packs, nil +} + +// ObjectPack returns a fs.File of the given packfile +func (d *DotGit) ObjectPack(hash plumbing.Hash) (billy.File, error) { + file := d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.pack", hash.String())) + + pack, err := d.fs.Open(file) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrPackfileNotFound + } + + return nil, err + } + + return pack, nil +} + +// ObjectPackIdx returns a fs.File of the index file for a given packfile +func (d *DotGit) ObjectPackIdx(hash plumbing.Hash) (billy.File, error) { + file := d.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s.idx", hash.String())) + idx, err := d.fs.Open(file) + if err != nil { + if os.IsNotExist(err) { + return nil, ErrPackfileNotFound + } + + return nil, err + } + + return idx, nil +} + +// NewObject return a writer for a new object file. +func (d *DotGit) NewObject() (*ObjectWriter, error) { + return newObjectWriter(d.fs) +} + +// Objects returns a slice with the hashes of objects found under the +// .git/objects/ directory. 
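+// Loose objects are stored as .git/objects/xx/yyyy..., where the directory
+// name is the first two hexadecimal characters of the hash and the file name
+// is the remaining 38; that is why the walk below only descends into
+// two-character hexadecimal directories and joins both parts back into a hash.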
+func (d *DotGit) Objects() ([]plumbing.Hash, error) { + files, err := d.fs.ReadDir(objectsPath) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + var objects []plumbing.Hash + for _, f := range files { + if f.IsDir() && len(f.Name()) == 2 && isHex(f.Name()) { + base := f.Name() + d, err := d.fs.ReadDir(d.fs.Join(objectsPath, base)) + if err != nil { + return nil, err + } + + for _, o := range d { + objects = append(objects, plumbing.NewHash(base+o.Name())) + } + } + } + + return objects, nil +} + +// Object return a fs.File poiting the object file, if exists +func (d *DotGit) Object(h plumbing.Hash) (billy.File, error) { + hash := h.String() + file := d.fs.Join(objectsPath, hash[0:2], hash[2:40]) + + return d.fs.Open(file) +} + +func (d *DotGit) SetRef(r *plumbing.Reference) error { + var content string + switch r.Type() { + case plumbing.SymbolicReference: + content = fmt.Sprintf("ref: %s\n", r.Target()) + case plumbing.HashReference: + content = fmt.Sprintln(r.Hash().String()) + } + + f, err := d.fs.Create(r.Name().String()) + if err != nil { + return err + } + + if _, err := f.Write([]byte(content)); err != nil { + return err + } + return f.Close() +} + +// Refs scans the git directory collecting references, which it returns. +// Symbolic references are resolved and included in the output. +func (d *DotGit) Refs() ([]*plumbing.Reference, error) { + var refs []*plumbing.Reference + if err := d.addRefsFromPackedRefs(&refs); err != nil { + return nil, err + } + + if err := d.addRefsFromRefDir(&refs); err != nil { + return nil, err + } + + if err := d.addRefFromHEAD(&refs); err != nil { + return nil, err + } + + return refs, nil +} + +// Ref returns the reference for a given reference name. +func (d *DotGit) Ref(name plumbing.ReferenceName) (*plumbing.Reference, error) { + ref, err := d.readReferenceFile(".", name.String()) + if err == nil { + return ref, nil + } + + refs, err := d.Refs() + if err != nil { + return nil, err + } + + for _, ref := range refs { + if ref.Name() == name { + return ref, nil + } + } + + return nil, plumbing.ErrReferenceNotFound +} + +// RemoveRef removes a reference by name. +func (d *DotGit) RemoveRef(name plumbing.ReferenceName) error { + path := d.fs.Join(".", name.String()) + _, err := d.fs.Stat(path) + if err == nil { + return d.fs.Remove(path) + } + + if err != nil && !os.IsNotExist(err) { + return err + } + + return d.rewritePackedRefsWithoutRef(name) +} + +func (d *DotGit) addRefsFromPackedRefs(refs *[]*plumbing.Reference) (err error) { + f, err := d.fs.Open(packedRefsPath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer ioutil.CheckClose(f, &err) + + s := bufio.NewScanner(f) + for s.Scan() { + ref, err := d.processLine(s.Text()) + if err != nil { + return err + } + + if ref != nil { + *refs = append(*refs, ref) + } + } + + return s.Err() +} + +func (d *DotGit) rewritePackedRefsWithoutRef(name plumbing.ReferenceName) (err error) { + f, err := d.fs.Open(packedRefsPath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + defer ioutil.CheckClose(f, &err) + + // Creating the temp file in the same directory as the target file + // improves our chances for rename operation to be atomic. 
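+	// The packed-refs file is rewritten rather than edited in place: every
+	// line except the one for the removed reference is copied into the
+	// temporary file, which then replaces packed-refs only when the
+	// reference was actually found.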
+ tmp, err := d.fs.TempFile("", tmpPackedRefsPrefix) + if err != nil { + return err + } + + tmpPath := tmp.Filename() + defer ioutil.CheckClose(tmp, &err) + defer d.fs.Remove(tmpPath) + + s := bufio.NewScanner(f) + found := false + for s.Scan() { + line := s.Text() + ref, err := d.processLine(line) + if err != nil { + return err + } + + if ref != nil && ref.Name() == name { + found = true + continue + } + + if _, err := fmt.Fprintln(tmp, line); err != nil { + return err + } + } + + if err := s.Err(); err != nil { + return err + } + + if !found { + return nil + } + + return d.fs.Rename(tmpPath, packedRefsPath) +} + +// process lines from a packed-refs file +func (d *DotGit) processLine(line string) (*plumbing.Reference, error) { + if len(line) == 0 { + return nil, nil + } + + switch line[0] { + case '#': // comment - ignore + return nil, nil + case '^': // annotated tag commit of the previous line - ignore + return nil, nil + default: + ws := strings.Split(line, " ") // hash then ref + if len(ws) != 2 { + return nil, ErrPackedRefsBadFormat + } + + return plumbing.NewReferenceFromStrings(ws[1], ws[0]), nil + } +} + +func (d *DotGit) addRefsFromRefDir(refs *[]*plumbing.Reference) error { + return d.walkReferencesTree(refs, refsPath) +} + +func (d *DotGit) walkReferencesTree(refs *[]*plumbing.Reference, relPath string) error { + files, err := d.fs.ReadDir(relPath) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, f := range files { + newRelPath := d.fs.Join(relPath, f.Name()) + if f.IsDir() { + if err = d.walkReferencesTree(refs, newRelPath); err != nil { + return err + } + + continue + } + + ref, err := d.readReferenceFile(".", newRelPath) + if err != nil { + return err + } + + if ref != nil { + *refs = append(*refs, ref) + } + } + + return nil +} + +func (d *DotGit) addRefFromHEAD(refs *[]*plumbing.Reference) error { + ref, err := d.readReferenceFile(".", "HEAD") + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + *refs = append(*refs, ref) + return nil +} + +func (d *DotGit) readReferenceFile(refsPath, refFile string) (ref *plumbing.Reference, err error) { + path := d.fs.Join(refsPath, refFile) + + f, err := d.fs.Open(path) + if err != nil { + return nil, err + } + defer ioutil.CheckClose(f, &err) + + b, err := stdioutil.ReadAll(f) + if err != nil { + return nil, err + } + + line := strings.TrimSpace(string(b)) + return plumbing.NewReferenceFromStrings(refFile, line), nil +} + +// Module return a billy.Filesystem poiting to the module folder +func (d *DotGit) Module(name string) billy.Filesystem { + return d.fs.Dir(d.fs.Join(modulePath, name)) +} + +func isHex(s string) bool { + for _, b := range []byte(s) { + if isNum(b) { + continue + } + if isHexAlpha(b) { + continue + } + + return false + } + + return true +} + +func isNum(b byte) bool { + return b >= '0' && b <= '9' +} + +func isHexAlpha(b byte) bool { + return b >= 'a' && b <= 'f' || b >= 'A' && b <= 'F' +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit/writers.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit/writers.go new file mode 100644 index 0000000000000000000000000000000000000000..0d2747f314a4f37f4506f7a0e422262606c693d4 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit/writers.go @@ -0,0 +1,286 @@ +package dotgit + +import ( + "fmt" + "io" + "sync/atomic" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" + 
"gopkg.in/src-d/go-git.v4/plumbing/format/objfile" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + + "gopkg.in/src-d/go-billy.v2" +) + +// PackWriter is a io.Writer that generates the packfile index simultaneously, +// a packfile.Decoder is used with a file reader to read the file being written +// this operation is synchronized with the write operations. +// The packfile is written in a temp file, when Close is called this file +// is renamed/moved (depends on the Filesystem implementation) to the final +// location, if the PackWriter is not used, nothing is written +type PackWriter struct { + Notify func(h plumbing.Hash, i idxfile.Idxfile) + + fs billy.Filesystem + fr, fw billy.File + synced *syncedReader + checksum plumbing.Hash + index idxfile.Idxfile + result chan error +} + +func newPackWrite(fs billy.Filesystem) (*PackWriter, error) { + fw, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_pack_") + if err != nil { + return nil, err + } + + fr, err := fs.Open(fw.Filename()) + if err != nil { + return nil, err + } + + writer := &PackWriter{ + fs: fs, + fw: fw, + fr: fr, + synced: newSyncedReader(fw, fr), + result: make(chan error), + } + + go writer.buildIndex() + return writer, nil +} + +func (w *PackWriter) buildIndex() { + s := packfile.NewScanner(w.synced) + d, err := packfile.NewDecoder(s, nil) + if err != nil { + w.result <- err + return + } + + checksum, err := d.Decode() + if err != nil { + w.result <- err + return + } + + w.checksum = checksum + w.index.PackfileChecksum = checksum + w.index.Version = idxfile.VersionSupported + + offsets := d.Offsets() + for h, crc := range d.CRCs() { + w.index.Add(h, uint64(offsets[h]), crc) + } + + w.result <- err +} + +// waitBuildIndex waits until buildIndex function finishes, this can terminate +// with a packfile.ErrEmptyPackfile, this means that nothing was written so we +// ignore the error +func (w *PackWriter) waitBuildIndex() error { + err := <-w.result + if err == packfile.ErrEmptyPackfile { + return nil + } + + return err +} + +func (w *PackWriter) Write(p []byte) (int, error) { + return w.synced.Write(p) +} + +// Close closes all the file descriptors and save the final packfile, if nothing +// was written, the tempfiles are deleted without writing a packfile. 
+func (w *PackWriter) Close() error { + defer func() { + if w.Notify != nil { + w.Notify(w.checksum, w.index) + } + + close(w.result) + }() + + if err := w.synced.Close(); err != nil { + return err + } + + if err := w.waitBuildIndex(); err != nil { + return err + } + + if err := w.fr.Close(); err != nil { + return err + } + + if err := w.fw.Close(); err != nil { + return err + } + + if len(w.index.Entries) == 0 { + return w.clean() + } + + return w.save() +} + +func (w *PackWriter) clean() error { + return w.fs.Remove(w.fw.Filename()) +} + +func (w *PackWriter) save() error { + base := w.fs.Join(objectsPath, packPath, fmt.Sprintf("pack-%s", w.checksum)) + idx, err := w.fs.Create(fmt.Sprintf("%s.idx", base)) + if err != nil { + return err + } + + if err := w.encodeIdx(idx); err != nil { + return err + } + + if err := idx.Close(); err != nil { + return err + } + + return w.fs.Rename(w.fw.Filename(), fmt.Sprintf("%s.pack", base)) +} + +func (w *PackWriter) encodeIdx(writer io.Writer) error { + e := idxfile.NewEncoder(writer) + _, err := e.Encode(&w.index) + return err +} + +type syncedReader struct { + w io.Writer + r io.ReadSeeker + + blocked, done uint32 + written, read uint64 + news chan bool +} + +func newSyncedReader(w io.Writer, r io.ReadSeeker) *syncedReader { + return &syncedReader{ + w: w, + r: r, + news: make(chan bool), + } +} + +func (s *syncedReader) Write(p []byte) (n int, err error) { + defer func() { + written := atomic.AddUint64(&s.written, uint64(n)) + read := atomic.LoadUint64(&s.read) + if written > read { + s.wake() + } + }() + + n, err = s.w.Write(p) + return +} + +func (s *syncedReader) Read(p []byte) (n int, err error) { + defer func() { atomic.AddUint64(&s.read, uint64(n)) }() + + s.sleep() + n, err = s.r.Read(p) + if err == io.EOF && !s.isDone() { + if n == 0 { + return s.Read(p) + } + + return n, nil + } + + return +} + +func (s *syncedReader) isDone() bool { + return atomic.LoadUint32(&s.done) == 1 +} + +func (s *syncedReader) isBlocked() bool { + return atomic.LoadUint32(&s.blocked) == 1 +} + +func (s *syncedReader) wake() { + if s.isBlocked() { + // fmt.Println("wake") + atomic.StoreUint32(&s.blocked, 0) + s.news <- true + } +} + +func (s *syncedReader) sleep() { + read := atomic.LoadUint64(&s.read) + written := atomic.LoadUint64(&s.written) + if read >= written { + atomic.StoreUint32(&s.blocked, 1) + // fmt.Println("sleep", read, written) + <-s.news + } + +} + +func (s *syncedReader) Seek(offset int64, whence int) (int64, error) { + if whence == io.SeekCurrent { + return s.r.Seek(offset, whence) + } + + p, err := s.r.Seek(offset, whence) + s.read = uint64(p) + + return p, err +} + +func (s *syncedReader) Close() error { + atomic.StoreUint32(&s.done, 1) + close(s.news) + return nil +} + +type ObjectWriter struct { + objfile.Writer + fs billy.Filesystem + f billy.File +} + +func newObjectWriter(fs billy.Filesystem) (*ObjectWriter, error) { + f, err := fs.TempFile(fs.Join(objectsPath, packPath), "tmp_obj_") + if err != nil { + return nil, err + } + + return &ObjectWriter{ + Writer: (*objfile.NewWriter(f)), + fs: fs, + f: f, + }, nil +} + +func (w *ObjectWriter) Close() error { + if err := w.Writer.Close(); err != nil { + return err + } + + if err := w.f.Close(); err != nil { + return err + } + + return w.save() +} + +func (w *ObjectWriter) save() error { + hash := w.Hash().String() + file := w.fs.Join(objectsPath, hash[0:2], hash[2:40]) + + return w.fs.Rename(w.f.Filename(), file) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go 
b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go new file mode 100644 index 0000000000000000000000000000000000000000..2e469d375caa9f84a86e6c20b291996b9650aad9 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go @@ -0,0 +1,14 @@ +package filesystem + +import ( + "gopkg.in/src-d/go-git.v4/storage" + "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" +) + +type ModuleStorage struct { + dir *dotgit.DotGit +} + +func (s *ModuleStorage) Module(name string) (storage.Storer, error) { + return NewStorage(s.dir.Module(name)) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go new file mode 100644 index 0000000000000000000000000000000000000000..1518256a7bd0ae9b67a8b07be29fb01ec4345b75 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go @@ -0,0 +1,378 @@ +package filesystem + +import ( + "io" + "os" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile" + "gopkg.in/src-d/go-git.v4/plumbing/format/objfile" + "gopkg.in/src-d/go-git.v4/plumbing/format/packfile" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + "gopkg.in/src-d/go-git.v4/storage/memory" + + "gopkg.in/src-d/go-billy.v2" +) + +type ObjectStorage struct { + dir *dotgit.DotGit + index map[plumbing.Hash]idx +} + +func newObjectStorage(dir *dotgit.DotGit) (ObjectStorage, error) { + s := ObjectStorage{ + dir: dir, + index: make(map[plumbing.Hash]idx, 0), + } + + return s, s.loadIdxFiles() +} + +func (s *ObjectStorage) loadIdxFiles() error { + packs, err := s.dir.ObjectPacks() + if err != nil { + return err + } + + for _, h := range packs { + if err := s.loadIdxFile(h); err != nil { + return err + } + } + + return nil +} + +func (s *ObjectStorage) loadIdxFile(h plumbing.Hash) error { + idxfile, err := s.dir.ObjectPackIdx(h) + if err != nil { + return err + } + + s.index[h] = make(idx) + return s.index[h].Decode(idxfile) +} + +func (s *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { + return &plumbing.MemoryObject{} +} + +func (s *ObjectStorage) PackfileWriter() (io.WriteCloser, error) { + w, err := s.dir.NewObjectPack() + if err != nil { + return nil, err + } + + w.Notify = func(h plumbing.Hash, idxfile idxfile.Idxfile) { + s.index[h] = make(idx) + for _, e := range idxfile.Entries { + s.index[h][e.Hash] = int64(e.Offset) + } + } + + return w, nil +} + +// SetEncodedObject adds a new object to the storage. +func (s *ObjectStorage) SetEncodedObject(o plumbing.EncodedObject) (plumbing.Hash, error) { + if o.Type() == plumbing.OFSDeltaObject || o.Type() == plumbing.REFDeltaObject { + return plumbing.ZeroHash, plumbing.ErrInvalidType + } + + ow, err := s.dir.NewObject() + if err != nil { + return plumbing.ZeroHash, err + } + + defer ow.Close() + + or, err := o.Reader() + if err != nil { + return plumbing.ZeroHash, err + } + + defer or.Close() + + if err := ow.WriteHeader(o.Type(), o.Size()); err != nil { + return plumbing.ZeroHash, err + } + + if _, err := io.Copy(ow, or); err != nil { + return plumbing.ZeroHash, err + } + + return o.Hash(), nil +} + +// EncodedObject returns the object with the given hash, by searching for it in +// the packfile and the git object directories. 
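+// Loose objects are tried first; the packfiles are only consulted when the
+// object is not found unpacked, and a type mismatch is reported as
+// plumbing.ErrObjectNotFound.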
+func (s *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { + obj, err := s.getFromUnpacked(h) + if err == plumbing.ErrObjectNotFound { + obj, err = s.getFromPackfile(h) + } + + if err != nil { + return nil, err + } + + if plumbing.AnyObject != t && obj.Type() != t { + return nil, plumbing.ErrObjectNotFound + } + + return obj, nil +} + +func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedObject, err error) { + f, err := s.dir.Object(h) + if err != nil { + if os.IsNotExist(err) { + return nil, plumbing.ErrObjectNotFound + } + + return nil, err + } + + defer f.Close() + + obj = s.NewEncodedObject() + r, err := objfile.NewReader(f) + if err != nil { + return nil, err + } + + defer r.Close() + + t, size, err := r.Header() + if err != nil { + return nil, err + } + + obj.SetType(t) + obj.SetSize(size) + w, err := obj.Writer() + if err != nil { + return nil, err + } + + _, err = io.Copy(w, r) + return obj, err +} + +// Get returns the object with the given hash, by searching for it in +// the packfile. +func (s *ObjectStorage) getFromPackfile(h plumbing.Hash) (plumbing.EncodedObject, error) { + pack, offset := s.findObjectInPackfile(h) + if offset == -1 { + return nil, plumbing.ErrObjectNotFound + } + + f, err := s.dir.ObjectPack(pack) + if err != nil { + return nil, err + } + + defer f.Close() + + p := packfile.NewScanner(f) + d, err := packfile.NewDecoder(p, memory.NewStorage()) + if err != nil { + return nil, err + } + + d.SetOffsets(s.index[pack]) + return d.DecodeObjectAt(offset) +} + +func (s *ObjectStorage) findObjectInPackfile(h plumbing.Hash) (plumbing.Hash, int64) { + for packfile, index := range s.index { + if offset, ok := index[h]; ok { + return packfile, offset + } + } + + return plumbing.ZeroHash, -1 +} + +// IterEncodedObjects returns an iterator for all the objects in the packfile +// with the given type. +func (s *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { + objects, err := s.dir.Objects() + if err != nil { + return nil, err + } + + seen := make(map[plumbing.Hash]bool, 0) + var iters []storer.EncodedObjectIter + if len(objects) != 0 { + iters = append(iters, &objectsIter{s: s, t: t, h: objects}) + seen = hashListAsMap(objects) + } + + packi, err := s.buildPackfileIters(t, seen) + if err != nil { + return nil, err + } + + iters = append(iters, packi...) 
+ return storer.NewMultiEncodedObjectIter(iters), nil +} + +func (s *ObjectStorage) buildPackfileIters( + t plumbing.ObjectType, seen map[plumbing.Hash]bool) ([]storer.EncodedObjectIter, error) { + packs, err := s.dir.ObjectPacks() + if err != nil { + return nil, err + } + + var iters []storer.EncodedObjectIter + for _, h := range packs { + pack, err := s.dir.ObjectPack(h) + if err != nil { + return nil, err + } + + iter, err := newPackfileIter(pack, t, seen, s.index[h]) + if err != nil { + return nil, err + } + + iters = append(iters, iter) + } + + return iters, nil +} + +type idx map[plumbing.Hash]int64 + +func (i idx) Decode(r io.Reader) error { + idx := &idxfile.Idxfile{} + + d := idxfile.NewDecoder(r) + if err := d.Decode(idx); err != nil { + return err + } + + for _, e := range idx.Entries { + i[e.Hash] = int64(e.Offset) + } + + return nil +} + +type packfileIter struct { + f billy.File + d *packfile.Decoder + t plumbing.ObjectType + + seen map[plumbing.Hash]bool + position uint32 + total uint32 +} + +func NewPackfileIter(f billy.File, t plumbing.ObjectType) (storer.EncodedObjectIter, error) { + return newPackfileIter(f, t, make(map[plumbing.Hash]bool), nil) +} + +func newPackfileIter(f billy.File, t plumbing.ObjectType, seen map[plumbing.Hash]bool, + index idx) (storer.EncodedObjectIter, error) { + s := packfile.NewScanner(f) + _, total, err := s.Header() + if err != nil { + return nil, err + } + + d, err := packfile.NewDecoderForType(s, memory.NewStorage(), t) + if err != nil { + return nil, err + } + + d.SetOffsets(index) + + return &packfileIter{ + f: f, + d: d, + t: t, + + total: total, + seen: seen, + }, nil +} + +func (iter *packfileIter) Next() (plumbing.EncodedObject, error) { + for { + if iter.position >= iter.total { + return nil, io.EOF + } + + obj, err := iter.d.DecodeObject() + if err != nil { + return nil, err + } + + iter.position++ + if obj == nil { + continue + } + + if iter.seen[obj.Hash()] { + return iter.Next() + } + + return obj, nil + } +} + +// ForEach is never called since is used inside of a MultiObjectIterator +func (iter *packfileIter) ForEach(cb func(plumbing.EncodedObject) error) error { + return nil +} + +func (iter *packfileIter) Close() { + iter.f.Close() + iter.d.Close() +} + +type objectsIter struct { + s *ObjectStorage + t plumbing.ObjectType + h []plumbing.Hash +} + +func (iter *objectsIter) Next() (plumbing.EncodedObject, error) { + if len(iter.h) == 0 { + return nil, io.EOF + } + + obj, err := iter.s.getFromUnpacked(iter.h[0]) + iter.h = iter.h[1:] + + if err != nil { + return nil, err + } + + if iter.t != plumbing.AnyObject && iter.t != obj.Type() { + return iter.Next() + } + + return obj, err +} + +// ForEach is never called since is used inside of a MultiObjectIterator +func (iter *objectsIter) ForEach(cb func(plumbing.EncodedObject) error) error { + return nil +} + +func (iter *objectsIter) Close() { + iter.h = []plumbing.Hash{} +} + +func hashListAsMap(l []plumbing.Hash) map[plumbing.Hash]bool { + m := make(map[plumbing.Hash]bool, len(l)) + for _, h := range l { + m[h] = true + } + + return m +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go new file mode 100644 index 0000000000000000000000000000000000000000..49627d3540014df2f780f7ae62f3a7f6acda7348 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go @@ -0,0 +1,32 @@ +package filesystem + +import ( + "gopkg.in/src-d/go-git.v4/plumbing" + 
"gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" +) + +type ReferenceStorage struct { + dir *dotgit.DotGit +} + +func (r *ReferenceStorage) SetReference(ref *plumbing.Reference) error { + return r.dir.SetRef(ref) +} + +func (r *ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { + return r.dir.Ref(n) +} + +func (r *ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { + refs, err := r.dir.Refs() + if err != nil { + return nil, err + } + + return storer.NewReferenceSliceIter(refs), nil +} + +func (r *ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { + return r.dir.RemoveRef(n) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go new file mode 100644 index 0000000000000000000000000000000000000000..ec8d20e89e72d8cd0b0cd0681f715032d6bb2bdf --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go @@ -0,0 +1,51 @@ +package filesystem + +import ( + "bufio" + "fmt" + + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" +) + +// ShallowStorage where the shallow commits are stored, an internal to +// manipulate the shallow file +type ShallowStorage struct { + dir *dotgit.DotGit +} + +// SetShallow save the shallows in the shallow file in the .git folder as one +// commit per line represented by 40-byte hexadecimal object terminated by a +// newline. +func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { + f, err := s.dir.ShallowWriter() + if err != nil { + return err + } + + defer f.Close() + for _, h := range commits { + if _, err := fmt.Fprintf(f, "%s\n", h); err != err { + return err + } + } + + return nil +} + +// Shallow return the shallow commits reading from shallo file from .git +func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) { + f, err := s.dir.Shallow() + if err != nil { + return nil, err + } + + var hash []plumbing.Hash + + scn := bufio.NewScanner(f) + for scn.Scan() { + hash = append(hash, plumbing.NewHash(scn.Text())) + } + + return hash, scn.Err() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go new file mode 100644 index 0000000000000000000000000000000000000000..6d58006b21be26f169782ffc2b8f7964da5581f2 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go @@ -0,0 +1,47 @@ +// Package filesystem is a storage backend base on filesystems +package filesystem + +import ( + "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit" + + "gopkg.in/src-d/go-billy.v2" +) + +// Storage is an implementation of git.Storer that stores data on disk in the +// standard git format (this is, the .git directory). Zero values of this type +// are not safe to use, see the NewStorage function below. 
+type Storage struct { + fs billy.Filesystem + + ObjectStorage + ReferenceStorage + IndexStorage + ShallowStorage + ConfigStorage + ModuleStorage +} + +// NewStorage returns a new Storage backed by a given `fs.Filesystem` +func NewStorage(fs billy.Filesystem) (*Storage, error) { + dir := dotgit.New(fs) + o, err := newObjectStorage(dir) + if err != nil { + return nil, err + } + + return &Storage{ + fs: fs, + + ObjectStorage: o, + ReferenceStorage: ReferenceStorage{dir: dir}, + IndexStorage: IndexStorage{dir: dir}, + ShallowStorage: ShallowStorage{dir: dir}, + ConfigStorage: ConfigStorage{dir: dir}, + ModuleStorage: ModuleStorage{dir: dir}, + }, nil +} + +// Filesystem returns the underlying filesystem +func (s *Storage) Filesystem() billy.Filesystem { + return s.fs +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go b/vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go new file mode 100644 index 0000000000000000000000000000000000000000..2380fedbe07cd98484052c7fb1e1a18490f33180 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go @@ -0,0 +1,250 @@ +// Package memory is a storage backend base on memory +package memory + +import ( + "fmt" + + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/format/index" + "gopkg.in/src-d/go-git.v4/plumbing/storer" + "gopkg.in/src-d/go-git.v4/storage" +) + +var ErrUnsupportedObjectType = fmt.Errorf("unsupported object type") + +// Storage is an implementation of git.Storer that stores data on memory, being +// ephemeral. The use of this storage should be done in controlled envoriments, +// since the representation in memory of some repository can fill the machine +// memory. in the other hand this storage has the best performance. 
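Because the memory backend below exposes the same Storer surface without touching disk, it is convenient for tests; a small sketch (identifiers are illustrative) that stores a blob, points a branch at it and reads both back:

```go
package main

import (
	"fmt"

	"gopkg.in/src-d/go-git.v4/plumbing"
	"gopkg.in/src-d/go-git.v4/storage/memory"
)

func main() {
	st := memory.NewStorage()

	// Build an encoded blob in memory and store it.
	content := []byte("hello, storer\n")
	obj := st.NewEncodedObject()
	obj.SetType(plumbing.BlobObject)
	obj.SetSize(int64(len(content)))

	w, err := obj.Writer()
	if err != nil {
		panic(err)
	}
	if _, err := w.Write(content); err != nil {
		panic(err)
	}
	w.Close()

	hash, err := st.SetEncodedObject(obj)
	if err != nil {
		panic(err)
	}

	// Point a branch at the stored blob and resolve both again.
	branch := plumbing.ReferenceName("refs/heads/example")
	if err := st.SetReference(plumbing.NewHashReference(branch, hash)); err != nil {
		panic(err)
	}

	ref, _ := st.Reference(branch)
	got, _ := st.EncodedObject(plumbing.BlobObject, hash)
	fmt.Println(ref.Hash(), got.Size())
}
```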
+type Storage struct { + ConfigStorage + ObjectStorage + ShallowStorage + IndexStorage + ReferenceStorage + ModuleStorage +} + +// NewStorage returns a new Storage base on memory +func NewStorage() *Storage { + return &Storage{ + ReferenceStorage: make(ReferenceStorage, 0), + ConfigStorage: ConfigStorage{}, + ShallowStorage: ShallowStorage{}, + ObjectStorage: ObjectStorage{ + Objects: make(map[plumbing.Hash]plumbing.EncodedObject, 0), + Commits: make(map[plumbing.Hash]plumbing.EncodedObject, 0), + Trees: make(map[plumbing.Hash]plumbing.EncodedObject, 0), + Blobs: make(map[plumbing.Hash]plumbing.EncodedObject, 0), + Tags: make(map[plumbing.Hash]plumbing.EncodedObject, 0), + }, + ModuleStorage: make(ModuleStorage, 0), + } +} + +type ConfigStorage struct { + config *config.Config +} + +func (c *ConfigStorage) SetConfig(cfg *config.Config) error { + if err := cfg.Validate(); err != nil { + return err + } + + c.config = cfg + return nil +} + +func (c *ConfigStorage) Config() (*config.Config, error) { + if c.config == nil { + c.config = config.NewConfig() + } + + return c.config, nil +} + +type IndexStorage struct { + index *index.Index +} + +func (c *IndexStorage) SetIndex(idx *index.Index) error { + c.index = idx + return nil +} + +func (c *IndexStorage) Index() (*index.Index, error) { + if c.index == nil { + c.index = &index.Index{Version: 2} + } + + return c.index, nil +} + +type ObjectStorage struct { + Objects map[plumbing.Hash]plumbing.EncodedObject + Commits map[plumbing.Hash]plumbing.EncodedObject + Trees map[plumbing.Hash]plumbing.EncodedObject + Blobs map[plumbing.Hash]plumbing.EncodedObject + Tags map[plumbing.Hash]plumbing.EncodedObject +} + +func (o *ObjectStorage) NewEncodedObject() plumbing.EncodedObject { + return &plumbing.MemoryObject{} +} + +func (o *ObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { + h := obj.Hash() + o.Objects[h] = obj + + switch obj.Type() { + case plumbing.CommitObject: + o.Commits[h] = o.Objects[h] + case plumbing.TreeObject: + o.Trees[h] = o.Objects[h] + case plumbing.BlobObject: + o.Blobs[h] = o.Objects[h] + case plumbing.TagObject: + o.Tags[h] = o.Objects[h] + default: + return h, ErrUnsupportedObjectType + } + + return h, nil +} + +func (o *ObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { + obj, ok := o.Objects[h] + if !ok || (plumbing.AnyObject != t && obj.Type() != t) { + return nil, plumbing.ErrObjectNotFound + } + + return obj, nil +} + +func (o *ObjectStorage) IterEncodedObjects(t plumbing.ObjectType) (storer.EncodedObjectIter, error) { + var series []plumbing.EncodedObject + switch t { + case plumbing.AnyObject: + series = flattenObjectMap(o.Objects) + case plumbing.CommitObject: + series = flattenObjectMap(o.Commits) + case plumbing.TreeObject: + series = flattenObjectMap(o.Trees) + case plumbing.BlobObject: + series = flattenObjectMap(o.Blobs) + case plumbing.TagObject: + series = flattenObjectMap(o.Tags) + } + + return storer.NewEncodedObjectSliceIter(series), nil +} + +func flattenObjectMap(m map[plumbing.Hash]plumbing.EncodedObject) []plumbing.EncodedObject { + objects := make([]plumbing.EncodedObject, 0, len(m)) + for _, obj := range m { + objects = append(objects, obj) + } + return objects +} + +func (o *ObjectStorage) Begin() storer.Transaction { + return &TxObjectStorage{ + Storage: o, + Objects: make(map[plumbing.Hash]plumbing.EncodedObject, 0), + } +} + +type TxObjectStorage struct { + Storage *ObjectStorage + Objects 
map[plumbing.Hash]plumbing.EncodedObject +} + +func (tx *TxObjectStorage) SetEncodedObject(obj plumbing.EncodedObject) (plumbing.Hash, error) { + h := obj.Hash() + tx.Objects[h] = obj + + return h, nil +} + +func (tx *TxObjectStorage) EncodedObject(t plumbing.ObjectType, h plumbing.Hash) (plumbing.EncodedObject, error) { + obj, ok := tx.Objects[h] + if !ok || (plumbing.AnyObject != t && obj.Type() != t) { + return nil, plumbing.ErrObjectNotFound + } + + return obj, nil +} + +func (tx *TxObjectStorage) Commit() error { + for h, obj := range tx.Objects { + delete(tx.Objects, h) + if _, err := tx.Storage.SetEncodedObject(obj); err != nil { + return err + } + } + + return nil +} + +func (tx *TxObjectStorage) Rollback() error { + tx.Objects = make(map[plumbing.Hash]plumbing.EncodedObject, 0) + return nil +} + +type ReferenceStorage map[plumbing.ReferenceName]*plumbing.Reference + +func (r ReferenceStorage) SetReference(ref *plumbing.Reference) error { + if ref != nil { + r[ref.Name()] = ref + } + + return nil +} + +func (r ReferenceStorage) Reference(n plumbing.ReferenceName) (*plumbing.Reference, error) { + ref, ok := r[n] + if !ok { + return nil, plumbing.ErrReferenceNotFound + } + + return ref, nil +} + +func (r ReferenceStorage) IterReferences() (storer.ReferenceIter, error) { + var refs []*plumbing.Reference + for _, ref := range r { + refs = append(refs, ref) + } + + return storer.NewReferenceSliceIter(refs), nil +} + +func (r ReferenceStorage) RemoveReference(n plumbing.ReferenceName) error { + delete(r, n) + return nil +} + +type ShallowStorage []plumbing.Hash + +func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error { + *s = commits + return nil +} + +func (s ShallowStorage) Shallow() ([]plumbing.Hash, error) { + return s, nil +} + +type ModuleStorage map[string]*Storage + +func (s ModuleStorage) Module(name string) (storage.Storer, error) { + if m, ok := s[name]; ok { + return m, nil + } + + m := NewStorage() + s[name] = m + + return m, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/storage/storer.go b/vendor/gopkg.in/src-d/go-git.v4/storage/storer.go new file mode 100644 index 0000000000000000000000000000000000000000..50bfd55c81084da599da33676c03764f9d516337 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/storage/storer.go @@ -0,0 +1,26 @@ +package storage + +import ( + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/plumbing/storer" +) + +// Storer is a generic storage of objects, references and any information +// related to a particular repository. 
The package gopkg.in/src-d/go-git.v4/storage +// contains two implementation a filesystem base implementation (such as `.git`) +// and a memory implementations being ephemeral +type Storer interface { + storer.EncodedObjectStorer + storer.ReferenceStorer + storer.ShallowStorer + storer.IndexStorer + config.ConfigStorer + ModuleStorer +} + +// ModuleStorer allows interact with the modules' Storers +type ModuleStorer interface { + // Module returns a Storer reprensting a submodule, if not exists returns a + // new empty Storer is returned + Module(name string) (Storer, error) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/submodule.go b/vendor/gopkg.in/src-d/go-git.v4/submodule.go new file mode 100644 index 0000000000000000000000000000000000000000..69c5d75c852ae6d74f6b7e56f5d69d3f4a8f45cf --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/submodule.go @@ -0,0 +1,174 @@ +package git + +import ( + "errors" + + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/plumbing" +) + +var ( + ErrSubmoduleAlreadyInitialized = errors.New("submodule already initialized") + ErrSubmoduleNotInitialized = errors.New("submodule not initialized") +) + +// Submodule a submodule allows you to keep another Git repository in a +// subdirectory of your repository. +type Submodule struct { + initialized bool + + c *config.Submodule + w *Worktree +} + +// Config returns the submodule config +func (s *Submodule) Config() *config.Submodule { + return s.c +} + +// Init initialize the submodule reading the recoreded Entry in the index for +// the given submodule +func (s *Submodule) Init() error { + cfg, err := s.w.r.Storer.Config() + if err != nil { + return err + } + + _, ok := cfg.Submodules[s.c.Name] + if ok { + return ErrSubmoduleAlreadyInitialized + } + + s.initialized = true + + cfg.Submodules[s.c.Name] = s.c + return s.w.r.Storer.SetConfig(cfg) +} + +// Repository returns the Repository represented by this submodule +func (s *Submodule) Repository() (*Repository, error) { + storer, err := s.w.r.Storer.Module(s.c.Name) + if err != nil { + return nil, err + } + + _, err = storer.Reference(plumbing.HEAD) + if err != nil && err != plumbing.ErrReferenceNotFound { + return nil, err + } + + worktree := s.w.fs.Dir(s.c.Path) + if err == nil { + return Open(storer, worktree) + } + + r, err := Init(storer, worktree) + if err != nil { + return nil, err + } + + _, err = r.CreateRemote(&config.RemoteConfig{ + Name: DefaultRemoteName, + URL: s.c.URL, + }) + + return r, err +} + +// Update the registered submodule to match what the superproject expects, the +// submodule should be initilized first calling the Init method or setting in +// the options SubmoduleUpdateOptions.Init equals true +func (s *Submodule) Update(o *SubmoduleUpdateOptions) error { + if !s.initialized && !o.Init { + return ErrSubmoduleNotInitialized + } + + if !s.initialized && o.Init { + if err := s.Init(); err != nil { + return err + } + } + + e, err := s.w.readIndexEntry(s.c.Path) + if err != nil { + return err + } + + r, err := s.Repository() + if err != nil { + return err + } + + if err := s.fetchAndCheckout(r, o, e.Hash); err != nil { + return err + } + + return s.doRecrusiveUpdate(r, o) +} + +func (s *Submodule) doRecrusiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error { + if o.RecurseSubmodules == NoRecurseSubmodules { + return nil + } + + w, err := r.Worktree() + if err != nil { + return err + } + + l, err := w.Submodules() + if err != nil { + return err + } + + new := &SubmoduleUpdateOptions{} + *new = *o + 
new.RecurseSubmodules-- + return l.Update(new) +} + +func (s *Submodule) fetchAndCheckout(r *Repository, o *SubmoduleUpdateOptions, hash plumbing.Hash) error { + if !o.NoFetch { + err := r.Fetch(&FetchOptions{}) + if err != nil && err != NoErrAlreadyUpToDate { + return err + } + } + + w, err := r.Worktree() + if err != nil { + return err + } + + if err := w.Checkout(hash); err != nil { + return err + } + + head := plumbing.NewHashReference(plumbing.HEAD, hash) + return r.Storer.SetReference(head) +} + +// Submodules list of several submodules from the same repository +type Submodules []*Submodule + +// Init initializes the submodules in this list +func (s Submodules) Init() error { + for _, sub := range s { + if err := sub.Init(); err != nil { + return err + } + } + + return nil +} + +// Update updates all the submodules in this list +func (s Submodules) Update(o *SubmoduleUpdateOptions) error { + for _, sub := range s { + if err := sub.Update(o); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go b/vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go new file mode 100644 index 0000000000000000000000000000000000000000..0bcf11a6d3934ac15e2a4aae6c1bfc187cce6519 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go @@ -0,0 +1,124 @@ +// Package binary implements sintax-sugar functions on top of the standard +// library binary package +package binary + +import ( + "encoding/binary" + "io" + + "gopkg.in/src-d/go-git.v4/plumbing" +) + +// Read reads structured binary data from r into data. Bytes are read and +// decoded in BigEndian order +// https://golang.org/pkg/encoding/binary/#Read +func Read(r io.Reader, data ...interface{}) error { + for _, v := range data { + if err := binary.Read(r, binary.BigEndian, v); err != nil { + return err + } + } + + return nil +} + +// ReadUntil reads from r untin delim is found +func ReadUntil(r io.Reader, delim byte) ([]byte, error) { + var buf [1]byte + value := make([]byte, 0, 16) + for { + if _, err := io.ReadFull(r, buf[:]); err != nil { + if err == io.EOF { + return nil, err + } + + return nil, err + } + + if buf[0] == delim { + return value, nil + } + + value = append(value, buf[0]) + } +} + +// ReadVariableWidthInt reads and returns an int in Git VLQ special format: +// +// Ordinary VLQ has some redundancies, example: the number 358 can be +// encoded as the 2-octet VLQ 0x8166 or the 3-octet VLQ 0x808166 or the +// 4-octet VLQ 0x80808166 and so forth. +// +// To avoid these redundancies, the VLQ format used in Git removes this +// prepending redundancy and extends the representable range of shorter +// VLQs by adding an offset to VLQs of 2 or more octets in such a way +// that the lowest possible value for such an (N+1)-octet VLQ becomes +// exactly one more than the maximum possible value for an N-octet VLQ. +// In particular, since a 1-octet VLQ can store a maximum value of 127, +// the minimum 2-octet VLQ (0x8000) is assigned the value 128 instead of +// 0. Conversely, the maximum value of such a 2-octet VLQ (0xff7f) is +// 16511 instead of just 16383. Similarly, the minimum 3-octet VLQ +// (0x808000) has a value of 16512 instead of zero, which means +// that the maximum 3-octet VLQ (0xffff7f) is 2113663 instead of +// just 2097151. And so forth. 
+// +// This is how the offset is saved in C: +// +// dheader[pos] = ofs & 127; +// while (ofs >>= 7) +// dheader[--pos] = 128 | (--ofs & 127); +// +func ReadVariableWidthInt(r io.Reader) (int64, error) { + var c byte + if err := Read(r, &c); err != nil { + return 0, err + } + + var v = int64(c & maskLength) + for c&maskContinue > 0 { + v++ + if err := Read(r, &c); err != nil { + return 0, err + } + + v = (v << lengthBits) + int64(c&maskLength) + } + + return v, nil +} + +const ( + maskContinue = uint8(128) // 1000 000 + maskLength = uint8(127) // 0111 1111 + lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length +) + +// ReadUint32 reads 4 bytes and returns them as a Big ndian uint32 +func ReadUint32(r io.Reader) (uint32, error) { + var v uint32 + if err := binary.Read(r, binary.BigEndian, &v); err != nil { + return 0, err + } + + return v, nil +} + +// ReadUint16 reads 2 bytes and returns them as a BigEndian uint16 +func ReadUint16(r io.Reader) (uint16, error) { + var v uint16 + if err := binary.Read(r, binary.BigEndian, &v); err != nil { + return 0, err + } + + return v, nil +} + +// ReadHash reads a plumbing.Hash from r +func ReadHash(r io.Reader) (plumbing.Hash, error) { + var h plumbing.Hash + if err := binary.Read(r, binary.BigEndian, h[:]); err != nil { + return plumbing.ZeroHash, err + } + + return h, nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go b/vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go new file mode 100644 index 0000000000000000000000000000000000000000..2ec358134642e9018375ecb24ebbcbb66029a5be --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go @@ -0,0 +1,44 @@ +package binary + +import ( + "encoding/binary" + "io" +) + +// Write writes the binary representation of data into w, using BigEndian order +// https://golang.org/pkg/encoding/binary/#Write +func Write(w io.Writer, data ...interface{}) error { + for _, v := range data { + if err := binary.Write(w, binary.BigEndian, v); err != nil { + return err + } + } + + return nil +} + +func WriteVariableWidthInt(w io.Writer, n int64) error { + buf := []byte{byte(n & 0x7f)} + n >>= 7 + for n != 0 { + n-- + buf = append([]byte{0x80 | (byte(n & 0x7f))}, buf...) + n >>= 7 + } + + _, err := w.Write(buf) + + return err +} + +// WriteUint32 writes the binary representation of a uint32 into w, in BigEndian +// order +func WriteUint32(w io.Writer, value uint32) error { + return binary.Write(w, binary.BigEndian, value) +} + +// WriteUint16 writes the binary representation of a uint16 into w, in BigEndian +// order +func WriteUint16(w io.Writer, value uint16) error { + return binary.Write(w, binary.BigEndian, value) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go b/vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go new file mode 100644 index 0000000000000000000000000000000000000000..b840ad6d75565b881a036d50e2bf814fe22e1701 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go @@ -0,0 +1,45 @@ +// Package diff implements line oriented diffs, similar to the ancient +// Unix diff command. +// +// The current implementation is just a wrapper around Sergi's +// go-diff/diffmatchpatch library, which is a go port of Neil +// Fraser's google-diff-match-patch code +package diff + +import ( + "bytes" + + "github.com/sergi/go-diff/diffmatchpatch" +) + +// Do computes the (line oriented) modifications needed to turn the src +// string into the dst string. 
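The offset VLQ described for ReadVariableWidthInt above is easiest to see as a round trip. The stand-alone sketch below mirrors the vendored Read/WriteVariableWidthInt logic with invented helper names; per the comment above, 358 encodes to the two octets 0x81 0x66:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

const (
	maskContinue = byte(0x80) // MSB set: more octets follow
	maskLength   = byte(0x7f) // lower seven bits carry the payload
)

// encodeOffsetVLQ mirrors the C loop quoted above: emit the low seven bits,
// then prepend continuation octets, subtracting one per octet to remove the
// redundant encodings.
func encodeOffsetVLQ(w io.Writer, n int64) error {
	buf := []byte{byte(n) & maskLength}
	n >>= 7
	for n != 0 {
		n--
		buf = append([]byte{maskContinue | (byte(n) & maskLength)}, buf...)
		n >>= 7
	}
	_, err := w.Write(buf)
	return err
}

// decodeOffsetVLQ is the inverse, adding one per continuation octet to undo
// the offset applied while encoding.
func decodeOffsetVLQ(r io.ByteReader) (int64, error) {
	c, err := r.ReadByte()
	if err != nil {
		return 0, err
	}
	v := int64(c & maskLength)
	for c&maskContinue > 0 {
		v++
		if c, err = r.ReadByte(); err != nil {
			return 0, err
		}
		v = (v << 7) + int64(c&maskLength)
	}
	return v, nil
}

func main() {
	var buf bytes.Buffer
	if err := encodeOffsetVLQ(&buf, 358); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes()) // 81 66

	n, err := decodeOffsetVLQ(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 358
}
```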
+func Do(src, dst string) (diffs []diffmatchpatch.Diff) { + dmp := diffmatchpatch.New() + wSrc, wDst, warray := dmp.DiffLinesToChars(src, dst) + diffs = dmp.DiffMain(wSrc, wDst, false) + diffs = dmp.DiffCharsToLines(diffs, warray) + return diffs +} + +// Dst computes and returns the destination text. +func Dst(diffs []diffmatchpatch.Diff) string { + var text bytes.Buffer + for _, d := range diffs { + if d.Type != diffmatchpatch.DiffDelete { + text.WriteString(d.Text) + } + } + return text.String() +} + +// Src computes and returns the source text +func Src(diffs []diffmatchpatch.Diff) string { + var text bytes.Buffer + for _, d := range diffs { + if d.Type != diffmatchpatch.DiffInsert { + text.WriteString(d.Text) + } + } + return text.String() +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go b/vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go new file mode 100644 index 0000000000000000000000000000000000000000..73cc9c3e76bbcb57a3deabdeccd1b822621e4341 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go @@ -0,0 +1,74 @@ +// Package ioutil implements some I/O utility functions. +package ioutil + +import ( + "bufio" + "errors" + "io" +) + +type readPeeker interface { + io.Reader + Peek(int) ([]byte, error) +} + +var ( + ErrEmptyReader = errors.New("reader is empty") +) + +// NonEmptyReader takes a reader and returns it if it is not empty, or +// `ErrEmptyReader` if it is empty. If there is an error when reading the first +// byte of the given reader, it will be propagated. +func NonEmptyReader(r io.Reader) (io.Reader, error) { + pr, ok := r.(readPeeker) + if !ok { + pr = bufio.NewReader(r) + } + + _, err := pr.Peek(1) + if err == io.EOF { + return nil, ErrEmptyReader + } + + if err != nil { + return nil, err + } + + return pr, nil +} + +type readCloser struct { + io.Reader + closer io.Closer +} + +func (r *readCloser) Close() error { + return r.closer.Close() +} + +// NewReadCloser creates an `io.ReadCloser` with the given `io.Reader` and +// `io.Closer`. +func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser { + return &readCloser{Reader: r, closer: c} +} + +type writeNopCloser struct { + io.Writer +} + +func (writeNopCloser) Close() error { return nil } + +// WriteNopCloser returns a WriteCloser with a no-op Close method wrapping +// the provided Writer w. +func WriteNopCloser(w io.Writer) io.WriteCloser { + return writeNopCloser{w} +} + +// CheckClose calls Close on the given io.Closer. If the given *error points to +// nil, it will be assigned the error returned by Close. Otherwise, any error +// returned by Close will be ignored. CheckClose is usually called with defer. +func CheckClose(c io.Closer, err *error) { + if cerr := c.Close(); cerr != nil && *err == nil { + *err = cerr + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go new file mode 100644 index 0000000000000000000000000000000000000000..0b50ca71d3facf2702473eeafb7239b10347c292 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go @@ -0,0 +1,149 @@ +package merkletrie + +import ( + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +// Action values represent the kind of things a Change can represent: +// insertion, deletions or modifications of files. +type Action int + +// The set of possible actions in a change. +const ( + _ Action = iota + Insert + Delete + Modify +) + +// String returns the action as a human readable text. 
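The CheckClose helper above is the package's idiom for not losing errors raised by deferred Close calls; it only works together with a named error return, as in this small sketch (the path is a placeholder):

```go
package main

import (
	"os"

	gitioutil "gopkg.in/src-d/go-git.v4/utils/ioutil"
)

// writeFile reports a failing Close even when every Write succeeded, which
// matters for buffered or remote writers.
func writeFile(path string, data []byte) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	// CheckClose assigns the Close error to err only if err is still nil.
	defer gitioutil.CheckClose(f, &err)

	_, err = f.Write(data)
	return err
}

func main() {
	if err := writeFile("/tmp/checkclose-example.txt", []byte("data\n")); err != nil {
		panic(err)
	}
}
```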
+func (a Action) String() string { + switch a { + case Insert: + return "Insert" + case Delete: + return "Delete" + case Modify: + return "Modify" + default: + panic(fmt.Sprintf("unsupported action: %d", a)) + } +} + +// A Change value represent how a noder has change between to merkletries. +type Change struct { + // The noder before the change or nil if it was inserted. + From noder.Path + // The noder after the change or nil if it was deleted. + To noder.Path +} + +// Action is convenience method that returns what Action c represents. +func (c *Change) Action() (Action, error) { + if c.From == nil && c.To == nil { + return Action(0), fmt.Errorf("malformed change: nil from and to") + } + if c.From == nil { + return Insert, nil + } + if c.To == nil { + return Delete, nil + } + + return Modify, nil +} + +// NewInsert returns a new Change representing the insertion of n. +func NewInsert(n noder.Path) Change { return Change{To: n} } + +// NewDelete returns a new Change representing the deletion of n. +func NewDelete(n noder.Path) Change { return Change{From: n} } + +// NewModify returns a new Change representing that a has been modified and +// it is now b. +func NewModify(a, b noder.Path) Change { + return Change{ + From: a, + To: b, + } +} + +// String returns a single change in human readable form, using the +// format: '<' + action + space + path + '>'. The contents of the file +// before or after the change are not included in this format. +// +// Example: inserting a file at the path a/b/c.txt will return "<Insert +// a/b/c.txt>". +func (c Change) String() string { + action, err := c.Action() + if err != nil { + panic(err) + } + + var path string + if action == Delete { + path = c.From.String() + } else { + path = c.To.String() + } + + return fmt.Sprintf("<%s %s>", action, path) +} + +// Changes is a list of changes between to merkletries. +type Changes []Change + +// NewChanges returns an empty list of changes. +func NewChanges() Changes { + return Changes{} +} + +// Add adds the change c to the list of changes. +func (l *Changes) Add(c Change) { + *l = append(*l, c) +} + +// AddRecursiveInsert adds the required changes to insert all the +// file-like noders found in root, recursively. +func (l *Changes) AddRecursiveInsert(root noder.Path) error { + return l.addRecursive(root, NewInsert) +} + +// AddRecursiveDelete adds the required changes to delete all the +// file-like noders found in root, recursively. 
+func (l *Changes) AddRecursiveDelete(root noder.Path) error { + return l.addRecursive(root, NewDelete) +} + +type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete + +func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error { + if !root.IsDir() { + l.Add(ctor(root)) + return nil + } + + i, err := NewIterFromPath(root) + if err != nil { + return err + } + + var current noder.Path + for { + if current, err = i.Step(); err != nil { + if err == io.EOF { + break + } + return err + } + if current.IsDir() { + continue + } + l.Add(ctor(current)) + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go new file mode 100644 index 0000000000000000000000000000000000000000..074886554be1bd0cc956789f6a3a510bd2e00f71 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go @@ -0,0 +1,403 @@ +package merkletrie + +// The focus of this difftree implementation is to save time by +// skipping whole directories if their hash is the same in both +// trees. +// +// The diff algorithm implemented here is based on the doubleiter +// type defined in this same package; we will iterate over both +// trees at the same time, while comparing the current noders in +// each iterator. Depending on how they differ we will output the +// corresponding chages and move the iterators further over both +// trees. +// +// The table bellow show all the possible comparison results, along +// with what changes should we produce and how to advance the +// iterators. +// +// The table is implemented by the switches in this function, +// diffTwoNodes, diffTwoNodesSameName and diffTwoDirs. +// +// Many Bothans died to bring us this information, make sure you +// understand the table before modifying this code. + +// # Cases +// +// When comparing noders in both trees you will found yourself in +// one of 169 possible cases, but if we ignore moves, we can +// simplify a lot the search space into the following table: +// +// - "-": nothing, no file or directory +// - a<>: an empty file named "a". +// - a<1>: a file named "a", with "1" as its contents. +// - a<2>: a file named "a", with "2" as its contents. +// - a(): an empty dir named "a". +// - a(...): a dir named "a", with some files and/or dirs inside (possibly +// empty). +// - a(;;;): a dir named "a", with some other files and/or dirs inside +// (possibly empty), which different from the ones in "a(...)". +// +// \ to - a<> a<1> a<2> a() a(...) a(;;;) +// from \ +// - 00 01 02 03 04 05 06 +// a<> 10 11 12 13 14 15 16 +// a<1> 20 21 22 23 24 25 26 +// a<2> 30 31 32 33 34 35 36 +// a() 40 41 42 43 44 45 46 +// a(...) 50 51 52 53 54 55 56 +// a(;;;) 60 61 62 63 64 65 66 +// +// Every (from, to) combination in the table is a special case, but +// some of them can be merged into some more general cases, for +// instance 11 and 22 can be merged into the general case: both +// noders are equal. +// +// Here is a full list of all the cases that are similar and how to +// merge them together into more general cases. Each general case +// is labeled wiht an uppercase letter for further reference, and it +// is followed by the pseudocode of the checks you have to perfrom +// on both noders to see if you are in such a case, the actions to +// perform (i.e. what changes to output) and how to advance the +// iterators of each tree to continue the comparison process. +// +// ## A. Impossible: 00 +// +// ## B. 
Same thing on both sides: 11, 22, 33, 44, 55, 66 +// - check: `SameName() && SameHash()` +// - action: do nothing. +// - advance: `FromNext(); ToNext()` +// +// ### C. To was created: 01, 02, 03, 04, 05, 06 +// - check: `DifferentName() && ToBeforeFrom()` +// - action: inserRecursively(to) +// - advance: `ToNext()` +// +// ### D. From was deleted: 10, 20, 30, 40, 50, 60 +// - check: `DifferentName() && FromBeforeTo()` +// - action: `DeleteRecursively(from)` +// - advance: `FromNext()` +// +// ### E. Empty file to file with contents: 12, 13 +// - check: `SameName() && DifferentHash() && FromIsFile() && +// ToIsFile() && FromIsEmpty()` +// - action: `modifyFile(from, to)` +// - advance: `FromNext()` or `FromStep()` +// +// ### E'. file with contents to empty file: 21, 31 +// - check: `SameName() && DifferentHash() && FromIsFile() && +// ToIsFile() && ToIsEmpty()` +// - action: `modifyFile(from, to)` +// - advance: `FromNext()` or `FromStep()` +// +// ### F. empty file to empty dir with the same name: 14 +// - check: `SameName() && FromIsFile() && FromIsEmpty() && +// ToIsDir() && ToIsEmpty()` +// - action: `DeleteFile(from); InsertEmptyDir(to)` +// - advance: `FromNext(); ToNext()` +// +// ### F'. empty dir to empty file of the same name: 41 +// - check: `SameName() && FromIsDir() && FromIsEmpty && +// ToIsFile() && ToIsEmpty()` +// - action: `DeleteEmptyDir(from); InsertFile(to)` +// - advance: `FromNext(); ToNext()` or step for any of them. +// +// ### G. empty file to non-empty dir of the same name: 15, 16 +// - check: `SameName() && FromIsFile() && ToIsDir() && +// FromIsEmpty() && ToIsNotEmpty()` +// - action: `DeleteFile(from); InsertDirRecursively(to)` +// - advance: `FromNext(); ToNext()` +// +// ### G'. non-empty dir to empty file of the same name: 51, 61 +// - check: `SameName() && FromIsDir() && FromIsNotEmpty() && +// ToIsFile() && FromIsEmpty()` +// - action: `DeleteDirRecursively(from); InsertFile(to)` +// - advance: `FromNext(); ToNext()` +// +// ### H. modify file contents: 23, 32 +// - check: `SameName() && FromIsFile() && ToIsFile() && +// FromIsNotEmpty() && ToIsNotEmpty()` +// - action: `ModifyFile(from, to)` +// - advance: `FromNext(); ToNext()` +// +// ### I. file with contents to empty dir: 24, 34 +// - check: `SameName() && DifferentHash() && FromIsFile() && +// FromIsNotEmpty() && ToIsDir() && ToIsEmpty()` +// - action: `DeleteFile(from); InsertEmptyDir(to)` +// - advance: `FromNext(); ToNext()` +// +// ### I'. empty dir to file with contents: 42, 43 +// - check: `SameName() && DifferentHash() && FromIsDir() && +// FromIsEmpty() && ToIsFile() && ToIsEmpty()` +// - action: `DeleteDir(from); InsertFile(to)` +// - advance: `FromNext(); ToNext()` +// +// ### J. file with contents to dir with contetns: 25, 26, 35, 36 +// - check: `SameName() && DifferentHash() && FromIsFile() && +// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` +// - action: `DeleteFile(from); InsertDirRecursively(to)` +// - advance: `FromNext(); ToNext()` +// +// ### J'. dir with contetns to file with contents: 52, 62, 53, 63 +// - check: `SameName() && DifferentHash() && FromIsDir() && +// FromIsNotEmpty() && ToIsFile() && ToIsNotEmpty()` +// - action: `DeleteDirRecursively(from); InsertFile(to)` +// - advance: `FromNext(); ToNext()` +// +// ### K. empty dir to dir with contents: 45, 46 +// - check: `SameName() && DifferentHash() && FromIsDir() && +// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` +// - action: `InsertChildrenRecursively(to)` +// - advance: `FromNext(); ToNext()` +// +// ### K'. 
dir with contents to empty dir: 54, 64 +// - check: `SameName() && DifferentHash() && FromIsDir() && +// FromIsEmpty() && ToIsDir() && ToIsNotEmpty()` +// - action: `DeleteChildrenRecursively(from)` +// - advance: `FromNext(); ToNext()` +// +// ### L. dir with contents to dir with different contents: 56, 65 +// - check: `SameName() && DifferentHash() && FromIsDir() && +// FromIsNotEmpty() && ToIsDir() && ToIsNotEmpty()` +// - action: nothing +// - advance: `FromStep(); ToStep()` +// +// + +// All these cases can be further simplified by a truth table +// reduction process, in which we gather similar checks together to +// make the final code easier to read and understand. +// +// The first 6 columns are the outputs of the checks to perform on +// both noders. I have labeled them 1 to 6, this is what they mean: +// +// 1: SameName() +// 2: SameHash() +// 3: FromIsDir() +// 4: ToIsDir() +// 5: FromIsEmpty() +// 6: ToIsEmpty() +// +// The from and to columns are a fsnoder example of the elements +// that you will find on each tree under the specified comparison +// results (columns 1 to 6). +// +// The type column identifies the case we are into, from the list above. +// +// The type' column identifies the new set of reduced cases, using +// lowercase letters, and they are explained after the table. +// +// The last column is the set of actions and advances for each case. +// +// "---" means impossible except in case of hash collision. +// +// advance meaning: +// - NN: from.Next(); to.Next() +// - SS: from.Step(); to.Step() +// +// 1 2 3 4 5 6 | from | to |type|type'|action ; advance +// ------------+--------+--------+----+------------------------------------ +// 0 0 0 0 0 0 | | | | | if !SameName() { +// . | | | | | if FromBeforeTo() { +// . | | | D | d | delete(from); from.Next() +// . | | | | | } else { +// . | | | C | c | insert(to); to.Next() +// . | | | | | } +// 0 1 1 1 1 1 | | | | | } +// 1 0 0 0 0 0 | a<1> | a<2> | H | e | modify(from, to); NN +// 1 0 0 0 0 1 | a<1> | a<> | E' | e | modify(from, to); NN +// 1 0 0 0 1 0 | a<> | a<1> | E | e | modify(from, to); NN +// 1 0 0 0 1 1 | ---- | ---- | | e | +// 1 0 0 1 0 0 | a<1> | a(...) | J | f | delete(from); insert(to); NN +// 1 0 0 1 0 1 | a<1> | a() | I | f | delete(from); insert(to); NN +// 1 0 0 1 1 0 | a<> | a(...) | G | f | delete(from); insert(to); NN +// 1 0 0 1 1 1 | a<> | a() | F | f | delete(from); insert(to); NN +// 1 0 1 0 0 0 | a(...) | a<1> | J' | f | delete(from); insert(to); NN +// 1 0 1 0 0 1 | a(...) | a<> | G' | f | delete(from); insert(to); NN +// 1 0 1 0 1 0 | a() | a<1> | I' | f | delete(from); insert(to); NN +// 1 0 1 0 1 1 | a() | a<> | F' | f | delete(from); insert(to); NN +// 1 0 1 1 0 0 | a(...) | a(;;;) | L | g | nothing; SS +// 1 0 1 1 0 1 | a(...) | a() | K' | h | deleteChidren(from); NN +// 1 0 1 1 1 0 | a() | a(...) | K | i | insertChildren(to); NN +// 1 0 1 1 1 1 | ---- | ---- | | | +// 1 1 0 0 0 0 | a<1> | a<1> | B | b | nothing; NN +// 1 1 0 0 0 1 | ---- | ---- | | b | +// 1 1 0 0 1 0 | ---- | ---- | | b | +// 1 1 0 0 1 1 | a<> | a<> | B | b | nothing; NN +// 1 1 0 1 0 0 | ---- | ---- | | b | +// 1 1 0 1 0 1 | ---- | ---- | | b | +// 1 1 0 1 1 0 | ---- | ---- | | b | +// 1 1 0 1 1 1 | ---- | ---- | | b | +// 1 1 1 0 0 0 | ---- | ---- | | b | +// 1 1 1 0 0 1 | ---- | ---- | | b | +// 1 1 1 0 1 0 | ---- | ---- | | b | +// 1 1 1 0 1 1 | ---- | ---- | | b | +// 1 1 1 1 0 0 | a(...) | a(...) 
| B | b | nothing; NN +// 1 1 1 1 0 1 | ---- | ---- | | b | +// 1 1 1 1 1 0 | ---- | ---- | | b | +// 1 1 1 1 1 1 | a() | a() | B | b | nothing; NN +// +// c and d: +// if !SameName() +// d if FromBeforeTo() +// c else +// b: SameName) && sameHash() +// e: SameName() && !sameHash() && BothAreFiles() +// f: SameName() && !sameHash() && FileAndDir() +// g: SameName() && !sameHash() && BothAreDirs() && NoneIsEmpty +// i: SameName() && !sameHash() && BothAreDirs() && FromIsEmpty +// h: else of i + +import ( + "fmt" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +// DiffTree calculates the list of changes between two merkletries. It +// uses the provided hashEqual callback to compare noders. +func DiffTree(fromTree, toTree noder.Noder, + hashEqual noder.Equal) (Changes, error) { + ret := NewChanges() + + ii, err := newDoubleIter(fromTree, toTree, hashEqual) + if err != nil { + return nil, err + } + + for { + from := ii.from.current + to := ii.to.current + + switch r := ii.remaining(); r { + case noMoreNoders: + return ret, nil + case onlyFromRemains: + if err = ret.AddRecursiveDelete(from); err != nil { + return nil, err + } + if err = ii.nextFrom(); err != nil { + return nil, err + } + case onlyToRemains: + if err = ret.AddRecursiveInsert(to); err != nil { + return nil, err + } + if err = ii.nextTo(); err != nil { + return nil, err + } + case bothHaveNodes: + if err = diffNodes(&ret, ii); err != nil { + return nil, err + } + default: + panic(fmt.Sprintf("unknown remaining value: %d", r)) + } + } +} + +func diffNodes(changes *Changes, ii *doubleIter) error { + from := ii.from.current + to := ii.to.current + var err error + + // compare their full paths as strings + switch from.Compare(to) { + case -1: + if err = changes.AddRecursiveDelete(from); err != nil { + return err + } + if err = ii.nextFrom(); err != nil { + return err + } + case 1: + if err = changes.AddRecursiveInsert(to); err != nil { + return err + } + if err = ii.nextTo(); err != nil { + return err + } + default: + if err := diffNodesSameName(changes, ii); err != nil { + return err + } + } + + return nil +} + +func diffNodesSameName(changes *Changes, ii *doubleIter) error { + from := ii.from.current + to := ii.to.current + + status, err := ii.compare() + if err != nil { + return err + } + + switch { + case status.sameHash: + // do nothing + if err = ii.nextBoth(); err != nil { + return err + } + case status.bothAreFiles: + changes.Add(NewModify(from, to)) + if err = ii.nextBoth(); err != nil { + return err + } + case status.fileAndDir: + if err = changes.AddRecursiveDelete(from); err != nil { + return err + } + if err = changes.AddRecursiveInsert(to); err != nil { + return err + } + if err = ii.nextBoth(); err != nil { + return err + } + case status.bothAreDirs: + if err = diffDirs(changes, ii); err != nil { + return err + } + default: + return fmt.Errorf("bad status from double iterator") + } + + return nil +} + +func diffDirs(changes *Changes, ii *doubleIter) error { + from := ii.from.current + to := ii.to.current + + status, err := ii.compare() + if err != nil { + return err + } + + switch { + case status.fromIsEmptyDir: + if err = changes.AddRecursiveInsert(to); err != nil { + return err + } + if err = ii.nextBoth(); err != nil { + return err + } + case status.toIsEmptyDir: + if err = changes.AddRecursiveDelete(from); err != nil { + return err + } + if err = ii.nextBoth(); err != nil { + return err + } + case !status.fromIsEmptyDir && !status.toIsEmptyDir: + // do nothing + if err = ii.stepBoth(); err != nil { + 
return err + } + default: + return fmt.Errorf("both dirs are empty but has different hash") + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go new file mode 100644 index 0000000000000000000000000000000000000000..5204024ad4f996a850c84fcce1095398814a4433 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go @@ -0,0 +1,34 @@ +/* +Package merkletrie provides support for n-ary trees that are at the same +time Merkle trees and Radix trees (tries). + +Git trees are Radix n-ary trees in virtue of the names of their +tree entries. At the same time, git trees are Merkle trees thanks to +their hashes. + +This package defines Merkle tries as nodes that should have: + +- a hash: the Merkle part of the Merkle trie + +- a key: the Radix part of the Merkle trie + +The Merkle hash condition is not enforced by this package though. This +means that the hash of a node doesn't have to take into account the hashes of +their children, which is good for testing purposes. + +Nodes in the Merkle trie are abstracted by the Noder interface. The +intended use is that git trees implements this interface, either +directly or using a simple wrapper. + +This package provides an iterator for merkletries that can skip whole +directory-like noders and an efficient merkletrie comparison algorithm. + +When comparing git trees, the simple approach of alphabetically sorting +their elements and comparing the resulting lists is too slow as it +depends linearly on the number of files in the trees: When a directory +has lots of files but none of them has been modified, this approach is +very expensive. We can do better by prunning whole directories that +have not change, just by looking at their hashes. This package provides +the tools to do exactly that. +*/ +package merkletrie diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go new file mode 100644 index 0000000000000000000000000000000000000000..e56dee701f2ae4c7d7a55315c4ea8a05d8620f6a --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go @@ -0,0 +1,187 @@ +package merkletrie + +import ( + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +// A doubleIter is a convenience type to keep track of the current +// noders in two merkletries that are going to be iterated in parallel. +// It has methods for: +// +// - iterating over the merkletries, both at the same time or +// individually: nextFrom, nextTo, nextBoth, stepBoth +// +// - checking if there are noders left in one or both of them with the +// remaining method and its associated returned type. +// +// - comparing the current noders of both merkletries in several ways, +// with the compare method and its associated returned type. +type doubleIter struct { + from struct { + iter *Iter + current noder.Path // nil if no more nodes + } + to struct { + iter *Iter + current noder.Path // nil if no more nodes + } + hashEqual noder.Equal +} + +// NewdoubleIter returns a new doubleIter for the merkletries "from" and +// "to". The hashEqual callback function will be used by the doubleIter +// to compare the hash of the noders in the merkletries. The doubleIter +// will be initialized to the first elements in each merkletrie if any. 
+func newDoubleIter(from, to noder.Noder, hashEqual noder.Equal) ( + *doubleIter, error) { + var ii doubleIter + var err error + + if ii.from.iter, err = NewIter(from); err != nil { + return nil, fmt.Errorf("from: %s", err) + } + if ii.from.current, err = ii.from.iter.Next(); turnEOFIntoNil(err) != nil { + return nil, fmt.Errorf("from: %s", err) + } + + if ii.to.iter, err = NewIter(to); err != nil { + return nil, fmt.Errorf("to: %s", err) + } + if ii.to.current, err = ii.to.iter.Next(); turnEOFIntoNil(err) != nil { + return nil, fmt.Errorf("to: %s", err) + } + + ii.hashEqual = hashEqual + + return &ii, nil +} + +func turnEOFIntoNil(e error) error { + if e != nil && e != io.EOF { + return e + } + return nil +} + +// NextBoth makes d advance to the next noder in both merkletries. If +// any of them is a directory, it skips its contents. +func (d *doubleIter) nextBoth() error { + if err := d.nextFrom(); err != nil { + return err + } + if err := d.nextTo(); err != nil { + return err + } + + return nil +} + +// NextFrom makes d advance to the next noder in the "from" merkletrie, +// skipping its contents if it is a directory. +func (d *doubleIter) nextFrom() (err error) { + d.from.current, err = d.from.iter.Next() + return turnEOFIntoNil(err) +} + +// NextTo makes d advance to the next noder in the "to" merkletrie, +// skipping its contents if it is a directory. +func (d *doubleIter) nextTo() (err error) { + d.to.current, err = d.to.iter.Next() + return turnEOFIntoNil(err) +} + +// StepBoth makes d advance to the next noder in both merkletries, +// getting deeper into directories if that is the case. +func (d *doubleIter) stepBoth() (err error) { + if d.from.current, err = d.from.iter.Step(); turnEOFIntoNil(err) != nil { + return err + } + if d.to.current, err = d.to.iter.Step(); turnEOFIntoNil(err) != nil { + return err + } + return nil +} + +// Remaining returns if there are no more noders in the tree, if both +// have noders or if one of them doesn't. +func (d *doubleIter) remaining() remaining { + if d.from.current == nil && d.to.current == nil { + return noMoreNoders + } + + if d.from.current == nil && d.to.current != nil { + return onlyToRemains + } + + if d.from.current != nil && d.to.current == nil { + return onlyFromRemains + } + + return bothHaveNodes +} + +// Remaining values tells you whether both trees still have noders, or +// only one of them or none of them. +type remaining int + +const ( + noMoreNoders remaining = iota + onlyToRemains + onlyFromRemains + bothHaveNodes +) + +// Compare returns the comparison between the current elements in the +// merkletries. +func (d *doubleIter) compare() (s comparison, err error) { + s.sameHash = d.hashEqual(d.from.current, d.to.current) + + fromIsDir := d.from.current.IsDir() + toIsDir := d.to.current.IsDir() + + s.bothAreDirs = fromIsDir && toIsDir + s.bothAreFiles = !fromIsDir && !toIsDir + s.fileAndDir = !s.bothAreDirs && !s.bothAreFiles + + fromNumChildren, err := d.from.current.NumChildren() + if err != nil { + return comparison{}, fmt.Errorf("from: %s", err) + } + + toNumChildren, err := d.to.current.NumChildren() + if err != nil { + return comparison{}, fmt.Errorf("to: %s", err) + } + + s.fromIsEmptyDir = fromIsDir && fromNumChildren == 0 + s.toIsEmptyDir = toIsDir && toNumChildren == 0 + + return +} + +// Answers to a lot of questions you can ask about how to noders are +// equal or different. +type comparison struct { + // the following are only valid if both nodes have the same name + // (i.e. 
nameComparison == 0) + + // Do both nodes have the same hash? + sameHash bool + // Are both nodes files? + bothAreFiles bool + + // the following are only valid if any of the noders are dirs, + // this is, if !bothAreFiles + + // Is one a file and the other a dir? + fileAndDir bool + // Are both nodes dirs? + bothAreDirs bool + // Is the from node an empty dir? + fromIsEmptyDir bool + // Is the to Node an empty dir? + toIsEmptyDir bool +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go new file mode 100644 index 0000000000000000000000000000000000000000..a0b042ee61d8ca843549d130717ca67ee2eec28b --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go @@ -0,0 +1,91 @@ +package frame + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +// A Frame is a collection of siblings in a trie, sorted alphabetically +// by name. +type Frame struct { + // siblings, sorted in reverse alphabetical order by name + stack []noder.Noder +} + +type byName []noder.Noder + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + return strings.Compare(a[i].Name(), a[j].Name()) < 0 +} + +// New returns a frame with the children of the provided node. +func New(n noder.Noder) (*Frame, error) { + children, err := n.Children() + if err != nil { + return nil, err + } + + sort.Sort(sort.Reverse(byName(children))) + return &Frame{ + stack: children, + }, nil +} + +// String returns the quoted names of the noders in the frame sorted in +// alphabeticall order by name, surrounded by square brackets and +// separated by comas. +// +// Examples: +// [] +// ["a", "b"] +func (f *Frame) String() string { + var buf bytes.Buffer + _ = buf.WriteByte('[') + + sep := "" + for i := f.Len() - 1; i >= 0; i-- { + _, _ = buf.WriteString(sep) + sep = ", " + _, _ = buf.WriteString(fmt.Sprintf("%q", f.stack[i].Name())) + } + + _ = buf.WriteByte(']') + + return buf.String() +} + +// First returns, but dont extract, the noder with the alphabetically +// smaller name in the frame and true if the frame was not empy. +// Otherwise it returns nil and false. +func (f *Frame) First() (noder.Noder, bool) { + if f.Len() == 0 { + return nil, false + } + + top := f.Len() - 1 + + return f.stack[top], true +} + +// Drop extracts the noder with the alphabetically smaller name in the +// frame or does nothing if the frame was empty. +func (f *Frame) Drop() { + if f.Len() == 0 { + return + } + + top := f.Len() - 1 + f.stack[top] = nil + f.stack = f.stack[:top] +} + +// Len returns the number of noders in the frame. +func (f *Frame) Len() int { + return len(f.stack) +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go new file mode 100644 index 0000000000000000000000000000000000000000..c84f6fcc8a70525251cec18433ae27ae149ba3fe --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go @@ -0,0 +1,212 @@ +package merkletrie + +import ( + "fmt" + "io" + + "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame" + "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder" +) + +// Iter is an iterator for merkletries (only the trie part of the +// merkletrie is relevant here, it does not use the Hasher interface). +// +// The iteration is performed in depth-first pre-order. 
Entries at each +// depth are traversed in (case-sensitive) alphabetical order. +// +// This is the kind of traversal you will expect when listing ordinary +// files and directories recursively, for example: +// +// Trie Traversal order +// ---- --------------- +// . +// / | \ c +// / | \ d/ +// d c z ===> d/a +// / \ d/b +// b a z +// +// +// This iterator is somewhat especial as you can chose to skip whole +// "directories" when iterating: +// +// - The Step method will iterate normally. +// +// - the Next method will not descend deeper into the tree. +// +// For example, if the iterator is at `d/`, the Step method will return +// `d/a` while the Next would have returned `z` instead (skipping `d/` +// and its descendants). The name of the these two methods are based on +// the well known "next" and "step" operations, quite common in +// debuggers, like gdb. +// +// The paths returned by the iterator will be relative, if the iterator +// was created from a single node, or absolute, if the iterator was +// created from the path to the node (the path will be prefixed to all +// returned paths). +type Iter struct { + // Tells if the iteration has started. + hasStarted bool + // The top of this stack has the current node and its siblings. The + // rest of the stack keeps the ancestors of the current node and + // their corresponding siblings. The current element is always the + // top element of the top frame. + // + // When "step"ping into a node, its children are pushed as a new + // frame. + // + // When "next"ing pass a node, the current element is dropped by + // popping the top frame. + frameStack []*frame.Frame + // The base path used to turn the relative paths used internally by + // the iterator into absolute paths used by external applications. + // For relative iterator this will be nil. + base noder.Path +} + +// NewIter returns a new relative iterator using the provider noder as +// its unnamed root. When iterating, all returned paths will be +// relative to node. +func NewIter(n noder.Noder) (*Iter, error) { + return newIter(n, nil) +} + +// NewIterFromPath returns a new absolute iterator from the noder at the +// end of the path p. When iterating, all returned paths will be +// absolute, using the root of the path p as their root. +func NewIterFromPath(p noder.Path) (*Iter, error) { + return newIter(p, p) // Path implements Noder +} + +func newIter(root noder.Noder, base noder.Path) (*Iter, error) { + ret := &Iter{ + base: base, + } + + frame, err := frame.New(root) + if err != nil { + return nil, err + } + ret.push(frame) + + return ret, nil +} + +func (iter *Iter) top() (*frame.Frame, bool) { + if len(iter.frameStack) == 0 { + return nil, false + } + top := len(iter.frameStack) - 1 + + return iter.frameStack[top], true +} + +func (iter *Iter) push(f *frame.Frame) { + iter.frameStack = append(iter.frameStack, f) +} + +const ( + doDescend = true + dontDescend = false +) + +// Next returns the path of the next node without descending deeper into +// the trie and nil. If there are no more entries in the trie it +// returns nil and io.EOF. In case of error, it will return nil and the +// error. +func (iter *Iter) Next() (noder.Path, error) { + return iter.advance(dontDescend) +} + +// Step returns the path to the next node in the trie, descending deeper +// into it if needed, and nil. If there are no more nodes in the trie, +// it returns nil and io.EOF. In case of error, it will return nil and +// the error. 
+func (iter *Iter) Step() (noder.Path, error) { + return iter.advance(doDescend) +} + +// Advances the iterator in the desired direction: descend or +// dontDescend. +// +// Returns the new current element and a nil error on success. If there +// are no more elements in the trie below the base, it returns nil, and +// io.EOF. Returns nil and an error in case of errors. +func (iter *Iter) advance(wantDescend bool) (noder.Path, error) { + current, err := iter.current() + if err != nil { + return nil, err + } + + // The first time we just return the current node. + if !iter.hasStarted { + iter.hasStarted = true + return current, nil + } + + // Advances means getting a next current node, either its first child or + // its next sibling, depending if we must descend or not. + numChildren, err := current.NumChildren() + if err != nil { + return nil, err + } + + mustDescend := numChildren != 0 && wantDescend + if mustDescend { + // descend: add a new frame with the current's children. + frame, err := frame.New(current) + if err != nil { + return nil, err + } + iter.push(frame) + } else { + // don't descend: just drop the current node + iter.drop() + } + + return iter.current() +} + +// Returns the path to the current node, adding the base if there was +// one, and a nil error. If there were no noders left, it returns nil +// and io.EOF. If an error occurred, it returns nil and the error. +func (iter *Iter) current() (noder.Path, error) { + if topFrame, ok := iter.top(); !ok { + return nil, io.EOF + } else if _, ok := topFrame.First(); !ok { + return nil, io.EOF + } + + ret := make(noder.Path, 0, len(iter.base)+len(iter.frameStack)) + + // concat the base... + ret = append(ret, iter.base...) + // ... and the current node and all its ancestors + for i, f := range iter.frameStack { + t, ok := f.First() + if !ok { + panic(fmt.Sprintf("frame %d is empty", i)) + } + ret = append(ret, t) + } + + return ret, nil +} + +// removes the current node if any, and all the frames that become empty as a +// consecuence of this action. +func (iter *Iter) drop() { + frame, ok := iter.top() + if !ok { + return + } + + frame.Drop() + // if the frame is empty, remove it and its parent, recursively + if frame.Len() == 0 { + top := len(iter.frameStack) - 1 + iter.frameStack[top] = nil + iter.frameStack = iter.frameStack[:top] + iter.drop() + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go new file mode 100644 index 0000000000000000000000000000000000000000..d6b3de4adafa3ec873166cf8dd1186a53375bb03 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go @@ -0,0 +1,59 @@ +// Package noder provide an interface for defining nodes in a +// merkletrie, their hashes and their paths (a noders and its +// ancestors). +// +// The hasher interface is easy to implement naively by elements that +// already have a hash, like git blobs and trees. More sophisticated +// implementations can implement the Equal function in exotic ways +// though: for instance, comparing the modification time of directories +// in a filesystem. +package noder + +import "fmt" + +// Hasher interface is implemented by types that can tell you +// their hash. +type Hasher interface { + Hash() []byte +} + +// Equal functions take two hashers and return if they are equal. 
+// +// These functions are expected to be faster than reflect.Equal or +// reflect.DeepEqual because they can compare just the hash of the +// objects, instead of their contents, so they are expected to be O(1). +type Equal func(a, b Hasher) bool + +// The Noder interface is implemented by the elements of a Merkle Trie. +// +// There are two types of elements in a Merkle Trie: +// +// - file-like nodes: they cannot have children. +// +// - directory-like nodes: they can have 0 or more children and their +// hash is calculated by combining their children hashes. +type Noder interface { + Hasher + fmt.Stringer // for testing purposes + // Name returns the name of an element (relative, not its full + // path). + Name() string + // IsDir returns true if the element is a directory-like node or + // false if it is a file-like node. + IsDir() bool + // Children returns the children of the element. Note that empty + // directory-like noders and file-like noders will both return + // NoChildren. + Children() ([]Noder, error) + // NumChildren returns the number of children this element has. + // + // This method is an optimization: the number of children is easily + // calculated as the length of the value returned by the Children + // method (above); yet, some implementations will be able to + // implement NumChildren in O(1) while Children is usually more + // complex. + NumChildren() (int, error) +} + +// NoChildren represents the children of a noder without children. +var NoChildren = []Noder{} diff --git a/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go new file mode 100644 index 0000000000000000000000000000000000000000..85742dbe5f4d758b8c16ccd29ceb430401544616 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go @@ -0,0 +1,88 @@ +package noder + +import ( + "bytes" + "strings" +) + +// Path values represent a noder and its ancestors. The root goes first +// and the actual final noder the path is refering to will be the last. +// +// A path implements the Noder interface, redirecting all the interface +// calls to its final noder. +// +// Paths build from an empty Noder slice are not valid paths and should +// not be used. +type Path []Noder + +// String returns the full path of the final noder as a string, using +// "/" as the separator. +func (p Path) String() string { + var buf bytes.Buffer + sep := "" + for _, e := range p { + _, _ = buf.WriteString(sep) + sep = "/" + _, _ = buf.WriteString(e.Name()) + } + + return buf.String() +} + +// Last returns the final noder in the path. +func (p Path) Last() Noder { + return p[len(p)-1] +} + +// Hash returns the hash of the final noder of the path. +func (p Path) Hash() []byte { + return p.Last().Hash() +} + +// Name returns the name of the final noder of the path. +func (p Path) Name() string { + return p.Last().Name() +} + +// IsDir returns if the final noder of the path is a directory-like +// noder. +func (p Path) IsDir() bool { + return p.Last().IsDir() +} + +// Children returns the children of the final noder in the path. +func (p Path) Children() ([]Noder, error) { + return p.Last().Children() +} + +// NumChildren returns the number of children the final noder of the +// path has. 
+func (p Path) NumChildren() (int, error) { + return p.Last().NumChildren() +} + +// Compare returns -1, 0 or 1 if the path p is smaller, equal or bigger +// than other, in "directory order"; for example: +// +// "a" < "b" +// "a/b/c/d/z" < "b" +// "a/b/a" > "a/b" +func (p Path) Compare(other Path) int { + i := 0 + for { + switch { + case len(other) == len(p) && i == len(p): + return 0 + case i == len(other): + return 1 + case i == len(p): + return -1 + default: + cmp := strings.Compare(p[i].Name(), other[i].Name()) + if cmp != 0 { + return cmp + } + } + i++ + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree.go b/vendor/gopkg.in/src-d/go-git.v4/worktree.go new file mode 100644 index 0000000000000000000000000000000000000000..40ebe585dd4f9e92a53f642f9ade1d4e689f6d96 --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/worktree.go @@ -0,0 +1,438 @@ +package git + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + + "gopkg.in/src-d/go-git.v4/config" + "gopkg.in/src-d/go-git.v4/plumbing" + "gopkg.in/src-d/go-git.v4/plumbing/filemode" + "gopkg.in/src-d/go-git.v4/plumbing/format/index" + "gopkg.in/src-d/go-git.v4/plumbing/object" + + "gopkg.in/src-d/go-billy.v2" +) + +var ErrWorktreeNotClean = errors.New("worktree is not clean") +var ErrSubmoduleNotFound = errors.New("submodule not found") + +type Worktree struct { + r *Repository + fs billy.Filesystem +} + +func (w *Worktree) Checkout(commit plumbing.Hash) error { + s, err := w.Status() + if err != nil { + return err + } + + if !s.IsClean() { + return ErrWorktreeNotClean + } + + c, err := w.r.CommitObject(commit) + if err != nil { + return err + } + + t, err := c.Tree() + if err != nil { + return err + } + + idx := &index.Index{Version: 2} + walker := object.NewTreeWalker(t, true) + + for { + name, entry, err := walker.Next() + if err == io.EOF { + break + } + + if err != nil { + return err + } + + if err := w.checkoutEntry(name, &entry, idx); err != nil { + return err + } + } + + return w.r.Storer.SetIndex(idx) +} + +func (w *Worktree) checkoutEntry(name string, e *object.TreeEntry, idx *index.Index) error { + if e.Mode == filemode.Submodule { + return w.addIndexFromTreeEntry(name, e, idx) + } + + if e.Mode == filemode.Dir { + return nil + } + + return w.checkoutFile(name, e, idx) +} + +func (w *Worktree) checkoutFile(name string, e *object.TreeEntry, idx *index.Index) error { + blob, err := object.GetBlob(w.r.Storer, e.Hash) + if err != nil { + return err + } + + from, err := blob.Reader() + if err != nil { + return err + } + defer from.Close() + + mode, err := e.Mode.ToOSFileMode() + if err != nil { + return err + } + + to, err := w.fs.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm()) + if err != nil { + return err + } + defer to.Close() + + if _, err := io.Copy(to, from); err != nil { + return err + } + + return w.addIndexFromFile(name, e, idx) +} + +var fillSystemInfo func(e *index.Entry, sys interface{}) + +func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *index.Index) error { + idx.Entries = append(idx.Entries, index.Entry{ + Hash: f.Hash, + Name: name, + Mode: filemode.Submodule, + }) + + return nil +} + +func (w *Worktree) addIndexFromFile(name string, f *object.TreeEntry, idx *index.Index) error { + fi, err := w.fs.Stat(name) + if err != nil { + return err + } + + mode, err := filemode.NewFromOSFileMode(fi.Mode()) + if err != nil { + return err + } + + e := index.Entry{ + Hash: f.Hash, + Name: name, + Mode: mode, + ModifiedAt: fi.ModTime(), + Size: uint32(fi.Size()), + } + + 
// if the FileInfo.Sys() comes from os the ctime, dev, inode, uid and gid + // can be retrieved, otherwise this doesn't apply + if fillSystemInfo != nil { + fillSystemInfo(&e, fi.Sys()) + } + + idx.Entries = append(idx.Entries, e) + return nil +} + +func (w *Worktree) Status() (Status, error) { + idx, err := w.r.Storer.Index() + if err != nil { + return nil, err + } + + files, err := readDirAll(w.fs) + if err != nil { + return nil, err + } + + s := make(Status, 0) + for _, e := range idx.Entries { + fi, ok := files[e.Name] + delete(files, e.Name) + + if !ok { + s.File(e.Name).Worktree = Deleted + continue + } + + status, err := w.compareFileWithEntry(fi, &e) + if err != nil { + return nil, err + } + + s.File(e.Name).Worktree = status + } + + for f := range files { + s.File(f).Worktree = Untracked + } + + return s, nil +} + +func (w *Worktree) compareFileWithEntry(fi billy.FileInfo, e *index.Entry) (StatusCode, error) { + if fi.Size() != int64(e.Size) { + return Modified, nil + } + + mode, err := filemode.NewFromOSFileMode(fi.Mode()) + if err != nil { + return Modified, err + } + + if mode != e.Mode { + return Modified, nil + } + + h, err := calcSHA1(w.fs, e.Name) + if h != e.Hash || err != nil { + return Modified, err + + } + + return Unmodified, nil +} + +const gitmodulesFile = ".gitmodules" + +// Submodule returns the submodule with the given name +func (w *Worktree) Submodule(name string) (*Submodule, error) { + l, err := w.Submodules() + if err != nil { + return nil, err + } + + for _, m := range l { + if m.Config().Name == name { + return m, nil + } + } + + return nil, ErrSubmoduleNotFound +} + +// Submodules returns all the available submodules +func (w *Worktree) Submodules() (Submodules, error) { + l := make(Submodules, 0) + m, err := w.readGitmodulesFile() + if err != nil || m == nil { + return l, err + } + + c, err := w.r.Config() + for _, s := range m.Submodules { + l = append(l, w.newSubmodule(s, c.Submodules[s.Name])) + } + + return l, nil +} + +func (w *Worktree) newSubmodule(fromModules, fromConfig *config.Submodule) *Submodule { + m := &Submodule{w: w} + m.initialized = fromConfig != nil + + if !m.initialized { + m.c = fromModules + return m + } + + m.c = fromConfig + m.c.Path = fromModules.Path + return m +} + +func (w *Worktree) readGitmodulesFile() (*config.Modules, error) { + f, err := w.fs.Open(gitmodulesFile) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + + return nil, err + } + + input, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + m := config.NewModules() + return m, m.Unmarshal(input) +} + +func (w *Worktree) readIndexEntry(path string) (index.Entry, error) { + var e index.Entry + + idx, err := w.r.Storer.Index() + if err != nil { + return e, err + } + + for _, e := range idx.Entries { + if e.Name == path { + return e, nil + } + } + + return e, fmt.Errorf("unable to find %q entry in the index", path) +} + +// Status current status of a Worktree +type Status map[string]*FileStatus + +func (s Status) File(filename string) *FileStatus { + if _, ok := (s)[filename]; !ok { + s[filename] = &FileStatus{} + } + + return s[filename] + +} + +func (s Status) IsClean() bool { + for _, status := range s { + if status.Worktree != Unmodified || status.Staging != Unmodified { + return false + } + } + + return true +} + +func (s Status) String() string { + var names []string + for name := range s { + names = append(names, name) + } + + var output string + for _, name := range names { + status := s[name] + if status.Staging == 0 && 
status.Worktree == 0 { + continue + } + + if status.Staging == Renamed { + name = fmt.Sprintf("%s -> %s", name, status.Extra) + } + + output += fmt.Sprintf("%s%s %s\n", status.Staging, status.Worktree, name) + } + + return output +} + +// FileStatus status of a file in the Worktree +type FileStatus struct { + Staging StatusCode + Worktree StatusCode + Extra string +} + +// StatusCode status code of a file in the Worktree +type StatusCode int8 + +const ( + Unmodified StatusCode = iota + Untracked + Modified + Added + Deleted + Renamed + Copied + UpdatedButUnmerged +) + +func (c StatusCode) String() string { + switch c { + case Unmodified: + return " " + case Modified: + return "M" + case Added: + return "A" + case Deleted: + return "D" + case Renamed: + return "R" + case Copied: + return "C" + case UpdatedButUnmerged: + return "U" + case Untracked: + return "?" + default: + return "-" + } +} + +func calcSHA1(fs billy.Filesystem, filename string) (plumbing.Hash, error) { + file, err := fs.Open(filename) + if err != nil { + return plumbing.ZeroHash, err + } + + stat, err := fs.Stat(filename) + if err != nil { + return plumbing.ZeroHash, err + } + + h := plumbing.NewHasher(plumbing.BlobObject, stat.Size()) + if _, err := io.Copy(h, file); err != nil { + return plumbing.ZeroHash, err + } + + return h.Sum(), nil +} + +func readDirAll(filesystem billy.Filesystem) (map[string]billy.FileInfo, error) { + all := make(map[string]billy.FileInfo, 0) + return all, doReadDirAll(filesystem, "", all) +} + +func doReadDirAll(fs billy.Filesystem, path string, files map[string]billy.FileInfo) error { + if path == defaultDotGitPath { + return nil + } + + l, err := fs.ReadDir(path) + if err != nil { + if os.IsNotExist(err) { + return nil + } + + return err + } + + for _, info := range l { + file := fs.Join(path, info.Name()) + if file == defaultDotGitPath { + continue + } + + if !info.IsDir() { + files[file] = info + continue + } + + if err := doReadDirAll(fs, file, files); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_darwin.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_darwin.go new file mode 100644 index 0000000000000000000000000000000000000000..8eaffde8af65da8866c020aecee2a2a824d22e2e --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/worktree_darwin.go @@ -0,0 +1,22 @@ +// +build darwin freebsd netbsd openbsd + +package git + +import ( + "syscall" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Stat_t); ok { + e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec)) + e.Dev = uint32(os.Dev) + e.Inode = uint32(os.Ino) + e.GID = os.Gid + e.UID = os.Uid + } + } +} diff --git a/vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go b/vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go new file mode 100644 index 0000000000000000000000000000000000000000..a33cd2fb9625ca819ee5a788903ed02512090dab --- /dev/null +++ b/vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go @@ -0,0 +1,22 @@ +// +build linux + +package git + +import ( + "syscall" + "time" + + "gopkg.in/src-d/go-git.v4/plumbing/format/index" +) + +func init() { + fillSystemInfo = func(e *index.Entry, sys interface{}) { + if os, ok := sys.(*syscall.Stat_t); ok { + e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec)) + e.Dev = uint32(os.Dev) + e.Inode = uint32(os.Ino) + e.GID = os.Gid + e.UID = os.Uid + } + } +} diff --git 
a/vendor/gopkg.in/warnings.v0/LICENSE b/vendor/gopkg.in/warnings.v0/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..d65f7e9d8cd6f1fda500b512fa6fcd14c2d8cefc --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2016 Péter Surányi. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/warnings.v0/README b/vendor/gopkg.in/warnings.v0/README new file mode 100644 index 0000000000000000000000000000000000000000..77f46dd2c691bd2359dde22218620368feac01c5 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/README @@ -0,0 +1,74 @@ +Package warnings implements error handling with non-fatal errors (warnings). + +import path: "gopkg.in/warnings.v0" +package docs: https://godoc.org/gopkg.in/warnings.v0 +issues: https://github.com/go-warnings/warnings/issues +pull requests: https://github.com/go-warnings/warnings/pulls + +A recurring pattern in Go programming is the following: + + func myfunc(params) error { + if err := doSomething(...); err != nil { + return err + } + if err := doSomethingElse(...); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + return errors.New("my error") + } + ... + return nil + } + +This pattern allows interrupting the flow on any received error. But what if +there are errors that should be noted but still not fatal, for which the flow +should not be interrupted? Implementing such logic at each if statement would +make the code complex and the flow much harder to follow. + +Package warnings provides the Collector type and a clean and simple pattern +for achieving such logic. The Collector takes care of deciding when to break +the flow and when to continue, collecting any non-fatal errors (warnings) +along the way. The only requirement is that fatal and non-fatal errors can be +distinguished programmatically; that is a function such as + + IsFatal(error) bool + +must be implemented. 
The following is an example of what the above snippet +could look like using the warnings package: + + import "gopkg.in/warnings.v0" + + func isFatal(err error) bool { + _, ok := err.(WarningType) + return !ok + } + + func myfunc(params) error { + c := warnings.NewCollector(isFatal) + c.FatalWithWarnings = true + if err := c.Collect(doSomething()); err != nil { + return err + } + if err := c.Collect(doSomethingElse(...)); err != nil { + return err + } + if ok := doAnotherThing(...); !ok { + if err := c.Collect(errors.New("my error")); err != nil { + return err + } + } + ... + return c.Done() + } + +Rules for using warnings + + - ensure that warnings are programmatically distinguishable from fatal + errors (i.e. implement an isFatal function and any necessary error types) + - ensure that there is a single Collector instance for a call of each + exported function + - ensure that all errors (fatal or warning) are fed through Collect + - ensure that every time an error is returned, it is one returned by a + Collector (from Collect or Done) + - ensure that Collect is never called after Done diff --git a/vendor/gopkg.in/warnings.v0/warnings.go b/vendor/gopkg.in/warnings.v0/warnings.go new file mode 100644 index 0000000000000000000000000000000000000000..5b57f4353c9dfde946be893c3b870259c5192087 --- /dev/null +++ b/vendor/gopkg.in/warnings.v0/warnings.go @@ -0,0 +1,191 @@ +// Package warnings implements error handling with non-fatal errors (warnings). +// +// A recurring pattern in Go programming is the following: +// +// func myfunc(params) error { +// if err := doSomething(...); err != nil { +// return err +// } +// if err := doSomethingElse(...); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// return errors.New("my error") +// } +// ... +// return nil +// } +// +// This pattern allows interrupting the flow on any received error. But what if +// there are errors that should be noted but still not fatal, for which the flow +// should not be interrupted? Implementing such logic at each if statement would +// make the code complex and the flow much harder to follow. +// +// Package warnings provides the Collector type and a clean and simple pattern +// for achieving such logic. The Collector takes care of deciding when to break +// the flow and when to continue, collecting any non-fatal errors (warnings) +// along the way. The only requirement is that fatal and non-fatal errors can be +// distinguished programmatically; that is a function such as +// +// IsFatal(error) bool +// +// must be implemented. The following is an example of what the above snippet +// could look like using the warnings package: +// +// import "gopkg.in/warnings.v0" +// +// func isFatal(err error) bool { +// _, ok := err.(WarningType) +// return !ok +// } +// +// func myfunc(params) error { +// c := warnings.NewCollector(isFatal) +// c.FatalWithWarnings = true +// if err := c.Collect(doSomething()); err != nil { +// return err +// } +// if err := c.Collect(doSomethingElse(...)); err != nil { +// return err +// } +// if ok := doAnotherThing(...); !ok { +// if err := c.Collect(errors.New("my error")); err != nil { +// return err +// } +// } +// ... +// return c.Done() +// } +// +// Rules for using warnings +// +// - ensure that warnings are programmatically distinguishable from fatal +// errors (i.e. 
implement an isFatal function and any necessary error types) +// - ensure that there is a single Collector instance for a call of each +// exported function +// - ensure that all errors (fatal or warning) are fed through Collect +// - ensure that every time an error is returned, it is one returned by a +// Collector (from Collect or Done) +// - ensure that Collect is never called after Done +// +// TODO +// +// - optionally limit the number of warnings (e.g. stop after 20 warnings) (?) +// - consider interaction with contexts +// - go vet-style invocations verifier +// - semi-automatic code converter +// +package warnings // import "gopkg.in/warnings.v0" + +import ( + "bytes" + "fmt" +) + +// List holds a collection of warnings and optionally one fatal error. +type List struct { + Warnings []error + Fatal error +} + +// Error implements the error interface. +func (l List) Error() string { + b := bytes.NewBuffer(nil) + if l.Fatal != nil { + fmt.Fprintln(b, "fatal:") + fmt.Fprintln(b, l.Fatal) + } + switch len(l.Warnings) { + case 0: + // nop + case 1: + fmt.Fprintln(b, "warning:") + default: + fmt.Fprintln(b, "warnings:") + } + for _, err := range l.Warnings { + fmt.Fprintln(b, err) + } + return b.String() +} + +// A Collector collects errors up to the first fatal error. +type Collector struct { + // IsFatal distinguishes between warnings and fatal errors. + IsFatal func(error) bool + // FatalWithWarnings set to true means that a fatal error is returned as + // a List together with all warnings so far. The default behavior is to + // only return the fatal error and discard any warnings that have been + // collected. + FatalWithWarnings bool + + l List + done bool +} + +// NewCollector returns a new Collector; it uses isFatal to distinguish between +// warnings and fatal errors. +func NewCollector(isFatal func(error) bool) *Collector { + return &Collector{IsFatal: isFatal} +} + +// Collect collects a single error (warning or fatal). It returns nil if +// collection can continue (only warnings so far), or otherwise the errors +// collected. Collect mustn't be called after the first fatal error or after +// Done has been called. +func (c *Collector) Collect(err error) error { + if c.done { + panic("warnings.Collector already done") + } + if err == nil { + return nil + } + if c.IsFatal(err) { + c.done = true + c.l.Fatal = err + } else { + c.l.Warnings = append(c.l.Warnings, err) + } + if c.l.Fatal != nil { + return c.erorr() + } + return nil +} + +// Done ends collection and returns the collected error(s). +func (c *Collector) Done() error { + c.done = true + return c.erorr() +} + +func (c *Collector) erorr() error { + if !c.FatalWithWarnings && c.l.Fatal != nil { + return c.l.Fatal + } + if c.l.Fatal == nil && len(c.l.Warnings) == 0 { + return nil + } + // Note that a single warning is also returned as a List. This is to make it + // easier to determine fatal-ness of the returned error. + return c.l +} + +// FatalOnly returns the fatal error, if any, **in an error returned by a +// Collector**. It returns nil if and only if err is nil or err is a List +// with err.Fatal == nil. +func FatalOnly(err error) error { + l, ok := err.(List) + if !ok { + return err + } + return l.Fatal +} + +// WarningsOnly returns the warnings **in an error returned by a Collector**. 
+func WarningsOnly(err error) []error { + l, ok := err.(List) + if !ok { + return nil + } + return l.Warnings +} diff --git a/vendor/vendor.json b/vendor/vendor.json new file mode 100644 index 0000000000000000000000000000000000000000..3938979b084c3ce2a40fa48e49eab0501135a3b4 --- /dev/null +++ b/vendor/vendor.json @@ -0,0 +1,418 @@ +{ + "comment": "", + "ignore": "test", + "package": [ + { + "checksumSHA1": "ZfDLreBle3gvVHKoIZl4/ArtWzk=", + "path": "github.com/aws/aws-sdk-go/aws", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", + "path": "github.com/aws/aws-sdk-go/aws/awserr", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "yyYr41HZ1Aq0hWc3J5ijXwYEcac=", + "path": "github.com/aws/aws-sdk-go/aws/awsutil", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", + "path": "github.com/aws/aws-sdk-go/aws/client/metadata", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "zu5C95rmCZff6NYZb62lEaT5ibE=", + "path": "github.com/aws/aws-sdk-go/aws/credentials", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "Y/H3JXynvwx55rAbQg6g2hCouB8=", + "path": "github.com/aws/aws-sdk-go/aws/endpoints", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "M78rTxU55Qagqr3MYj91im2031E=", + "path": "github.com/aws/aws-sdk-go/aws/request", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "0FvPLvkBUpTElfUc/FZtPsJfuV0=", + "path": "github.com/aws/aws-sdk-go/aws/signer/v4", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "ZyqvUCRrfCAGOliK3+iKwU1PIos=", + "path": "github.com/aws/aws-sdk-go/private/protocol/rest", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "+CTfnrGIUEEqOB4I+/x0vBrBLts=", + "path": "github.com/deoxxa/aws_signing_client", + "revision": "c20ee106809eacdffcc81ac7cb984261f8e3067e", + "revisionTime": "2016-11-09T13:10:55Z" + }, + { + "checksumSHA1": "VvZKmbuBN1QAG699KduTdmSPwA4=", + "origin": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini", + "path": "github.com/go-ini/ini", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=", + "origin": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath", + "path": "github.com/jmespath/go-jmespath", + "revision": "2b26ad567f305510849d93c5d2025a8b561f2367", + "revisionTime": "2017-03-15T18:41:46Z" + }, + { + "checksumSHA1": "hcIgD9IEGtHu8o4NibzTl4qMtMU=", + "path": "github.com/lupine/icu", + "revision": "03c771153cfff2627e0e7c8326f6b3c6891b6acf", + "revisionTime": "2017-03-29T16:09:13Z" + }, + { + "checksumSHA1": "iWCtyR1TkJ22Bi/ygzfKDvOQdQY=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/github.com/sergi/go-diff/diffmatchpatch", + "path": "github.com/sergi/go-diff/diffmatchpatch", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": 
"8QeSG127zQqbA+YfkO1WkKx/iUI=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/github.com/src-d/gcfg", + "path": "github.com/src-d/gcfg", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "yf5NBT8BofPfGYCXoLnj7BIA1wo=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/github.com/src-d/gcfg/scanner", + "path": "github.com/src-d/gcfg/scanner", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "C5Z8YVyNTuvupM9AUr9KbPlps4Q=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/github.com/src-d/gcfg/token", + "path": "github.com/src-d/gcfg/token", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "mDkN3UpR7auuFbwUuIwExz4DZgY=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/github.com/src-d/gcfg/types", + "path": "github.com/src-d/gcfg/types", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "C1KKOxFoW7/W/NFNpiXK+boguNo=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/golang.org/x/crypto/curve25519", + "path": "golang.org/x/crypto/curve25519", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "wGb//LjBPNxYHqk+dcLo7BjPXK8=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/golang.org/x/crypto/ed25519", + "path": "golang.org/x/crypto/ed25519", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "LXFcVx8I587SnWmKycSDEq9yvK8=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/golang.org/x/crypto/ed25519/internal/edwards25519", + "path": "golang.org/x/crypto/ed25519/internal/edwards25519", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "fsrFs762jlaILyqqQImS1GfvIvw=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/golang.org/x/crypto/ssh", + "path": "golang.org/x/crypto/ssh", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "SJ3Ma3Ozavxpbh1usZWBCnzMKIc=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/golang.org/x/crypto/ssh/agent", + "path": "golang.org/x/crypto/ssh/agent", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "Y+HGqEkYM15ir+J93MEaHdyFy0c=", + "path": "golang.org/x/net/context", + "revision": "a6577fac2d73be281a500b310739095313165611", + "revisionTime": "2017-03-08T20:54:49Z" + }, + { + "checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=", + "path": "golang.org/x/net/context/ctxhttp", + "revision": "a6577fac2d73be281a500b310739095313165611", + "revisionTime": "2017-03-08T20:54:49Z" + }, + { + "checksumSHA1": "++OrH7Epi89IOXHT7Cwhq5y9ZBo=", + "path": "gopkg.in/olivere/elastic.v5", + "revision": "8c148e9a5e627a3c155c6980fb965fe7813d78b8", + "revisionTime": "2017-03-16T11:04:01Z" + }, + { + "checksumSHA1": "pCApZfp2CeDnWbNVsyzNO62ZUXA=", + "path": "gopkg.in/olivere/elastic.v5/uritemplates", + "revision": "8c148e9a5e627a3c155c6980fb965fe7813d78b8", + "revisionTime": "2017-03-16T11:04:01Z" + }, + { + "checksumSHA1": "TrrpN8Ra20NKSET2JN1jHTJPVKI=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/gopkg.in/src-d/go-billy.v2", + "path": "gopkg.in/src-d/go-billy.v2", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": 
"2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "ulk0E0hadjYY051SDWIFUU7CmJU=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/gopkg.in/src-d/go-billy.v2/osfs", + "path": "gopkg.in/src-d/go-billy.v2/osfs", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "pBrkx5lQtbYkQKDP4XuZ8YKuhWY=", + "path": "gopkg.in/src-d/go-git.v4", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "s8gwNiC3stn7E+7r+o/YbJCzTg0=", + "path": "gopkg.in/src-d/go-git.v4/config", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "A3WduoxOIVoBnsDAEZtI798CZtU=", + "path": "gopkg.in/src-d/go-git.v4/internal/revision", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "gnDMY5QTenyeb4Z/wsbKdCVk8AU=", + "path": "gopkg.in/src-d/go-git.v4/plumbing", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "ccn330T2+i5MJIMFNWSld0JED2I=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/cache", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "pHPMiAzXG/TJqTLEKj2SHjxX4zs=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/filemode", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "SfXSsfv1ji5LFY8GdLLleOfWPd4=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/format/config", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "YoinR3unctE6LY/W74CqWnVCwp4=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/format/idxfile", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "I81+nhcMO6z6DWvV+oMSZR9ZRho=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/format/index", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "RYeQffgqVS4Kbbk2YVcaPadXCiI=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/format/objfile", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "d8vqfvAF30/uHLKL+9zM6S/XYjA=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/format/packfile", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "rA6jJ2fxdcALXL7EaP8Ja37xXjw=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/format/pktline", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "0PaLECZE+WS9F7XZqzV+wJIqbtQ=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/object", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "XBymyCj04MIi6oPrw+NXiMX95qI=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "FNapUewRlNHGOyob9Rzl44HvQB8=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + 
"checksumSHA1": "wVfbzV5BNhjW/HFFJuTCjkPSJ5M=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "lcZUgalumxWfUjijwHde2Ox1Xcg=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/revlist", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "ysDmbzNW3RSfgjlUCGN48GXx6Ok=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/storer", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "3O9OpI4U1fhkFdcNPjQ0vjYObUs=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "UF+MxBfonAjr2zn35ZZhFfL/dz4=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/client", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "eWYSaNrdtP6H85O1Qa62+J++oO8=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/file", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "F0xBNcEflhHlwUctYJVj6AhZ+kk=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/git", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "LXB3DF29p4tYagVyJYmTSKwElQg=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/http", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "pLQiuOmKInVwJIOml37Bt9e3YYA=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "PKt08BrFTrleyYIqCtXM33Vbz6A=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/server", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "FoyQq1pQUXCHpfNLfSbAEklCPkA=", + "path": "gopkg.in/src-d/go-git.v4/plumbing/transport/ssh", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "3n+s0+8HMD0++79ZUYOE2lyCQWY=", + "path": "gopkg.in/src-d/go-git.v4/storage", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "dCejhXWbG1RWI/wMENAtAQ9BIlI=", + "path": "gopkg.in/src-d/go-git.v4/storage/filesystem", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "GEOoF+Zk7c02wI9mnoctaKqkF34=", + "path": "gopkg.in/src-d/go-git.v4/storage/filesystem/internal/dotgit", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "OfzMDAIu253dJ9591gd3w/APq0I=", + "path": "gopkg.in/src-d/go-git.v4/storage/memory", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "8WNcGWxmpmcQTxa+LT7lghx3UNY=", + "path": "gopkg.in/src-d/go-git.v4/utils/binary", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "vniUxB6bbDYazl21cOfmhdZZiY8=", + "path": 
"gopkg.in/src-d/go-git.v4/utils/diff", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "nP0yUyAxESlNgNSZm7lM/VSGpqM=", + "path": "gopkg.in/src-d/go-git.v4/utils/ioutil", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "+5CJIylVMH9B9Y34yeU81gAHxFQ=", + "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "7eEw/xsSrFLfSppRf/JIt9u7lbU=", + "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "f3kxzpAxqnc5ETJNuQmbaaLRpQU=", + "path": "gopkg.in/src-d/go-git.v4/utils/merkletrie/noder", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + }, + { + "checksumSHA1": "ALQFMX3kmJkyB/8VEzVnGZG1H4A=", + "origin": "gopkg.in/src-d/go-git.v4/vendor/gopkg.in/warnings.v0", + "path": "gopkg.in/warnings.v0", + "revision": "36c78b9d1b1eea682703fb1cbb0f4f3144354389", + "revisionTime": "2017-03-28T03:53:33Z" + } + ], + "rootPath": "gitlab.com/gitlab-org/es-git-go" +}