Skip to content
Snippets Groups Projects

Initial implementation of an elasticsearch indexer in Go

Merged — Nick Thomas requested to merge `1-initial-implementation` into `master`
All threads resolved!
1 file changed
+100 −102
Compare changes
  • Side-by-side
  • Inline
+100 −102
@@ -7,110 +7,108 @@ import (
var indexMapping = `
{
"settings": {
"index": {
"analysis": {
"filter": {
"my_stemmer": {
"name": "light_english",
"type": "stemmer"
},
"code": {
"type": "pattern_capture",
"preserve_original": "1",
"patterns": [
"(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)",
"(\\d+)"
]
}
},
"char_filter": {
"code_mapping": {
"type": "mapping",
"mappings": [
". => ' '"
]
}
},
"analyzer": {
"default": {
"filter": [
"standard",
"lowercase",
"my_stemmer"
],
"tokenizer": "standard"
},
"code_search_analyzer": {
"filter": [
"lowercase",
"asciifolding"
],
"char_filter": [
"code_mapping"
],
"type": "custom",
"tokenizer": "standard"
},
"path_analyzer": {
"filter": [
"lowercase",
"asciifolding"
],
"type": "custom",
"tokenizer": "path_tokenizer"
},
"sha_analyzer": {
"filter": [
"lowercase",
"asciifolding"
],
"type": "custom",
"tokenizer": "sha_tokenizer"
},
"code_analyzer": {
"filter": [
"code",
"lowercase",
"asciifolding"
],
"char_filter": [
"code_mapping"
],
"type": "custom",
"tokenizer": "standard"
},
"my_ngram_analyzer": {
"filter": [
"lowercase"
],
"tokenizer": "my_ngram_tokenizer"
}
},
"tokenizer": {
"my_ngram_tokenizer": {
"token_chars": [
"letter",
"digit"
],
"min_gram": "2",
"type": "nGram",
"max_gram": "3"
},
"sha_tokenizer": {
"token_chars": [
"letter",
"digit"
],
"min_gram": "5",
"type": "edgeNGram",
"max_gram": "40"
},
"path_tokenizer": {
"reverse": "true",
"type": "path_hierarchy"
}
"analysis": {
"filter": {
"my_stemmer": {
"name": "light_english",
"type": "stemmer"
},
"code": {
"type": "pattern_capture",
"preserve_original": "1",
"patterns": [
"(\\p{Ll}+|\\p{Lu}\\p{Ll}+|\\p{Lu}+)",
"(\\d+)"
]
}
},
"char_filter": {
"code_mapping": {
"type": "mapping",
"mappings": [
". => ' '"
]
}
},
"analyzer": {
"default": {
"filter": [
"standard",
"lowercase",
"my_stemmer"
],
"tokenizer": "standard"
},
"code_search_analyzer": {
"filter": [
"lowercase",
"asciifolding"
],
"char_filter": [
"code_mapping"
],
"type": "custom",
"tokenizer": "standard"
},
"path_analyzer": {
"filter": [
"lowercase",
"asciifolding"
],
"type": "custom",
"tokenizer": "path_tokenizer"
},
"sha_analyzer": {
"filter": [
"lowercase",
"asciifolding"
],
"type": "custom",
"tokenizer": "sha_tokenizer"
},
"code_analyzer": {
"filter": [
"code",
"lowercase",
"asciifolding"
],
"char_filter": [
"code_mapping"
],
"type": "custom",
"tokenizer": "standard"
},
"my_ngram_analyzer": {
"filter": [
"lowercase"
],
"tokenizer": "my_ngram_tokenizer"
}
},
"tokenizer": {
"my_ngram_tokenizer": {
"token_chars": [
"letter",
"digit"
],
"min_gram": "2",
"type": "nGram",
"max_gram": "3"
},
"sha_tokenizer": {
"token_chars": [
"letter",
"digit"
],
"min_gram": "5",
"type": "edgeNGram",
"max_gram": "40"
},
"path_tokenizer": {
"reverse": "true",
"type": "path_hierarchy"
}
}
}
},
"mappings": {
Loading