├── README.md ├── main.go └── release.sh
/README.md:
--------------------------------------------------------------------------------
# Elasticsearch Dumper

## EXAMPLE:
```elasticsearch-dumper -s http://source:9200 -d http://destination:9200 -i index1,index2```

## INSTALL:
1. ```go get github.com/hoffoo/elasticsearch-dump```
2. or download a prebuilt binary here: https://github.com/hoffoo/elasticsearch-dump/releases/


```
Application Options:
  -s, --source=     source elasticsearch instance
  -d, --dest=       destination elasticsearch instance
  -c, --count=      number of documents at a time: ie "size" in the scroll request (100)
  -t, --time=       scroll time (1m)
  -f, --force       delete destination index before copying (false)
      --shards=     set a number of shards on newly created indexes
      --docs-only   load documents only, do not try to recreate indexes (false)
      --index-only  only create indexes, do not load documents (false)
      --replicate   enable replication while indexing into the new indexes (false)
  -i, --indexes=    list of indexes to copy, comma separated (_all)
  -a, --all         copy indexes starting with . and _ (false)
  -w, --workers=    concurrency (1)
      --settings    copy sharding settings from source (true)
      --green       wait for both hosts cluster status to be green before dump. otherwise yellow is okay (false)
```


## NOTES:

1. Has been tested getting data from 0.9 onto a 1.4 box. For other scenarios YMMV. (look out for this bug: https://github.com/elasticsearch/elasticsearch/issues/5165)
1. Copies using the [_source](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-source-field.html) field in elasticsearch. If you have made modifications to it (excluding fields, etc) they will not be indexed on the destination host.
1. ```--force``` will delete indexes on the destination host. 
Otherwise an error will be returned if the index exists 35 | 1. ```--time``` is the [scroll time](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html#scroll-search-context) passed to the source host, default is 1m. This is a string in es's format. 36 | 1. ```--count``` is the [number of documents](http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html#scroll-scan) that will be request and bulk indexed at a time. Note that this depends on the number of shards (ie: size of 10 on 5 shards is 50 documents) 37 | 1. ```--indexes``` is a comma separated list of indexes to copy 38 | 1. ```--all``` indexes starting with . and _ are ignored by default, --all overrides this behavior 39 | 1. ```--workers``` concurrency when we post to the bulk api. Only one post happens at a time, but higher concurrency should give you more throughput when using larger scroll sizes. 40 | 1. Ports are required, otherwise 80 is the assumed port (what) 41 | 42 | ## BUGS: 43 | 44 | 1. It will not do anything special when copying the _id (copies _id from source host). If _id is remapped it may not do what you want. 45 | 1. 
Should assume a default port of 9200 46 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "runtime" 10 | "strings" 11 | "sync" 12 | "time" 13 | 14 | pb "github.com/cheggaaa/pb" 15 | goflags "github.com/jessevdk/go-flags" 16 | ) 17 | 18 | type Indexes map[string]interface{} 19 | 20 | type Document struct { 21 | Index string `json:"_index"` 22 | Type string `json:"_type"` 23 | Id string `json:"_id"` 24 | source map[string]interface{} `json:"_source"` 25 | } 26 | 27 | type Scroll struct { 28 | ScrollId string `json:"_scroll_id"` 29 | TimedOut bool `json:"timed_out"` 30 | Hits struct { 31 | Total int `json:"total"` 32 | Docs []interface{} `json:"hits"` 33 | } `json:"hits"` 34 | Shards struct { 35 | Failures []struct { 36 | Status int `json:"status"` 37 | Reason string `json:"reason"` 38 | } `json:"failures"` 39 | } `json:"_shards"` 40 | } 41 | 42 | type ClusterHealth struct { 43 | Name string `json:"cluster_name"` 44 | Status string `json:"status"` 45 | } 46 | 47 | type Config struct { 48 | FlushLock sync.Mutex 49 | DocChan chan map[string]interface{} 50 | ErrChan chan error 51 | Uid string // es scroll uid 52 | 53 | // config options 54 | SrcEs string `short:"s" long:"source" description:"source elasticsearch instance" required:"true"` 55 | DstEs string `short:"d" long:"dest" description:"destination elasticsearch instance" required:"true"` 56 | DocBufferCount int `short:"c" long:"count" description:"number of documents at a time: ie \"size\" in the scroll request" default:"100"` 57 | ScrollTime string `short:"t" long:"time" description:"scroll time" default:"1m"` 58 | Destructive bool `short:"f" long:"force" description:"delete destination index before copying" default:"false"` 59 | ShardsCount int `long:"shards" description:"set a 
number of shards on newly created indexes"` 60 | DocsOnly bool `long:"docs-only" description:"load documents only, do not try to recreate indexes" default:"false"` 61 | CreateIndexesOnly bool `long:"index-only" description:"only create indexes, do not load documents" default:"false"` 62 | EnableReplication bool `long:"replicate" description:"enable replication while indexing into the new indexes" default:"false"` 63 | IndexNames string `short:"i" long:"indexes" description:"list of indexes to copy, comma separated" default:"_all"` 64 | CopyAllIndexes bool `short:"a" long:"all" description:"copy indexes starting with . and _" default:"false"` 65 | Workers int `short:"w" long:"workers" description:"concurrency" default:"1"` 66 | CopySettings bool `long:"settings" description:"copy sharding settings from source" default:"true"` 67 | WaitForGreen bool `long:"green" description:"wait for both hosts cluster status to be green before dump. otherwise yellow is okay" default:"false"` 68 | } 69 | 70 | func main() { 71 | 72 | runtime.GOMAXPROCS(runtime.NumCPU()) 73 | 74 | c := Config{ 75 | FlushLock: sync.Mutex{}, 76 | ErrChan: make(chan error), 77 | } 78 | 79 | // parse args 80 | _, err := goflags.Parse(&c) 81 | if err != nil { 82 | fmt.Println(err) 83 | return 84 | } 85 | 86 | // enough of a buffer to hold all the search results across all workers 87 | c.DocChan = make(chan map[string]interface{}, c.DocBufferCount*c.Workers) 88 | 89 | // get all indexes from source 90 | idxs := Indexes{} 91 | if err := c.GetIndexes(c.SrcEs, &idxs); err != nil { 92 | fmt.Println(err) 93 | return 94 | } 95 | 96 | // copy index settings if user asked 97 | if c.ShardsCount > 0 { 98 | for name, _ := range idxs { 99 | idxs.SetShardCount(name, fmt.Sprint(c.ShardsCount)) 100 | } 101 | } else if c.CopySettings == true { 102 | if err := c.CopyShardingSettings(&idxs); err != nil { 103 | fmt.Println(err) 104 | return 105 | } 106 | } 107 | 108 | // disable replication 109 | if c.EnableReplication == 
false { 110 | idxs.DisableReplication() 111 | } 112 | 113 | if c.DocsOnly == false { 114 | // delete remote indexes if user asked 115 | if c.Destructive == true { 116 | if err := c.DeleteIndexes(&idxs); err != nil { 117 | fmt.Println(err) 118 | return 119 | } 120 | } 121 | 122 | // create indexes on DstEs 123 | if err := c.CreateIndexes(&idxs); err != nil { 124 | fmt.Println(err) 125 | return 126 | } 127 | } 128 | 129 | // if we only want to create indexes, we are done here, return 130 | if c.CreateIndexesOnly { 131 | fmt.Println("Indexes created, done") 132 | return 133 | } 134 | 135 | // wait for cluster state to be okay before dumping 136 | timer := time.NewTimer(time.Second * 3) 137 | for { 138 | if status, ready := c.ClusterReady(c.SrcEs); !ready { 139 | fmt.Printf("%s at %s is %s, delaying dump\n", status.Name, c.SrcEs, status.Status) 140 | <-timer.C 141 | continue 142 | } 143 | if status, ready := c.ClusterReady(c.DstEs); !ready { 144 | fmt.Printf("%s at %s is %s, delaying dump\n", status.Name, c.DstEs, status.Status) 145 | <-timer.C 146 | continue 147 | } 148 | 149 | timer.Stop() 150 | break 151 | } 152 | fmt.Println("starting dump..") 153 | 154 | // start scroll 155 | scroll, err := c.NewScroll() 156 | if err != nil { 157 | fmt.Println(err) 158 | return 159 | } 160 | 161 | // create a progressbar and start a docCount 162 | bar := pb.StartNew(scroll.Hits.Total) 163 | var docCount int 164 | 165 | wg := sync.WaitGroup{} 166 | wg.Add(c.Workers) 167 | for i := 0; i < c.Workers; i++ { 168 | go c.NewWorker(&docCount, bar, &wg) 169 | } 170 | 171 | // print errors 172 | go func() { 173 | for { 174 | err := <-c.ErrChan 175 | fmt.Println(err) 176 | } 177 | }() 178 | 179 | // loop scrolling until done 180 | for scroll.Next(&c) == false { 181 | } 182 | 183 | // finished, close doc chan and wait for goroutines to be done 184 | close(c.DocChan) 185 | wg.Wait() 186 | bar.FinishPrint(fmt.Sprintln("Indexed", docCount, "documents")) 187 | } 188 | 189 | // Stream from source 
es instance. "done" is an indicator that the stream is 190 | // over 191 | func (s *Scroll) Next(c *Config) (done bool) { 192 | 193 | // curl -XGET 'http://es-0.9:9200/_search/scroll?scroll=5m' 194 | id := bytes.NewBufferString(s.ScrollId) 195 | 196 | req, err := http.NewRequest("GET", fmt.Sprintf("%s/_search/scroll?scroll=%s", c.SrcEs, c.ScrollTime), id) 197 | if err != nil { 198 | c.ErrChan <- err 199 | } 200 | resp, err := http.DefaultClient.Do(req) 201 | if err != nil { 202 | c.ErrChan <- err 203 | } 204 | defer resp.Body.Close() 205 | 206 | // decode elasticsearch scroll response 207 | dec := json.NewDecoder(resp.Body) 208 | scroll := &Scroll{} 209 | err = dec.Decode(&scroll) 210 | if err != nil { 211 | c.ErrChan <- err 212 | return 213 | } 214 | 215 | // XXX this might be bad, but assume we are done 216 | /* 217 | switch resp.StatusCode { 218 | case 200: 219 | break 220 | case 404: 221 | // this may indicate bug 222 | c.ErrChan <- fmt.Errorf("looks like we dumped all we could...") 223 | default: 224 | c.ErrChan <- fmt.Errorf("scroll response: %s", stream) 225 | // flush and quit 226 | return true 227 | } 228 | */ 229 | 230 | // show any failures 231 | for _, failure := range scroll.Shards.Failures { 232 | c.ErrChan <- fmt.Errorf(failure.Reason) 233 | } 234 | 235 | // write all the docs into a channel 236 | for _, docI := range scroll.Hits.Docs { 237 | c.DocChan <- docI.(map[string]interface{}) 238 | } 239 | 240 | return 241 | } 242 | 243 | func (c *Config) NewWorker(docCount *int, bar *pb.ProgressBar, wg *sync.WaitGroup) { 244 | 245 | mainBuf := bytes.Buffer{} 246 | docBuf := bytes.Buffer{} 247 | docEnc := json.NewEncoder(&docBuf) 248 | 249 | READ_DOCS: 250 | for { 251 | var err error 252 | docI, open := <-c.DocChan 253 | 254 | // this check is in case the document is an error with scroll stuff 255 | if status, ok := docI["status"]; ok { 256 | if status.(int) == 404 { 257 | fmt.Println("error: ", docI["response"]) 258 | continue 259 | } 260 | } 261 | 262 | // 
sanity check 263 | for _, key := range []string{"_index", "_type", "_source", "_id"} { 264 | if _, ok := docI[key]; !ok { 265 | fmt.Println("failed parsing document: %v", docI) 266 | break READ_DOCS 267 | } 268 | } 269 | 270 | doc := Document{ 271 | Index: docI["_index"].(string), 272 | Type: docI["_type"].(string), 273 | source: docI["_source"].(map[string]interface{}), 274 | Id: docI["_id"].(string), 275 | } 276 | 277 | // if channel is closed flush and gtfo 278 | if !open { 279 | goto WORKER_DONE 280 | } 281 | 282 | // sanity check 283 | if len(doc.Index) == 0 || len(doc.Id) == 0 || len(doc.Type) == 0 { 284 | c.ErrChan <- fmt.Errorf("failed decoding document: %+v", doc) 285 | continue 286 | } 287 | 288 | // encode the doc and and the _source field for a bulk request 289 | post := map[string]Document{ 290 | "create": doc, 291 | } 292 | if err = docEnc.Encode(post); err != nil { 293 | c.ErrChan <- err 294 | } 295 | if err = docEnc.Encode(doc.source); err != nil { 296 | c.ErrChan <- err 297 | } 298 | 299 | // if we approach the 100mb es limit, flush to es and reset mainBuf 300 | if mainBuf.Len()+docBuf.Len() > 100000000 { 301 | c.BulkPost(&mainBuf) 302 | } 303 | 304 | // append the doc to the main buffer 305 | mainBuf.Write(docBuf.Bytes()) 306 | // reset for next document 307 | docBuf.Reset() 308 | bar.Increment() 309 | (*docCount)++ 310 | } 311 | 312 | WORKER_DONE: 313 | if docBuf.Len() > 0 { 314 | mainBuf.Write(docBuf.Bytes()) 315 | } 316 | c.BulkPost(&mainBuf) 317 | wg.Done() 318 | } 319 | 320 | func (c *Config) GetIndexes(host string, idxs *Indexes) (err error) { 321 | 322 | resp, err := http.Get(fmt.Sprintf("%s/%s/_mapping", host, c.IndexNames)) 323 | if err != nil { 324 | return 325 | } 326 | defer resp.Body.Close() 327 | 328 | dec := json.NewDecoder(resp.Body) 329 | err = dec.Decode(idxs) 330 | 331 | // always ignore internal _ indexes 332 | for name, _ := range *idxs { 333 | if name[0] == '_' { 334 | delete(*idxs, name) 335 | } 336 | } 337 | 338 | // remove 
indexes that start with . if user asked for it 339 | if c.CopyAllIndexes == false { 340 | for name, _ := range *idxs { 341 | switch name[0] { 342 | case '.': 343 | delete(*idxs, name) 344 | case '_': 345 | delete(*idxs, name) 346 | 347 | } 348 | } 349 | } 350 | 351 | // if _all indexes limit the list of indexes to only these that we kept 352 | // after looking at mappings 353 | if c.IndexNames == "_all" { 354 | var newIndexes []string 355 | for name, _ := range *idxs { 356 | newIndexes = append(newIndexes, name) 357 | } 358 | c.IndexNames = strings.Join(newIndexes, ",") 359 | } 360 | 361 | // wrap in mappings if dumping from super old es 362 | for name, idx := range *idxs { 363 | if _, ok := idx.(map[string]interface{})["mappings"]; !ok { 364 | (*idxs)[name] = map[string]interface{}{ 365 | "mappings": idx, 366 | } 367 | } 368 | } 369 | 370 | return 371 | } 372 | 373 | // CreateIndexes on remodeleted ES instance 374 | func (c *Config) CreateIndexes(idxs *Indexes) (err error) { 375 | 376 | for name, idx := range *idxs { 377 | body := bytes.Buffer{} 378 | enc := json.NewEncoder(&body) 379 | enc.Encode(idx) 380 | 381 | resp, err := http.Post(fmt.Sprintf("%s/%s", c.DstEs, name), "", &body) 382 | if err != nil { 383 | return err 384 | } 385 | defer resp.Body.Close() 386 | 387 | if resp.StatusCode != 200 { 388 | b, _ := ioutil.ReadAll(resp.Body) 389 | return fmt.Errorf("failed creating index: %s", string(b)) 390 | } 391 | 392 | fmt.Println("created index: ", name) 393 | } 394 | 395 | return 396 | } 397 | 398 | func (c *Config) DeleteIndexes(idxs *Indexes) (err error) { 399 | 400 | for name, idx := range *idxs { 401 | body := bytes.Buffer{} 402 | enc := json.NewEncoder(&body) 403 | enc.Encode(idx) 404 | 405 | req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/%s", c.DstEs, name), nil) 406 | if err != nil { 407 | return err 408 | } 409 | resp, err := http.DefaultClient.Do(req) 410 | if err != nil { 411 | return err 412 | } 413 | 414 | defer resp.Body.Close() 415 | if 
resp.StatusCode == 404 { 416 | // thats okay, index doesnt exist 417 | continue 418 | } 419 | 420 | if resp.StatusCode != 200 { 421 | b, _ := ioutil.ReadAll(resp.Body) 422 | resp.Body.Close() 423 | return fmt.Errorf("failed deleting index: %s", string(b)) 424 | } 425 | 426 | fmt.Println("deleted index: ", name) 427 | } 428 | 429 | return 430 | } 431 | 432 | func (c *Config) CopyShardingSettings(idxs *Indexes) (err error) { 433 | 434 | // get all settings 435 | allSettings := map[string]interface{}{} 436 | 437 | resp, err := http.Get(fmt.Sprintf("%s/_all/_settings", c.SrcEs)) 438 | if err != nil { 439 | return err 440 | } 441 | 442 | if resp.StatusCode != 200 { 443 | b, _ := ioutil.ReadAll(resp.Body) 444 | resp.Body.Close() 445 | return fmt.Errorf("failed getting settings for index: %s", string(b)) 446 | } 447 | 448 | dec := json.NewDecoder(resp.Body) 449 | if err := dec.Decode(&allSettings); err != nil { 450 | return err 451 | } 452 | 453 | for name, index := range *idxs { 454 | if settings, ok := allSettings[name]; !ok { 455 | return fmt.Errorf("couldnt find index %s", name) 456 | } else { 457 | // omg XXX 458 | index.(map[string]interface{})["settings"] = map[string]interface{}{} 459 | var shards string 460 | if _, ok := settings.(map[string]interface{})["settings"].(map[string]interface{})["index"]; ok { 461 | // try the new style syntax first, which has an index object 462 | shards = settings.(map[string]interface{})["settings"].(map[string]interface{})["index"].(map[string]interface{})["number_of_shards"].(string) 463 | } else { 464 | // if not, could be running from old es, try the old style index.number_of_shards 465 | shards = settings.(map[string]interface{})["settings"].(map[string]interface{})["index.number_of_shards"].(string) 466 | } 467 | index.(map[string]interface{})["settings"].(map[string]interface{})["index"] = map[string]interface{}{ 468 | "number_of_shards": shards, 469 | } 470 | } 471 | } 472 | 473 | return 474 | } 475 | 476 | func (idxs 
*Indexes) SetShardCount(indexName, shards string) {

	// map values are references, so mutating through this copy updates idxs
	index := (*idxs)[indexName]
	if _, ok := index.(map[string]interface{})["settings"]; !ok {
		index.(map[string]interface{})["settings"] = map[string]interface{}{}
	}

	if _, ok := index.(map[string]interface{})["settings"].(map[string]interface{})["index"]; !ok {
		index.(map[string]interface{})["settings"].(map[string]interface{})["index"] = map[string]interface{}{}
	}

	index.(map[string]interface{})["settings"].(map[string]interface{})["index"].(map[string]interface{})["number_of_shards"] = shards
}

// DisableReplication sets number_of_replicas to "0" on every index definition
// so the initial bulk load does not pay for replication.
func (idxs *Indexes) DisableReplication() {

	for _, index := range *idxs {
		if _, ok := index.(map[string]interface{})["settings"]; !ok {
			index.(map[string]interface{})["settings"] = map[string]interface{}{}
		}

		if _, ok := index.(map[string]interface{})["settings"].(map[string]interface{})["index"]; !ok {
			index.(map[string]interface{})["settings"].(map[string]interface{})["index"] = map[string]interface{}{}
		}

		index.(map[string]interface{})["settings"].(map[string]interface{})["index"].(map[string]interface{})["number_of_replicas"] = "0"
	}
}

// NewScroll makes the initial scan+scroll request against the source and
// returns the decoded response (scroll id + total hit count).
func (c *Config) NewScroll() (scroll *Scroll, err error) {

	// curl -XGET 'http://es-0.9:9200/_search?search_type=scan&scroll=10m&size=50'
	url := fmt.Sprintf("%s/%s/_search?search_type=scan&scroll=%s&size=%d", c.SrcEs, c.IndexNames, c.ScrollTime, c.DocBufferCount)
	resp, err := http.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	// FIX: surface non-200 responses instead of silently decoding an error
	// payload into an empty Scroll
	if resp.StatusCode != 200 {
		b, _ := ioutil.ReadAll(resp.Body)
		return nil, fmt.Errorf("failed to start scroll: %s", string(b))
	}

	scroll = &Scroll{}
	err = json.NewDecoder(resp.Body).Decode(scroll)

	return
}

// BulkPost posts the accumulated bulk body to the destination and resets the
// buffer. FlushLock serializes flushes so only one bulk request is in flight.
func (c *Config) BulkPost(data *bytes.Buffer) {

	c.FlushLock.Lock()
	defer c.FlushLock.Unlock()

	// the bulk API requires the body to end with a newline
	data.WriteRune('\n')
	resp, err := http.Post(fmt.Sprintf("%s/_bulk", c.DstEs), "", data)
	if err != nil {
		c.ErrChan <- err
		return
	}

	defer resp.Body.Close()
	// NOTE(review): the buffer is reset even when the bulk request fails, so
	// those documents are dropped after the error is reported
	defer data.Reset()
	if resp.StatusCode != 200 {
		b, _ := ioutil.ReadAll(resp.Body)
		c.ErrChan <- fmt.Errorf("bad bulk response: %s", string(b))
		return
	}
}

// ClusterReady reports whether host is healthy enough to dump: green always
// qualifies, yellow qualifies unless --green was passed, red (or anything
// else, e.g. "unreachable") never does.
func (c *Config) ClusterReady(host string) (*ClusterHealth, bool) {

	health := ClusterStatus(host)

	switch health.Status {
	case "green":
		return health, true
	case "yellow":
		return health, !c.WaitForGreen
	default:
		return health, false
	}
}

// ClusterStatus fetches /_cluster/health from host. Hosts that cannot be
// reached — or that return an undecodable payload — are reported with
// Status "unreachable" so the caller keeps waiting.
func ClusterStatus(host string) *ClusterHealth {

	resp, err := http.Get(fmt.Sprintf("%s/_cluster/health", host))
	if err != nil {
		return &ClusterHealth{Name: host, Status: "unreachable"}
	}
	defer resp.Body.Close()

	health := &ClusterHealth{}
	// FIX: the decode error was silently ignored, yielding an empty status;
	// treat it like an unreachable host instead
	if err := json.NewDecoder(resp.Body).Decode(health); err != nil {
		return &ClusterHealth{Name: host, Status: "unreachable"}
	}

	return health
}
--------------------------------------------------------------------------------
/release.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright 2012 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# support functions for go cross compilation

type setopt >/dev/null 2>&1 && setopt shwordsplit
PLATFORMS="darwin/386 darwin/amd64 freebsd/386 freebsd/amd64 freebsd/arm linux/386 linux/amd64 linux/arm windows/386 windows/amd64"

# Define a go-$GOOS-$GOARCH wrapper for one "os/arch" pair; the wrapper runs
# `go "$@"` in a subshell with GOOS/GOARCH exported.
function go-alias {
	GOOS=${1%/*}
	GOARCH=${1#*/}
	eval "function go-${GOOS}-${GOARCH} { ( GOOS=${GOOS} GOARCH=${GOARCH} go \"\$@\" ) }"
}

# Rebuild the Go toolchain for one "os/arch" pair (pre-Go-1.5 style).
function go-crosscompile-build {
	GOOS=${1%/*}
	GOARCH=${1#*/}
	cd $(go env GOROOT)/src ; GOOS=${GOOS} GOARCH=${GOARCH} ./make.bash --no-clean 2>&1
}

# Rebuild the toolchain for every platform, collecting failures.
function go-crosscompile-build-all {
	FAILURES=""
	for PLATFORM in $PLATFORMS; do
		CMD="go-crosscompile-build ${PLATFORM}"
		echo "$CMD"
		$CMD || FAILURES="$FAILURES $PLATFORM"
	done
	if [ -n "$FAILURES" ]; then
		echo "*** go-crosscompile-build-all FAILED on $FAILURES ***"
		return 1
	fi
}

# Run an arbitrary go subcommand for every platform.
function go-all {
	FAILURES=""
	for PLATFORM in $PLATFORMS; do
		GOOS=${PLATFORM%/*}
		GOARCH=${PLATFORM#*/}
		CMD="go-${GOOS}-${GOARCH} $@"
		echo "$CMD"
		$CMD || FAILURES="$FAILURES $PLATFORM"
	done
	if [ -n "$FAILURES" ]; then
		echo "*** go-all FAILED on $FAILURES ***"
		return 1
	fi
}

# Build for every platform, naming each binary <name>-<os>-<arch>. With no
# source file argument the current directory name is used as the base name.
function go-build-all {
	FAILURES=""
	for PLATFORM in $PLATFORMS; do
		GOOS=${PLATFORM%/*}
		GOARCH=${PLATFORM#*/}
		SRCFILENAME=`echo $@ | sed 's/\.go//'`
		CURDIRNAME=${PWD##*/}
		OUTPUT=${SRCFILENAME:-$CURDIRNAME} # if no src file given, use current dir name
		CMD="go-${GOOS}-${GOARCH} build -o $OUTPUT-${GOOS}-${GOARCH} $@"
		echo "$CMD"
		$CMD || FAILURES="$FAILURES $PLATFORM"
	done
	if [ -n "$FAILURES" ]; then
		echo "*** go-build-all FAILED on $FAILURES ***"
		return 1
	fi
}

# register the per-platform aliases, then drop the helper
for PLATFORM in $PLATFORMS; do
	go-alias $PLATFORM
done

unset -f go-alias

# crosscompile
go-build-all
# drop the arm builds
rm -f elasticsearch-dump*arm*
--------------------------------------------------------------------------------