├── .dockerignore ├── .gitignore ├── CONTRIBUTING.md ├── Dockerfile ├── Dockerfile.fuzz ├── Dockerfile.locally-build-deb ├── LICENSE.txt ├── README.md ├── confd ├── conf.d │ ├── hekad.toml.toml │ ├── psql.sh.toml │ └── stellar-core.cfg.toml └── templates │ ├── hekad.toml.tmpl │ ├── psql.sh.tmpl │ └── stellar-core.cfg.tmpl ├── examples ├── compat_complete.env ├── compat_minimal.env ├── local.env └── single.env ├── fuzz ├── install ├── start └── trace ├── heka └── stellar_core_metrics_filter.lua ├── install ├── start └── utils ├── core_file_processor.py └── core_file_processor_test.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .gitignore 3 | README.md 4 | /examples 5 | 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute 2 | 3 | Please read the [Contribution Guide](https://github.com/stellar/docs/blob/master/CONTRIBUTING.md). 4 | 5 | Then please [sign the Contributor License Agreement](https://docs.google.com/forms/d/1g7EF6PERciwn7zfmfke5Sir2n10yddGGSXyZsq98tVY/viewform?usp=send_form). 
6 | 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM stellar/base:latest 2 | 3 | MAINTAINER Mat Schaffer 4 | 5 | ENV STELLAR_CORE_VERSION 15.3.0-1506-ff56e7f2 6 | 7 | EXPOSE 11625 8 | EXPOSE 11626 9 | 10 | VOLUME /data 11 | VOLUME /postgresql-unix-sockets 12 | VOLUME /heka 13 | 14 | ADD install / 15 | RUN /install 16 | 17 | ADD heka /heka 18 | ADD confd /etc/confd 19 | ADD utils /utils 20 | ADD start / 21 | 22 | CMD ["/start"] 23 | -------------------------------------------------------------------------------- /Dockerfile.fuzz: -------------------------------------------------------------------------------- 1 | FROM stellar/base:latest 2 | 3 | MAINTAINER Mat Schaffer 4 | 5 | ENV AFL_VERSION 2.53b 6 | ENV FUZZER_MODE tx 7 | 8 | ADD fuzz/install / 9 | RUN /install 10 | 11 | ADD utils /utils 12 | ADD fuzz/trace / 13 | ADD fuzz/start / 14 | 15 | CMD ["/start"] 16 | -------------------------------------------------------------------------------- /Dockerfile.locally-build-deb: -------------------------------------------------------------------------------- 1 | FROM stellar/base:latest 2 | 3 | MAINTAINER Mat Schaffer 4 | 5 | EXPOSE 11625 6 | EXPOSE 11626 7 | 8 | VOLUME /data 9 | VOLUME /heka 10 | 11 | ADD stellar-core.deb / 12 | ADD install / 13 | RUN /install 14 | 15 | ADD heka /heka 16 | ADD confd /etc/confd 17 | ADD utils /utils 18 | ADD start / 19 | 20 | CMD ["/start"] 21 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2015 Stellar Development Foundation 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # stellar-core 2 | 3 | Docker definitions for [stellar-core](https://github.com/stellar/stellar-core) 4 | 5 | # Usage 6 | 7 | 8 | ## A local full network 9 | 10 | This starts a 3 node local stellar-core network, all on the same docker host. 11 | 12 | Note that the provided local.env uses SDF S3 locations, so edit it to match the specifics of your environment. 
13 | 14 | ```sh 15 | for N in 1 2; do 16 | docker run --name db$N -p 544$N:5432 --env-file examples/local.env -d stellar/stellar-core-state 17 | docker run --name node$N --net host -v ~/.aws:/root/.aws --volumes-from db$N --env-file examples/local.env -d stellar/stellar-core /start node$N fresh forcescp 18 | done 19 | 20 | for N in 3; do 21 | docker run --name db$N -p 544$N:5432 --env-file examples/local.env -d stellar/stellar-core-state 22 | docker run --name node$N --net host -v ~/.aws:/root/.aws --volumes-from db$N --env-file examples/local.env -d stellar/stellar-core /start node$N fresh 23 | done 24 | ``` 25 | 26 | The use of `-v ~/.aws:/root/.aws` here mounts your local AWS credentials into the container which allows the network to use S3 for storage. 27 | 28 | You can check the cluster status with curl. The IP shown here is a typical boot2docker IP. Replace it with the IP of your docker host. 29 | 30 | ```sh 31 | watch 'echo 6 7 3 | xargs -n1 -I{} curl -s 192.168.59.103:1162{}/info' 32 | ``` 33 | 34 | Basic clean up involves simply wiping out all containers. S3 history must be removed separately. Something like this should do the trick. 
35 | 36 | ```sh 37 | docker ps -a | egrep '(node|db)\d+' | awk '{ print $1 }' | xargs -n1 docker rm -f -v 38 | ``` 39 | 40 | ## Single node configurations 41 | 42 | ### Catch up complete with SDF testnet 43 | 44 | ``` 45 | docker run --name db_compat_complete -p 5541:5432 --env-file examples/compat_complete.env -d stellar/stellar-core-state 46 | docker run --name compat_complete --net host --volumes-from db_compat_complete --env-file examples/compat_complete.env -d stellar/stellar-core:latest /start compat_complete fresh 47 | ``` 48 | 49 | ### Catch up minimal with SDF testnet 50 | 51 | ``` 52 | docker run --name db_compat_minimal -p 5641:5432 --env-file examples/compat_minimal.env -d stellar/stellar-core-state 53 | docker run --name compat_minimal --net host --volumes-from db_compat_minimal --env-file examples/compat_minimal.env -d stellar/stellar-core:latest /start compat_minimal fresh 54 | ``` 55 | 56 | ### Single node local network (with monitoring) 57 | 58 | Note that the monitoring container is invoked with the docker socket exposed. This allows the monitoring container to invoke `docker run stellar/stellar-core` to do things like process core dumps. 59 | 60 | ``` 61 | docker run --name single-state \ 62 | -p 5432:5432 \ 63 | --env-file examples/single.env \ 64 | -d stellar/stellar-core-state 65 | 66 | docker run --name single \ 67 | --net host \ 68 | --volumes-from single-state \ 69 | -v /volumes/main/cores:/cores -v /volumes/main/logs:/logs \ 70 | --env-file examples/single.env \ 71 | -d stellar/stellar-core \ 72 | /start main fresh forcescp 73 | 74 | # optionally 75 | docker run --name single-heka \ 76 | --net container:single \ 77 | --volumes-from single \ 78 | -v /var/run/docker.sock:/var/run/docker.sock \ 79 | --env-file examples/single.env \ 80 | -d stellar/heka 81 | ``` 82 | 83 | ## A note on capturing core files 84 | 85 | Capturing core files from a container process is a bit involved. 
86 | 87 | You'll need to first enable unlimited sized core dumps at the docker layer, then set a `core_pattern` to a location that the container has set up as a volume. 88 | 89 | If either are not set, the core will not be dumped. 90 | 91 | If you're on boot2docker you can set this by adding the following to the boot2docker profile: 92 | 93 | ```sh 94 | echo '/cores/%e_%h_%s_%p_%t.core' > /proc/sys/kernel/core_pattern 95 | EXTRA_ARGS="--default-ulimit core=-1" 96 | ``` 97 | 98 | To edit this profile use the following commands to first edit the file, then restart the docker daemon: 99 | 100 | ```console 101 | > boot2docker ssh -t sudo vi /var/lib/boot2docker/profile 102 | > boot2docker ssh 'sudo /etc/init.d/docker restart' 103 | ``` 104 | 105 | On docker-machine you can specify engine options when creating the machine, then use ssh to set the core pattern: 106 | 107 | ```console 108 | > docker-machine create \ 109 | --driver virtualbox \ 110 | --engine-opt 'default-ulimit=core=-1' core1 111 | > docker-machine ssh core1 \ 112 | "sudo sh -c 'echo \"/cores/%e_%h_%s_%p_%t.core\" > /proc/sys/kernel/core_pattern'" 113 | ``` 114 | 115 | ## Logs 116 | 117 | To display container logs use: 118 | 119 | ``` 120 | docker logs -f [container_name] 121 | ``` 122 | -------------------------------------------------------------------------------- /confd/conf.d/hekad.toml.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "hekad.toml.tmpl" 3 | dest = "/heka/hekad.toml" 4 | mode = "0644" 5 | -------------------------------------------------------------------------------- /confd/conf.d/psql.sh.toml: -------------------------------------------------------------------------------- 1 | [template] 2 | src = "psql.sh.tmpl" 3 | dest = "/etc/profile.d/psql.sh" 4 | mode = "0644" 5 | -------------------------------------------------------------------------------- /confd/conf.d/stellar-core.cfg.toml: 
-------------------------------------------------------------------------------- 1 | [template] 2 | src = "stellar-core.cfg.tmpl" 3 | dest = "/stellar-core.cfg" 4 | mode = "0644" 5 | -------------------------------------------------------------------------------- /confd/templates/hekad.toml.tmpl: -------------------------------------------------------------------------------- 1 | # -*- mode: toml -*- 2 | # vi: set ft=toml : 3 | 4 | [StellarCoreMetricsInput] 5 | type = "HttpInput" 6 | url = "http://localhost:{{getenv (printf "%s_HTTP_PORT" (getenv "SELF"))}}/metrics" 7 | ticker_interval = 1 8 | 9 | [StellarCoreMetricsFilter] 10 | type = "SandboxFilter" 11 | message_matcher = "Type == 'heka.httpinput.data'" 12 | filename = "/heka/stellar_core_metrics_filter.lua" 13 | ticker_interval = {{or (getenv "METRICS_INTERVAL") 60}} 14 | [StellarCoreMetricsFilter.config] 15 | environment = "{{getenv "ENVIRONMENT"}}" 16 | cluster = "{{getenv "CLUSTER_NAME"}}" 17 | node = "{{getenv "HOSTNAME"}}" 18 | 19 | {{if (getenv "INFLUX_ADDRESS")}} 20 | [StellarCoreMetricsInfluxOutput] 21 | type = "HttpOutput" 22 | message_matcher = "Type == 'heka.sandbox.stellar.core.metrics.influx'" 23 | address = "{{getenv "INFLUX_ADDRESS"}}" 24 | encoder = "PayloadEncoder" 25 | username = "{{getenv "INFLUX_USERNAME"}}" 26 | password = "{{getenv "INFLUX_PASSWORD"}}" 27 | {{end}} 28 | 29 | {{if (getenv "ATLAS_ADDRESS")}} 30 | [StellarCoreMetricsAtlasOutput] 31 | type = "HttpOutput" 32 | message_matcher = "Type == 'heka.sandbox.stellar.core.metrics.atlas'" 33 | address = "{{getenv "ATLAS_ADDRESS"}}" 34 | encoder = "PayloadEncoder" 35 | [StellarCoreMetricsAtlasOutput.headers] 36 | Content-Type = ["application/json"] 37 | {{end}} 38 | 39 | {{if (getenv "CORE_ALERT_RECIPIENT")}} 40 | [NullSplitter] 41 | 42 | [CoreFileProcessor] 43 | type = "ProcessInput" 44 | ticker_interval = 30 45 | splitter = "NullSplitter" 46 | stdout = true 47 | stderr = true 48 | [CoreFileProcessor.command.0] 49 | bin = "docker" 50 | 
args = ["run", 51 | "--net", "host", 52 | "-e", "CORE_ALERT_RECIPIENT={{getenv "CORE_ALERT_RECIPIENT"}}", 53 | "-e", "CORE_ARCHIVE_COMMAND={{getenv "CORE_ARCHIVE_COMMAND"}}", 54 | "-e", "CORE_LOG_FILTER={{getenv "CONTAINER_ID" | printf "%.12s"}}", 55 | {{if (getenv "AWS_CREDENTIAL_SOURCE")}} 56 | "-v", "{{getenv "AWS_CREDENTIAL_SOURCE"}}:/root/.aws:ro", 57 | {{end}} 58 | "-v", "/var/log/syslog:/host/syslog:ro", 59 | "--volumes-from", "{{getenv "CONTAINER_ID"}}", 60 | {{if (getenv "USE_SYSLOG")}} 61 | "--log-driver", "syslog", 62 | {{end}} 63 | "-t", "--rm", 64 | "stellar/stellar-core", 65 | "/utils/core_file_processor.py"] 66 | {{end}} 67 | 68 | [PayloadEncoder] 69 | 70 | [DashboardOutput] 71 | ticker_interval = 1 72 | 73 | [DebugOutput] 74 | message_matcher = "Type == 'heka.sandbox-terminated'" 75 | type = "LogOutput" 76 | encoder = "RstEncoder" 77 | 78 | [RstEncoder] 79 | -------------------------------------------------------------------------------- /confd/templates/psql.sh.tmpl: -------------------------------------------------------------------------------- 1 | export PGHOST=/postgresql-unix-sockets 2 | export PGDATABASE=postgres 3 | export PGUSER=postgres 4 | -------------------------------------------------------------------------------- /confd/templates/stellar-core.cfg.tmpl: -------------------------------------------------------------------------------- 1 | PEER_PORT={{getenv (printf "%s_PEER_PORT" (getenv "SELF"))}} 2 | 3 | RUN_STANDALONE=false 4 | LOG_FILE_PATH="{{getenv "LOG_FILE_PATH"}}" 5 | BUCKET_DIR_PATH="/data/buckets" 6 | 7 | HTTP_PORT={{getenv (printf "%s_HTTP_PORT" (getenv "SELF"))}} 8 | PUBLIC_HTTP_PORT=true 9 | 10 | {{if (getenv "CATCHUP_COMPLETE")}} 11 | CATCHUP_COMPLETE={{getenv "CATCHUP_COMPLETE"}} 12 | {{end}} 13 | 14 | {{if (getenv "CATCHUP_RECENT")}} 15 | CATCHUP_RECENT={{getenv "CATCHUP_RECENT"}} 16 | {{end}} 17 | 18 | {{if (getenv (printf "%s_NODE_SEED" (getenv "SELF"))) }} 19 | NODE_SEED="{{getenv (printf "%s_NODE_SEED" (getenv 
"SELF"))}}" 20 | {{end}} 21 | {{if (getenv "NODE_IS_VALIDATOR")}} 22 | NODE_IS_VALIDATOR={{getenv "NODE_IS_VALIDATOR"}} 23 | {{end}} 24 | 25 | {{if (getenv "TARGET_PEER_CONNECTIONS")}} 26 | TARGET_PEER_CONNECTIONS={{getenv "TARGET_PEER_CONNECTIONS"}} 27 | {{else}} 28 | TARGET_PEER_CONNECTIONS=20 29 | {{end}} 30 | 31 | {{if (getenv "MAX_ADDITIONAL_PEER_CONNECTIONS")}} 32 | MAX_ADDITIONAL_PEER_CONNECTIONS={{getenv "MAX_ADDITIONAL_PEER_CONNECTIONS"}} 33 | {{else}} 34 | MAX_ADDITIONAL_PEER_CONNECTIONS=20 35 | {{end}} 36 | 37 | {{if (getenv "MAX_CONCURRENT_SUBPROCESSES")}} 38 | MAX_CONCURRENT_SUBPROCESSES={{getenv "MAX_CONCURRENT_SUBPROCESSES"}} 39 | {{else}} 40 | MAX_CONCURRENT_SUBPROCESSES=64 41 | {{end}} 42 | 43 | {{if (getenv "PREFERRED_PEERS")}} 44 | PREFERRED_PEERS={{getenv "PREFERRED_PEERS"}} 45 | {{end}} 46 | 47 | {{if (getenv "KNOWN_PEERS")}} 48 | KNOWN_PEERS={{getenv "KNOWN_PEERS"}} 49 | {{end}} 50 | 51 | {{if (getenv "MANUAL_CLOSE")}} 52 | MANUAL_CLOSE={{getenv "MANUAL_CLOSE"}} 53 | {{end}} 54 | 55 | {{if (getenv "ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING")}} 56 | ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING={{getenv "ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING"}} 57 | {{end}} 58 | 59 | {{if (getenv "ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING")}} 60 | ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING={{getenv "ARTIFICIALLY_ACCELERATE_TIME_FOR_TESTING"}} 61 | {{end}} 62 | 63 | {{if (getenv "COMMANDS")}} 64 | COMMANDS={{getenv "COMMANDS"}} 65 | {{end}} 66 | 67 | {{if (getenv "FAILURE_SAFETY")}} 68 | FAILURE_SAFETY={{getenv "FAILURE_SAFETY"}} 69 | {{end}} 70 | {{if (getenv "UNSAFE_QUORUM")}} 71 | UNSAFE_QUORUM={{getenv "UNSAFE_QUORUM"}} 72 | {{end}} 73 | 74 | {{if (getenv "DATABASE")}} 75 | DATABASE="{{getenv "DATABASE"}}" 76 | {{else}} 77 | DATABASE="postgresql://dbname=stellar user=postgres host=/postgresql-unix-sockets" 78 | {{end}} 79 | 80 | {{if (getenv "NETWORK_PASSPHRASE")}} 81 | NETWORK_PASSPHRASE="{{getenv "NETWORK_PASSPHRASE"}}" 82 | {{else}} 83 | NETWORK_PASSPHRASE="Public 
Global Stellar Network ; September 2015" 84 | {{end}} 85 | 86 | {{if (getenv "AUTOMATIC_MAINTENANCE_PERIOD")}} 87 | AUTOMATIC_MAINTENANCE_PERIOD={{getenv "AUTOMATIC_MAINTENANCE_PERIOD"}} 88 | {{end}} 89 | 90 | {{if (getenv "AUTOMATIC_MAINTENANCE_COUNT")}} 91 | AUTOMATIC_MAINTENANCE_COUNT={{getenv "AUTOMATIC_MAINTENANCE_COUNT"}} 92 | {{end}} 93 | 94 | [QUORUM_SET] 95 | {{if (getenv "THRESHOLD_PERCENT")}} 96 | THRESHOLD_PERCENT={{getenv "THRESHOLD_PERCENT"}} 97 | {{end}} 98 | VALIDATORS={{getenv "VALIDATORS"}} 99 | 100 | {{range jsonArray (getenv "HISTORY_PEERS")}} 101 | [HISTORY.{{.}}] 102 | get="{{printf (getenv "HISTORY_GET") .}}" 103 | {{if (eq . (getenv "SELF"))}} 104 | put="{{printf (getenv "HISTORY_PUT") (getenv "SELF")}}" 105 | {{if (getenv "HISTORY_MKDIR")}} 106 | mkdir="{{printf (getenv "HISTORY_MKDIR") (getenv "SELF")}}" 107 | {{end}} 108 | {{end}} 109 | {{end}} 110 | 111 | -------------------------------------------------------------------------------- /examples/compat_complete.env: -------------------------------------------------------------------------------- 1 | POSTGRES_PASSWORD=mysecretpassword 2 | 3 | compat_complete_POSTGRES_PORT=5541 4 | compat_complete_PEER_PORT=11625 5 | compat_complete_HTTP_PORT=11626 6 | compat_complete_PEER_SEED=SBTEQJMDYCNRXRBEUUK7PTC737GVE24VSFVEHQXRALAARQCU5O246KVT 7 | compat_complete_VALIDATION_SEED=SACTBO4KQ3LYYRRKBST3ZCO24FYRDP2CYN3W5IPP2OTC3NUIKFGZSRRY 8 | 9 | CATCHUP_COMPLETE=true 10 | 11 | PREFERRED_PEERS=["core-testnet1.stellar.org", "core-testnet2.stellar.org", "core-testnet3.stellar.org"] 12 | 13 | UNSAFE_QUORUM=true 14 | FAILURE_SAFETY=0 15 | 16 | VALIDATORS=["GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y", "GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP", "GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z"] 17 | 18 | HISTORY_PEERS=["core_testnet_001", "core_testnet_002", "core_testnet_003"] 19 | 20 | HISTORY_GET=wget -q -O {1} https://history.stellar.org/prd/core-testnet/%s/{0} 21 | 
22 | NETWORK_PASSPHRASE=Test SDF Network ; September 2015 23 | -------------------------------------------------------------------------------- /examples/compat_minimal.env: -------------------------------------------------------------------------------- 1 | POSTGRES_PASSWORD=mysecretpassword 2 | 3 | compat_minimal_POSTGRES_PORT=5641 4 | compat_minimal_PEER_PORT=11625 5 | compat_minimal_HTTP_PORT=11626 6 | compat_minimal_PEER_SEED=SBTEQJMDYCNRXRBEUUK7PTC737GVE24VSFVEHQXRALAARQCU5O246KVT 7 | compat_minimal_VALIDATION_SEED=SACTBO4KQ3LYYRRKBST3ZCO24FYRDP2CYN3W5IPP2OTC3NUIKFGZSRRY 8 | 9 | PREFERRED_PEERS=["core-testnet1.stellar.org", "core-testnet2.stellar.org", "core-testnet3.stellar.org"] 10 | 11 | UNSAFE_QUORUM=true 12 | FAILURE_SAFETY=0 13 | 14 | VALIDATORS=["GDKXE2OZMJIPOSLNA6N6F2BVCI3O777I2OOC4BV7VOYUEHYX7RTRYA7Y", "GCUCJTIYXSOXKBSNFGNFWW5MUQ54HKRPGJUTQFJ5RQXZXNOLNXYDHRAP", "GC2V2EFSXN6SQTWVYA5EPJPBWWIMSD2XQNKUOHGEKB535AQE2I6IXV2Z"] 15 | 16 | HISTORY_PEERS=["core_testnet_001", "core_testnet_002", "core_testnet_003"] 17 | 18 | HISTORY_GET=wget -q -O {1} https://history.stellar.org/prd/core-testnet/%s/{0} 19 | 20 | NETWORK_PASSPHRASE=Test SDF Network ; September 2015 21 | -------------------------------------------------------------------------------- /examples/local.env: -------------------------------------------------------------------------------- 1 | POSTGRES_PASSWORD=mysecretpassword 2 | HORIZON_PASSWORD=myreadonlyhorizonpassword 3 | 4 | node1_POSTGRES_PORT=5441 5 | node1_PEER_PORT=11625 6 | node1_HTTP_PORT=11626 7 | node1_NODE_SEED=SAJEJWKJAOELT2TJPTNLZXXSC3QXHKXDZD6NZLM3B6VQ7G22DQH5TC6C 8 | 9 | node2_POSTGRES_PORT=5442 10 | node2_PEER_PORT=11635 11 | node2_HTTP_PORT=11636 12 | node2_NODE_SEED=SACTBO4KQ3LYYRRKBST3ZCO24FYRDP2CYN3W5IPP2OTC3NUIKFGZSRRY 13 | 14 | node3_POSTGRES_PORT=5443 15 | node3_PEER_PORT=11645 16 | node3_HTTP_PORT=11646 17 | node3_NODE_SEED=SCVXYFYE6BQYZD4O6J5ULJXRZYXLMKMXZEI3O3HGTQEB43UOY4XUZV3L 18 | 19 | PREFERRED_PEERS=["127.0.0.1:11625", 
"127.0.0.1:11635", "127.0.0.1:11645"] 20 | 21 | THRESHOLD_PERCENT=51 22 | NODE_IS_VALIDATOR=true 23 | UNSAFE_QUORUM=true 24 | FAILURE_SAFETY=0 25 | 26 | VALIDATORS=["GBB2FMDSJY2VMKKB4QRAXU32OEYV5WPW4CRXB3NQSSD4YENKRMGAC7NB", "GB4FD5MNQ73WYA47YM2PP443ZE2KUBUI2I36LZAECGZ37ABLUN257GSI", "GACI4DOR3ILATD6CAPNDAAWVFNCOB3YRQ7BXJ3R7D5K7ICEZOYS26QUN"] 27 | 28 | HISTORY_GET=aws s3 cp --region eu-west-1 s3://history-stg.stellar.org/dev/core-docker/%s/{0} {1} 29 | HISTORY_PUT=aws s3 cp --region eu-west-1 {0} s3://history-stg.stellar.org/dev/core-docker/%s/{1} 30 | HISTORY_RESET=aws s3 rm --recursive --region eu-west-1 s3://history-stg.stellar.org/dev/core-docker/%s 31 | 32 | HISTORY_PEERS=["node1", "node2", "node3"] 33 | 34 | NETWORK_PASSPHRASE=Test SDF Network ; September 2015 35 | -------------------------------------------------------------------------------- /examples/single.env: -------------------------------------------------------------------------------- 1 | POSTGRES_PASSWORD=postgrespassword 2 | HORIZON_PASSWORD=myreadonlyhorizonpassword 3 | 4 | ENVIRONMENT=dev 5 | CLUSTER_NAME=docker-single 6 | 7 | # CORE_ALERT_RECIPIENT=mat@stellar.org 8 | # CORE_ARCHIVE_COMMAND=aws s3 --region eu-west-1 cp {0} s3://stellar-ops/cores/dev/{1} 9 | # AWS_CREDENTIAL_SOURCE=/path/to/your/.aws 10 | 11 | # INFLUX_ADDRESS=http://influxdb.services.stellar-ops.com:8086/db/stellar-core/series 12 | # INFLUX_USERNAME=root 13 | # INFLUX_PASSWORD=root 14 | 15 | # ATLAS_ADDRESS=http://localhost:7101/api/v1/publish 16 | 17 | main_POSTGRES_PORT=5432 18 | main_PEER_PORT=11625 19 | main_HTTP_PORT=11626 20 | 21 | main_NODE_SEED=SACJC372QBSSKJYTV5A7LWT4NXWHTQO6GHG4QDAVC2XDPX6CNNXFZ4JK 22 | 23 | NODE_IS_VALIDATOR=true 24 | UNSAFE_QUORUM=true 25 | FAILURE_SAFETY=0 26 | 27 | PREFERRED_PEERS=["127.0.0.1:39133"] 28 | VALIDATORS=["GD5KD2KEZJIGTC63IGW6UMUSMVUVG5IHG64HUTFWCHVZH2N2IBOQN7PS"] 29 | 30 | HISTORY_PEERS=["main"] 31 | 32 | HISTORY_GET=cp history/%s/{0} {1} 33 | HISTORY_PUT=cp {0} history/%s/{1} 34 | 
HISTORY_MKDIR=mkdir -p history/%s/{0} 35 | 36 | NETWORK_PASSPHRASE=Test SDF Network ; September 2015 37 | -------------------------------------------------------------------------------- /fuzz/install: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | wget -O - http://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - 6 | 7 | add-apt-repository "deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch main" 8 | add-apt-repository "deb http://apt.llvm.org/stretch/ llvm-toolchain-stretch-5.0 main" 9 | 10 | export DEBIAN_FRONTEND=noninteractive 11 | 12 | apt-get update 13 | 14 | apt-get install -y \ 15 | make \ 16 | git \ 17 | gcc \ 18 | autoconf \ 19 | automake \ 20 | libtool \ 21 | pkg-config \ 22 | flex \ 23 | bison \ 24 | clang-5.0 \ 25 | llvm-5.0 \ 26 | g++ \ 27 | libstdc++6 \ 28 | libpq5 \ 29 | libpq-dev 30 | 31 | update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-6 90 --slave /usr/bin/g++ g++ /usr/bin/g++-6 32 | update-alternatives --install /usr/bin/clang clang /usr/bin/clang-5.0 90 --slave /usr/bin/clang++ clang++ /usr/bin/clang++-5.0 33 | update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /usr/bin/llvm-symbolizer-5.0 90 34 | update-alternatives --install /usr/bin/llvm-config llvm-config /usr/bin/llvm-config-5.0 90 35 | 36 | # for the core_file_processor 37 | apt-get install -y python-pip 38 | pip install boto 39 | 40 | # for the fuzzer 41 | wget -nv -O afl-${AFL_VERSION}.tgz https://github.com/google/AFL/archive/v${AFL_VERSION}.tar.gz 42 | tar -zxf afl-${AFL_VERSION}.tgz 43 | cd AFL-${AFL_VERSION} 44 | make 45 | make -C llvm_mode CXX=g++ 46 | make install 47 | cd .. 
48 | rm -rf AFL-${AFL_VERSION} 49 | rm afl-${AFL_VERSION}.tgz 50 | 51 | # build stellar-core under fuzzer 52 | git clone https://github.com/stellar/stellar-core stellar-core 53 | cd stellar-core 54 | ./autogen.sh 55 | ./configure --enable-afl 56 | make -j $(nproc) 57 | 58 | # clean out objects post-build, rebuild with make -t (touch) 59 | find / -name \*.o | xargs rm 60 | find / -name \*.a | xargs rm 61 | make -t 62 | 63 | # purge stuff we don't directly need 64 | apt-get purge -y sgml-base manpages liblocale-gettext-perl libtext-{charwidth,iconv,wrapi18n}-perl krb5-locales \ 65 | build-essential ucf gcc g++ xz-utils dbus gcc-6 g++-6 66 | apt-get autoremove -y 67 | apt-get clean 68 | 69 | # delete a handful of items we don't need and take up some actual space 70 | rm -rf /usr/share/locale/* 71 | rm -rf /usr/share/mime/* 72 | rm -rf /var/lib/apt/lists/* 73 | rm -rf /usr/lib/*/gconv/*.so 74 | rm -Rf /usr/lib/*/libicu*.so 75 | rm -rf /var/log/*.log /var/log/*/*.log 76 | rm -rf /usr/share/{file,man,doc} 77 | rm -rf /usr/lib/llvm-*/build /usr/share/llvm-*/cmake 78 | rm -rf /usr/lib/llvm-*/lib/*.a ./lib/x86_64-linux-gnu/*.a 79 | rm -rf /usr/lib/llvm-*/bin/{opt,lli,llc,llvm-tblgen,bugpoint,clang-*,pp-trace} 80 | rm -rf /usr/local/lib/python2.7/dist-packages/awscli/examples 81 | rm -rf /usr/share/postgresql/*/man 82 | -------------------------------------------------------------------------------- /fuzz/start: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd stellar-core 6 | make fuzz 7 | -------------------------------------------------------------------------------- /fuzz/trace: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CORES=$(find /cores -maxdepth 1 -name 'stellar-core*.core') 4 | 5 | mkdir -p /cores/traces 6 | 7 | if [[ -z "${CORES}" ]]; then 8 | echo No core files found 9 | exit 0 10 | fi 11 | 12 | wget -O - 
-- Heka sandbox filter: parses stellar-core metrics JSON from the Payload and
-- periodically re-emits the latest values as InfluxDB- and Atlas-shaped messages.
local cj = require "cjson"
local dt = require "date_time"
local l = require "lpeg"
local table = require "table"
local math = require "math"

local environment = read_config("environment")
local node = read_config("node")
local cluster = read_config("cluster")

-- how many metrics to load into a single influx/atlas payload
local max_metrics = read_config("max_metrics") or 80

-- most recent whitelisted stats per metric name; flushed on timer_event
local latest_stats = {}

-- Decode one metrics payload and cache the whitelisted stats per metric type.
-- Returns -1 (reject) on undecodable JSON, bad timestamp, or missing metrics.
function process_message()
    local raw_message = read_message("Payload")
    local ok, json = pcall(cj.decode, raw_message)

    if not ok then return -1 end

    -- BUG FIX: declare timestamp in function scope; it was previously
    -- `local` inside the if-block, so the value below was always nil.
    local timestamp
    if json["ts"] ~= nil then
        -- BUG FIX: the lpeg module is imported as `l`; the global `lpeg`
        -- was nil, so this line errored for every message carrying "ts".
        local ts = l.match(dt.rfc3339, json["ts"])
        if not ts then return -1 end

        timestamp = dt.time_to_ns(ts)
    end

    -- guard against payloads without a metrics table
    if type(json["metrics"]) ~= "table" then return -1 end

    for name, stats in pairs(json["metrics"]) do
        if stats.type == "timer" then
            process_metric(timestamp, name, stats, {
                "count", "1_min_rate",
                "min", "mean", "max", "95%", "99%", "99.9%"
            })
        elseif stats.type == "meter" then
            process_metric(timestamp, name, stats, {
                "count", "1_min_rate",
            })
        elseif stats.type == "counter" then
            process_metric(timestamp, name, stats, {
                "count"
            })
        end
    end

    return 0
end

-- Emit cached stats as InfluxDB point sets, batching max_metrics per message.
function send_to_influx(ns)
    local time = math.floor(ns / 1000000)
    local output = {}

    local queued_metrics = 0

    for name, stats in pairs(latest_stats) do
        if queued_metrics >= max_metrics then
            inject_for_influx(ns, output)
            output = {}
            queued_metrics = 0
        end

        name = environment .. "." .. node .. "." .. name

        local point_set = {
            name = name,
            columns = {"time"},
            points = {{time}}
        }

        for stat, value in pairs(stats) do
            table.insert(point_set.columns, stat)
            table.insert(point_set.points[1], value)
        end

        table.insert(output, point_set)
        queued_metrics = queued_metrics + 1
    end

    inject_for_influx(ns, output)
end

-- Inject one Influx payload; skips empty batches.
function inject_for_influx(ns, output)
    if next(output) then
        local message = {
            Timestamp = ns,
            Type = "stellar.core.metrics.influx"
        }

        -- BUG FIX: cjson is imported as `cj`; the global `cjson` was nil,
        -- so encoding (and thus every flush) errored.
        message.Payload = cj.encode(output)

        inject_message(message)
    end
end

-- Emit cached stats as Atlas gauge metrics, batching max_metrics per message.
function send_to_atlas(ns)
    local time = math.floor(ns / 1000000)
    local output = {
        tags = {
            environment = environment,
            cluster = cluster,
            node = node,
            app = "stellar-core"
        },
        metrics = {
        }
    }

    local queued_metrics = 0

    for name, stats in pairs(latest_stats) do
        if queued_metrics >= max_metrics then
            inject_for_atlas(ns, output)
            output.metrics = {}
            queued_metrics = 0
        end
        for stat, value in pairs(stats) do
            local metric_payload = {
                tags = {
                    name = name,
                    stat = stat,
                    ["atlas.dstype"] = "gauge"
                },
                timestamp = time,
                value = value
            }
            table.insert(output.metrics, metric_payload)
        end
        queued_metrics = queued_metrics + 1
    end

    inject_for_atlas(ns, output)
end

-- Inject one Atlas payload; skips batches with no metrics.
function inject_for_atlas(ns, output)
    if next(output.metrics) then
        local message = {
            Timestamp = ns,
            Type = "stellar.core.metrics.atlas"
        }

        -- BUG FIX: cj, not the undefined global cjson (see inject_for_influx)
        message.Payload = cj.encode(output)

        inject_message(message)
    end
end

function timer_event(ns)
    send_to_influx(ns)
    send_to_atlas(ns)
end

-- Cache only the whitelisted stats for one metric.
-- NOTE(review): `timestamp` is currently unused here — kept for interface
-- stability with the call sites in process_message.
function process_metric(timestamp, name, stats, stat_whitelist)
    local selected_stats = {}

    for i, stat in ipairs(stat_whitelist) do
        selected_stats[stat] = stats[stat]
    end

    latest_stats[name] = selected_stats
end
items we don't need and take up some actual space 41 | rm -rf /usr/include 42 | rm -rf /usr/share/{file,man,doc} 43 | rm -rf /usr/lib/llvm-5.0/build /usr/share/llvm-5.0/cmake 44 | rm -rf /usr/lib/llvm-5.0/lib/*.a ./lib/x86_64-linux-gnu/*.a 45 | rm -rf /usr/lib/llvm-5.0/bin/{opt,lli,llc,llvm-tblgen,bugpoint} 46 | rm -rf /usr/local/lib/python2.7/dist-packages/awscli/examples 47 | rm -rf /usr/share/postgresql/*/man 48 | -------------------------------------------------------------------------------- /start: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | export SELF="$1" 6 | 7 | if [[ -z "${SELF}" ]]; then 8 | echo "Usage: $0 [commands...]" >&2 9 | exit 1 10 | fi 11 | 12 | shift 13 | 14 | export CONTAINER_ID=$(grep cpu: /proc/self/cgroup | awk -F/ '{ print $3 }') 15 | 16 | confd -onetime -backend=env 17 | 18 | source /etc/profile 19 | 20 | if [[ -f "/stellar-core-override.cfg" ]]; then 21 | CONFIG_OPTION="--conf /stellar-core-override.cfg" 22 | fi 23 | 24 | if [[ "$1" == "nopsql" ]]; then 25 | NOPSQL=true 26 | shift 27 | else 28 | while ! psql -c 'select 1' >/dev/null 2>&1; do 29 | echo "Waiting for postgres to be available..." 
30 | sleep 1 31 | done 32 | fi 33 | 34 | function newdb() { 35 | rm -rf /data/* 36 | 37 | if [[ -z "${NOPSQL}" ]]; then 38 | 39 | dropdb stellar || true 40 | createdb stellar 41 | 42 | if [[ -n "${HORIZON_PASSWORD}" ]]; then 43 | dropuser horizon || true 44 | createuser horizon 45 | psql -c "alter user horizon with password '${HORIZON_PASSWORD}'" 46 | psql >/dev/null <<-SQL 47 | GRANT CONNECT ON DATABASE stellar to horizon; 48 | \c stellar 49 | REVOKE ALL ON schema public FROM public; 50 | GRANT ALL ON schema public TO postgres; 51 | GRANT USAGE ON SCHEMA public to horizon; 52 | 53 | GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO horizon; 54 | GRANT SELECT ON ALL TABLES IN SCHEMA public TO horizon; 55 | 56 | ALTER DEFAULT PRIVILEGES FOR USER postgres IN SCHEMA public GRANT SELECT ON SEQUENCES TO horizon; 57 | ALTER DEFAULT PRIVILEGES FOR USER postgres IN SCHEMA public GRANT SELECT ON TABLES TO horizon; 58 | SQL 59 | fi 60 | 61 | fi 62 | 63 | stellar-core $CONFIG_OPTION new-db 64 | } 65 | 66 | while [[ -n "$1" ]]; do 67 | COMMAND="$1" 68 | shift 69 | 70 | case "${COMMAND}" in 71 | newdb) 72 | newdb 73 | ;; 74 | fresh) 75 | newdb 76 | [[ -n "${HISTORY_RESET}" ]] && $(printf "${HISTORY_RESET}" "${SELF}") || true 77 | [[ -n "${HISTORY_PUT}" ]] && stellar-core $CONFIG_OPTION new-hist "${SELF}" 78 | ;; 79 | newhist) 80 | [[ -n "${HISTORY_RESET}" ]] && $(printf "${HISTORY_RESET}" "${SELF}") || true 81 | [[ -n "${HISTORY_PUT}" ]] && stellar-core $CONFIG_OPTION new-hist "${SELF}" 82 | ;; 83 | forcescp) 84 | stellar-core $CONFIG_OPTION force-scp 85 | ;; 86 | catchupcomplete) 87 | stellar-core $CONFIG_OPTION catchup current/max 88 | ;; 89 | catchuprange) 90 | FROM=$1 91 | shift 92 | TO=$1 93 | shift 94 | if [ "${FROM}" -eq "${FROM}" -a "${TO}" -eq "${TO}" ]; then 95 | OUTPUT=$1 96 | COUNT=$((TO-FROM+1)) 97 | if [[ "${OUTPUT}" ]]; then 98 | stellar-core $CONFIG_OPTION catchup $TO/$COUNT --output-file "${OUTPUT}" 99 | shift 100 | else 101 | stellar-core $CONFIG_OPTION 
catchup $TO/$COUNT 102 | fi 103 | else 104 | echo "Valid ledger range required" >&2 105 | exit 1 106 | fi 107 | ;; 108 | catchupat) 109 | AT=$1 110 | shift 111 | if [[ "${AT}" == "current" || "${AT}" -eq "${AT}" ]]; then 112 | OUTPUT=$1 113 | if [[ "${OUTPUT}" ]]; then 114 | stellar-core $CONFIG_OPTION catchup $AT/0 --output-file "${OUTPUT}" 115 | shift 116 | else 117 | stellar-core $CONFIG_OPTION catchup $AT/0 118 | fi 119 | else 120 | echo "Valid ledger required" >&2 121 | exit 1 122 | fi 123 | ;; 124 | catchupto) 125 | TO=$1 126 | shift 127 | if [[ "${TO}" == "current" || "${TO}" -eq "${TO}" ]]; then 128 | OUTPUT=$1 129 | if [[ "${OUTPUT}" ]]; then 130 | stellar-core $CONFIG_OPTION catchup $TO/max --output-file "${OUTPUT}" 131 | shift 132 | else 133 | stellar-core $CONFIG_OPTION catchup $TO/max 134 | fi 135 | else 136 | echo "Valid ledger required" >&2 137 | exit 1 138 | fi 139 | ;; 140 | lasthistorycheckpoint) 141 | OUTPUT=$1 142 | if [[ "${OUTPUT}" ]]; then 143 | stellar-core $CONFIG_OPTION report-last-history-checkpoint --output-file "${OUTPUT}" 144 | shift 145 | else 146 | stellar-core $CONFIG_OPTION report-last-history-checkpoint 147 | fi 148 | SKIP_START=true 149 | ;; 150 | skipstart) 151 | SKIP_START=true 152 | ;; 153 | test) 154 | if [[ -z "${NOPSQL}" ]]; then 155 | for i in `seq 1 10`; do 156 | dropdb test$i || true 157 | createdb test$i 158 | done 159 | fi 160 | 161 | TEST_FILTER=$1 162 | shift 163 | OUTPUT=$1 164 | if [[ "${OUTPUT}" ]]; then 165 | stellar-core test "$TEST_FILTER" -r xml > "${OUTPUT}" 166 | shift 167 | else 168 | stellar-core test "$TEST_FILTER" -r xml 169 | fi 170 | SKIP_START=true 171 | ;; 172 | *) 173 | echo "Unknown container command $COMMAND" >&2 174 | exit 1 175 | esac 176 | done 177 | 178 | if [[ -z "${SKIP_START}" ]]; then 179 | exec /init -- stellar-core $CONFIG_OPTION run 180 | else 181 | echo "Setup complete. Skipping server start." 
#!/usr/bin/env python
"""Find the newest stellar-core core file, extract a backtrace with lldb,
and either email an alert via AWS SES (mode=aws) or print it (mode=local)."""

import logging
import sys
import ConfigParser
import os
import socket
import time
import subprocess
from string import Template
import textwrap


def format_time(epoch_time):
    """Render an epoch timestamp as UTC ISO-8601 (no timezone suffix)."""
    time_format = "%Y-%m-%dT%H:%M:%S"
    return time.strftime(time_format, time.gmtime(epoch_time))


class CoreMailer(object):
    def __init__(self, config):
        # config is a ConfigParser with a [Config] section (see __main__)
        self.config = config
        self.hostname = self.config.get('Config', 'hostname')
        self.out = sys.stdout

    def find_core(self):
        """Return the newest core file matching core_filter, or None."""
        path = self.config.get('Config', 'cores')
        core_filter = self.config.get('Config', 'core_filter')

        cores = [os.path.join(path, core) for core in os.listdir(path) if core_filter in core]

        if len(cores):
            return max(cores, key=os.path.getctime)

    def filter_logs(self, logs):
        """Keep only lines containing log_filter, stripping the syslog
        host/program prefix between the first space and the next colon."""
        log_filter = self.config.get('Config', 'log_filter')
        if not log_filter:
            return logs

        def strip_prefix(line):
            first_space = line.index(' ')
            following_colon = line.index(':', first_space)
            return line[0:first_space] + line[following_colon:]

        lines = logs.split("\n")
        filtered = filter(lambda line: log_filter in line, lines)
        stripped = map(strip_prefix, filtered)
        return "\n".join(stripped)

    def find_logs(self, epoch_time):
        """Grep the configured log for context lines around the crash time."""
        log = self.config.get('Config', 'log')
        formatted_time = format_time(epoch_time)
        logging.info('Searching %s for logs around %s', log, formatted_time)
        command = ["egrep",
                   "-C1000",
                   ("^%s" % formatted_time),
                   log]
        try:
            return self.filter_logs(subprocess.check_output(command))
        except subprocess.CalledProcessError:
            return 'Unable to retrieve logs around %s' % formatted_time

    def get_trace(self, core):
        """Run lldb in batch mode against the core and return its output."""
        binary = self.config.get('Config', 'bin')
        logging.info('Processing core file %s with binary %s', core, binary)

        # matschaffer: this is really awful
        # But lldb just exits with no output and exit code -11 if I try to run
        # this script as a container entry point
        lldb_command = "lldb-3.6 -f %(binary)s -c %(core)s --batch " + \
                       "-o 'target create -c \"%(core)s\" \"%(binary)s\"' " + \
                       "-o 'script import time; time.sleep(1)' " + \
                       "-o 'thread backtrace all'"
        command = ["script", "-c",
                   (lldb_command % {"core": core, "binary": binary})]

        return subprocess.check_output(command, stderr=subprocess.STDOUT)

    def send_alert(self, epoch_time, trace, logs):
        """Email an HTML crash report with the backtrace and extracted logs."""
        template_vars = {
            "hostname": self.hostname,
            "binary": self.config.get('Config', 'bin'),
            "formatted_time": format_time(epoch_time),
            "trace": trace,
            "logs": logs
        }

        sender = self.config.get('Config', 'from')
        recipient = self.config.get('Config', 'to')

        subject = 'stellar-core crash on %(hostname)s' % template_vars
        # HTML body; the <pre> blocks keep the backtrace/log whitespace intact
        template = textwrap.dedent("""
          <p>${binary} on ${hostname} crashed at ${formatted_time} with the
          following back traces:</p>

          <pre>
          ${trace}
          </pre>

          <p>Extracted logs</p>

          <pre>
          ${logs}
          </pre>
        """)
        body = Template(template).substitute(template_vars)

        logging.info("Sending core alert from %s to %s", sender, recipient)
        self.send_email(sender, recipient, subject, body)

    def send_email(self, sender, recipient, subject, body):
        # Imported lazily so mode=local works on hosts without boto installed;
        # only the aws path ever reaches this method.
        import boto.ses
        conn = boto.ses.connect_to_region(self.config.get('Config', 'region'))
        # noinspection PyTypeChecker
        conn.send_email(sender, subject, None, [recipient], html_body=body)

    def output_trace(self, epoch_time, trace):
        """Write a plain-text crash report to self.out (mode=local)."""
        template_vars = {
            "hostname": self.hostname,
            "binary": self.config.get('Config', 'bin'),
            "formatted_time": format_time(epoch_time),
            "trace": trace
        }

        template = textwrap.dedent("""
          ${binary} on ${hostname} crashed at ${formatted_time} with the
          following back traces:

          ${trace}
        """)
        body = Template(template).substitute(template_vars)
        self.out.write(body)

    def archive_core(self, core):
        """Upload the core via archive_command ({0}=local path, {1}=dest key),
        or just delete it when no command is configured."""
        command_string = self.config.get('Config', 'archive_command')
        if command_string:
            core_path = os.path.join(self.hostname, os.path.basename(core))
            command_string = command_string.format(core, core_path)
            logging.info(subprocess.check_output(command_string.split(' ')))
        else:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning("No archive command, just removing core file")
            os.remove(core)

    def run(self, single_core=None):
        """Process one core file (or the newest found) according to mode.

        BUG FIX: single_core now defaults to None — callers such as the unit
        tests invoke run() with no argument, which previously raised TypeError.
        """
        core = single_core or self.find_core()
        mode = self.config.get('Config', 'mode')

        if core:
            logging.info('Found core file %s', core)
            epoch_time = os.path.getctime(core)
            trace = self.get_trace(core)

            if mode == "aws":
                logs = self.find_logs(epoch_time)
                self.send_alert(epoch_time, trace, logs)
                self.archive_core(core)
            elif mode == "local":
                self.output_trace(epoch_time, trace)
            else:
                logging.fatal("Unknown MODE setting: %s", mode)
                sys.exit(1)
        else:
            logging.info('No core file found for processing')


if __name__ == "__main__":
    if len(sys.argv) > 1:
        single_core = sys.argv[1]
    else:
        single_core = None

    config_file = "/etc/core_file_processor.ini"

    logging.basicConfig(level=logging.INFO)

    config_parser = ConfigParser.ConfigParser({
        "region": "us-east-1",
        "cores": "/cores",
        "log": "/host/syslog",
        "log_filter": os.environ.get('CORE_LOG_FILTER'),
        "core_filter": "stellar-core",
        "hostname": socket.gethostname(),
        "from": "%(hostname)s ",
        "to": os.environ.get('CORE_ALERT_RECIPIENT'),
        "bin": "/usr/local/bin/stellar-core",
        "archive_command": os.environ.get('CORE_ARCHIVE_COMMAND'),
        "mode": os.environ.get('MODE', 'aws')
    })

    config_parser.add_section("Config")
    config_parser.read(config_file)

    mailer = CoreMailer(config_parser)
    mailer.run(single_core)
core-delivery-001.stg.stellar001.internal.stellar-ops.com docker/b60e5fc489e2[29424]: terminate called after throwing an instance of 'std::runtime_error' 30 | 2015-06-16T21:07:04.007836+00:00 core-delivery-001.stg.stellar001.internal.stellar-ops.com docker/b60e5fc489e2[29424]: what(): baseCheckDecode decoded to <5 bytes 31 | 2015-06-16T21:07:04.010012+00:00 core-delivery-001.stg.stellar001.internal.stellar-ops.com docker/b60e5fc489e2[29424]: /start: line 29: 28 Aborted (core dumped) stellar-core --newdb 32 | 2015-06-16T21:07:04.622539+00:00 core-delivery-001.stg.stellar001.internal.stellar-ops.com kernel: [1542072.767438] docker0: port 1(vethdd214d1) entered disabled state 33 | """) 34 | expected_logs = textwrap.dedent(""" 35 | 2015-06-16T21:07:04.007435+00:00: terminate called after throwing an instance of 'std::runtime_error' 36 | 2015-06-16T21:07:04.007836+00:00: what(): baseCheckDecode decoded to <5 bytes 37 | 2015-06-16T21:07:04.010012+00:00: /start: line 29: 28 Aborted (core dumped) stellar-core --newdb 38 | """).strip() 39 | mailer = create_mailer({"log_filter": "b60e5fc489e2"}) 40 | assert_equals(mailer.filter_logs(logs), expected_logs) 41 | 42 | def test_local_mode(): 43 | mailer = create_mailer({"mode": "local"}) 44 | mailer.find_core = lambda: __file__ 45 | mailer.get_trace = lambda _: "some traces" 46 | mailer.out = StringIO() 47 | mailer.run() 48 | assert_regexp_matches(mailer.out.getvalue(), "some traces") 49 | 50 | def test_aws_mode(): 51 | captures = {} 52 | 53 | def capture_output(sender, recipient, subject, body): 54 | captures["sender"] = sender 55 | captures["recipient"] = recipient 56 | captures["subject"] = subject 57 | captures["body"] = body 58 | 59 | mailer = create_mailer({"mode": "aws"}) 60 | 61 | mailer.find_core = lambda: __file__ 62 | mailer.find_logs = lambda _: "some logs" 63 | mailer.archive_core = lambda _: None 64 | mailer.get_trace = lambda _: "some traces" 65 | mailer.send_email = capture_output 66 | 67 | mailer.run() 68 | 
assert_regexp_matches(captures["subject"], "crash") 69 | assert_regexp_matches(captures["body"], "some traces") 70 | --------------------------------------------------------------------------------