├── .gitignore ├── README.md ├── tier-1 ├── salt-test │ ├── Dockerfile │ └── docker-compose.yml ├── salt │ ├── build-essential.sls │ ├── connector │ │ ├── files │ │ │ └── launch.config.js │ │ └── init.sls │ ├── git.sls │ ├── gui.sls │ ├── httpie.sls │ ├── node.sls │ ├── pm2.sls │ └── top.sls └── terraform │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tfvars │ └── variables.tf └── tier-2 ├── salt-test ├── Dockerfile └── docker-compose.yml ├── salt ├── build-essential.sls ├── connector │ ├── files │ │ └── launch.config.js │ └── init.sls ├── git.sls ├── gui.sls ├── httpie.sls ├── node.sls ├── pm2.sls └── top.sls └── terraform ├── main.tf ├── outputs.tf ├── terraform.tfvars └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | # Local .terraform directories 2 | **/.terraform/* 3 | 4 | # .tfstate files 5 | *.tfstate 6 | *.tfstate.* 7 | 8 | **/*.swp 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Terraform Connector 2 | > Terraform files for the ILP connector 3 | 4 | - [Overview](#overview) 5 | - [Selecting your setup](#selecting-your-setup) 6 | - [Tier 1 with XRP and AWS](#tier-1-with-xrp-and-aws) 7 | - [Set up your domain](#set-up-your-domain) 8 | - [Acting as a server](#acting-as-a-server) 9 | - [Adding another peer](#adding-another-peer) 10 | - [Upgrading to SSL](#upgrading-to-ssl) 11 | - [Tier 2 with XRP and AWS](#tier-2-with-xrp-and-aws) 12 | - [Access your Connector](#access-your-connector) 13 | - [Use as Moneyd](#use-as-moneyd) 14 | - [Monitor with Moneyd-GUI](#monitor-with-moneyd-gui) 15 | - [Run Both](#run-both) 16 | 17 | ## Overview 18 | 19 | This repo contains instructions for how to run an Interledger connector. As the 20 | community creates more ways to deploy the connector, they'll be added to this 21 | repository. 
22 | 23 | These instructions are intended for people who want to take part in the early 24 | Interledger network as connectors. You'll have to find other members of the 25 | community to peer with, and will have to maintain your connector in order to 26 | stay on the network. 27 | 28 | These instructions will not be perfect, so don't hesitate to ask for help in 29 | our [Gitter](https://gitter.im/interledger/Lobby). If you find any mistakes, 30 | please submit a PR to this repo to help future readers. 31 | 32 | **If you want to try Interledger out as a regular user, look at 33 | [moneyd](https://github.com/sharafian/moneyd)**. Moneyd is a piece of software 34 | that runs a "home router" for the interledger. It exposes Interledger access to 35 | applications on your machine, and will forward packets to an upstream provider. 36 | 37 | ### Selecting your setup 38 | 39 | The instructions you'll want to follow depend on: 40 | 41 | 1. The ledger(s) you'll be peering over 42 | 2. Whether you'll have a parent connector 43 | 3. What hosting provider you want to use 44 | 45 | **1**: Interledger currently has functioning integrations for both XRP and Ethereum. 46 | Connectors on the live network are currently using XRP, but the first 47 | connectors peering over Ethereum will be deployed soon. Instructions for a 48 | connector over Ethereum will be added to this repository once this happens. 49 | 50 | **2**: The only difference between "Tier 1" and "Tier 2" connectors is in the 51 | routing topology. A Tier 1 connector acts like a Tier 1 ISP. It is a backbone 52 | node in the network and requires more upkeep. You must also find other 53 | connectors on the network willing to manually peer with you. If you're 54 | interested in running a Tier 1 connector, you can find a peer on the 55 | Interledger Gitter or mailing list, both accessible from 56 | [interledger.org](https://interledger.org). 57 | 58 | **3**: The terraform files currently in this repo are specific to Amazon AWS. 
59 | The salt files used to provision an already running instance are portable 60 | across any hosting provider. If any community members want to add terraform 61 | files and instructions for their hosting provider of choice, they can submit a 62 | PR to this repo. 63 | 64 | ## Tier 1 with XRP and AWS 65 | 66 | - Start out by cloning this repo. Then `cd` into the `tier-1` directory. 67 | 68 | - Open `./terraform/terraform.tfvars` in your editor of choice. This contains 69 | some details that Terraform uses to create your server. 70 | 71 | - Replace `~/.ssh/id_rsa.pub` (line 2) with the path of your public key. This should be 72 | whatever key you ordinarily use for SSH. When you deploy, terraform will 73 | upload it to your server so that you can SSH in. 74 | 75 | - Add your private key to ssh-agent using the command `ssh-add ~/.ssh/id_rsa` 76 | where `id_rsa` is your private key. Use `ssh-add -l` to verify that your private key 77 | has been loaded into the ssh-agent. 78 | 79 | - Replace `us-east-1` (line 3) with the AWS region you want to run your connector in. 80 | You can find the different options in `./terraform/variables.tf`. 81 | 82 | - Replace `example.com` (line 4) with a domain that you own. Once you've 83 | deployed, follow the [set up your domain](#set-up-your-domain) instructions 84 | to point it at your connector. 85 | 86 | - Open `./salt/connector/files/launch.config.js` in your editor of choice. This 87 | file contains the configuration for your connector. If you want to do any 88 | advanced configuration of this file, look at the [ILP connector 89 | README](https://github.com/interledgerjs/ilp-connector). 90 | 91 | - Replace `YOUR_HOT_WALLET_RIPPLE_ADDRESS` (line 4) with your hot wallet ripple 92 | address. This should be an address with at least 35 XRP. Do not keep too much 93 | money on this address, in case your server is ever compromised. 94 | 95 | - Replace `YOUR_HOT_WALLET_RIPPLE_SECRET` (line 5) with your hot wallet ripple secret. 
96 | 97 | - Ask your peer to add a peer plugin for your connector. They'll have to follow 98 | the [Adding another peer](#adding-another-peer) instructions, and then will 99 | be able to give you a URI to connect to their server. Replace 100 | `SERVER_URI_GIVEN_TO_YOU_BY_YOUR_PEER` (line 19) with this URI. 101 | 102 | - (If you want to run a websocket server for this peering relationship instead 103 | of using your peer's server, follow the [Acting as a 104 | server](#acting-as-a-server) instructions). 105 | 106 | - Ask your peer for their ripple hot wallet address. Replace 107 | `RIPPLE_ADDRESS_OF_PEER` (line 23) with their ripple hot wallet address. 108 | 109 | - Choose a unique global prefix for your connector, and put it in place of `MY 110 | ILP ADDRESS` (line 41). Some examples of prefixes that have already been used 111 | are `g.zero`, `g.africa`, and `g.pando`. 112 | 113 | - Replace the `sdb.amazonaws.com` URL with the SDB URL corresponding to your AWS region 114 | (the one you entered into `./terraform/variables.tf`). [Here's the list of the SimpleDB URLs 115 | for each AWS region.](https://docs.aws.amazon.com/general/latest/gr/rande.html#sdb_region) 116 | 117 | - Go to your AWS management dashboard and open the IAM service. If you do not have 118 | an AWS account, create one and add your billing details. 119 | 120 | - In IAM, go to "Manage Users" and add a new user. Use an existing policy, and select 121 | "AdministratorAccess". Set the user's name to "connector". 122 | 123 | - Once the user is created, save the Access Key and Secret Key. Create a file called 124 | `~/terraform.sh` and copy in the following: 125 | 126 | ``` 127 | #!/bin/bash 128 | 129 | AWS_ACCESS_KEY=XXXXXXX AWS_SECRET_KEY=XXXXXXXX terraform $* 130 | ``` 131 | 132 | - Replace the values in `~/terraform.sh` with the values you copied from IAM. 133 | 134 | - Install [Terraform](https://www.terraform.io/) on your machine. 135 | 136 | - Now it's time for you to deploy. 
Run: 137 | 138 | ``` 139 | cd terraform 140 | bash ~/terraform.sh init 141 | bash ~/terraform.sh apply 142 | ``` 143 | 144 | - Enter 'yes' when Terraform asks you to confirm. Wait for the deploy to 145 | finish. It should end by printing your server's IP address. If there was an 146 | error, 147 | 148 | - If you did not encounter any errors, then your connector is running! Follow 149 | [Access your Connector](#access-your-connector) to start using it. 150 | 151 | - If you encounter any issues, you can use the IP address that Terraform 152 | returned to SSH into the machine. Once you're inside the machine, you can 153 | use `sudo pm2 logs` to see the connector's logs. You can fix the issue in the 154 | configuration files on your local machine, then [redeploy](#redeploy). 155 | 156 | ### Set up your domain 157 | 158 | - Your connector must be deployed already. Complete the deploy instructions, 159 | then continue here. 160 | 161 | - Go to your AWS management console and open the "Route 53" service. 162 | 163 | - Under "Hosted Zones," you should see an entry for the domain that you 164 | configured on your connector. Click that entry. 165 | 166 | - Select the nameservers on the hosted zone, and configure your domain to point 167 | at them. Give the change a little while to propagate. 168 | 169 | - You're done! Your domain now can be used for your peering relationships. 170 | 171 | ### Acting as a server 172 | 173 | - Open your `./salt/connector/files/launch.config.js`. 174 | 175 | - On the peer that you want to be a server for, replace: 176 | 177 | ``` 178 | server: ".....", 179 | ``` 180 | 181 | With 182 | 183 | ``` 184 | listener: { 185 | port: 8080, 186 | secret: "GENERATE_A_SECURE_RANDOM_SECRET" 187 | }, 188 | ``` 189 | 190 | - If you already have a `listener` with port 8080, you'll have to use a different port. 191 | If you're using port 8080, skip the following indented steps. 192 | 193 | - If you're using a port other than 8080, open `./terraform/main.tf`. 
For 194 | example, let's say you're using port 1080. 195 | 196 | - In `resource "aws_security_group" "elb"`, add the following block: 197 | 198 | ``` 199 | ingress { 200 | from_port = 1080 201 | to_port = 1080 202 | protocol = "tcp" 203 | cidr_blocks = ["0.0.0.0/0"] 204 | } 205 | ``` 206 | 207 | - In `resource "aws_security_group" "default"`, add the following block: 208 | 209 | ``` 210 | ingress { 211 | from_port = 1080 212 | to_port = 1080 213 | protocol = "tcp" 214 | cidr_blocks = ["10.0.0.0/16"] 215 | } 216 | ``` 217 | 218 | - In `resource "aws_elb" "web"`, add the following block: 219 | 220 | ``` 221 | listener { 222 | instance_port = 1080 223 | instance_protocol = "tcp" 224 | lb_port = 1080 225 | lb_protocol = "tcp" 226 | } 227 | ``` 228 | 229 | - Now the `server` you can give your peer is 230 | `btp+ws://:GENERATE_A_SECURE_RANDOM_SECRET@btp.example.com:PORT` with: 231 | - `example.com` as your domain 232 | - `GENERATE_A_SECURE_RANDOM_SECRET` as your generated secret 233 | - `8080` as your instance port (the load balancer will expose 80). If you 234 | followed the indented steps above replace 8080 with the port you used (e.g. 1080). 235 | 236 | (Change to `wss` if you already completed [Upgrading to SSL](#upgrading-to-ssl)) 237 | 238 | - Make sure you [Set up your domain](#set-up-your-domain) once your connector 239 | is deployed, if it is not deployed already. 240 | 241 | - [Redeploy](#redeploy) your connector if it is already deployed. 242 | 243 | ### Adding another peer 244 | 245 | - Open `./salt/connector/files/launch.config.js` in your editor of choice. 
246 | 247 | - Add the following block, after the constants declared at the top of the file: 248 | 249 | ``` 250 | const secondPeerPlugin = { 251 | relation: 'peer', 252 | plugin: 'ilp-plugin-xrp-paychan', 253 | assetCode: 'XRP', 254 | assetScale: 9, 255 | balance: { 256 | maximum: '10000000', 257 | settleThreshold: '-5000000', 258 | settleTo: '0' 259 | }, 260 | options: { 261 | assetScale: 9, 262 | server: 'SERVER_URI_GIVEN_TO_YOU_BY_YOUR_PEER', 263 | rippledServer: 'wss://s1.ripple.com', 264 | secret, 265 | address, 266 | peerAddress: 'RIPPLE_ADDRESS_OF_PEER' 267 | } 268 | } 269 | ``` 270 | 271 | - Follow the instructions in [Tier 1 with XRP and 272 | AWS](#tier-1-with-xrp-and-aws) to fill in the placeholder fields. If you are 273 | the websocket server in this relationship, you'll also have to follow [Acting 274 | as a Server](#acting-as-a-server). 275 | 276 | - In the `CONNECTOR_ACCOUNTS` object, add another entry that says: 277 | 278 | ``` 279 | secondPeer: secondPeerPlugin 280 | ``` 281 | 282 | - [Redeploy](#redeploy) your connector. 283 | 284 | ### Upgrading to SSL 285 | 286 | - Go to your AWS management console. Select the "Certificate Manager" service. 287 | 288 | - Select "Request a Certificate," and request `*.example.com`, where `example.com` 289 | is the domain you put in your `./terraform/terraform.tfvars`. 290 | 291 | - Follow the instructions that AWS provides. If you've configured your domain 292 | via Route 53 (which Terraform should have done automatically), AWS will go 293 | through the process automatically. 294 | 295 | - Open `./terraform/main.tf` in your editor of choice. 
296 | 297 | - Add the following block at the top of the file (replacing `example.com` with 298 | your domain): 299 | 300 | ``` 301 | data "aws_acm_certificate" "web-cert" { 302 | domain = "*.example.com" 303 | statuses = ["ISSUED"] 304 | } 305 | ``` 306 | 307 | - In the `resource "aws_security_group" "elb"` block, replace the entire 308 | `ingress` block containing `from_port = 80` with the following block: 309 | 310 | ``` 311 | ingress { 312 | from_port = 443 313 | to_port = 443 314 | protocol = "tcp" 315 | cidr_blocks = ["0.0.0.0/0"] 316 | } 317 | ``` 318 | 319 | - In the `resource "aws_elb" "web"` block, change `lb_port` to `443` wherever 320 | it previously said `80`, in any `listener` block. Change `lb_protocol` to 321 | `ssl` on all `listener` blocks. In every `listener` block, add the following 322 | line at the end: 323 | 324 | ``` 325 | ssl_certificate_id = "${data.aws_acm_certificate.web-cert.arn}" 326 | ``` 327 | 328 | - If you've already deployed, change directories into `./terraform` and run 329 | `bash ~/terraform.sh apply` to apply these changes. You don't need to taint 330 | anything; Terraform is smart enough to notice which blocks you've edited. 331 | 332 | ### Redeploy 333 | 334 | In order to redeploy, you must be in your terraform directory. This will fail 335 | if you aren't on the same machine that you initially deployed from. 336 | 337 | ``` 338 | bash ~/terraform.sh taint aws_instance.web 339 | bash ~/terraform.sh apply 340 | ``` 341 | 342 | ## Tier 2 with XRP and AWS 343 | 344 | - Start out by cloning this repo. Then `cd` into the `tier-2` directory. 345 | 346 | - Open `./terraform/terraform.tfvars` in your editor of choice. This contains 347 | some details that Terraform uses to create your server. 348 | 349 | - Replace `~/.ssh/id_rsa.pub` (line 2) with the path of your public key. This should be 350 | whatever key you ordinarily use for SSH. When you deploy, terraform will 351 | upload it to your server so that you can SSH in. 
352 | 353 | - Add your private key to ssh-agent using the command `ssh-add ~/.ssh/id_rsa` 354 | where `id_rsa` is your private key. Use `ssh-add -l` to verify that your private key 355 | has been loaded into the ssh-agent. 356 | 357 | - Replace `us-east-1` (line 3) with the AWS region you want to run your connector in. 358 | You can find the different options in `./terraform/variables.tf`. 359 | 360 | - Replace `example.com` (line 4) with a domain that you own. Once you've 361 | deployed, follow the [set up your domain](#set-up-your-domain) instructions 362 | to point it at your connector. 363 | 364 | - Open `./salt/connector/files/launch.config.js` in your editor of choice. This 365 | file contains the configuration for your connector. If you want to do any 366 | advanced configuration of this file, look at the [ILP connector 367 | README](https://github.com/interledgerjs/ilp-connector). 368 | 369 | - Replace `YOUR_HOT_WALLET_RIPPLE_ADDRESS` (line 4) with your hot wallet ripple 370 | address. This should be an address with at least 35 XRP. Do not keep too much 371 | money on this address, in case your server is ever compromised. 372 | 373 | - Replace `YOUR_HOT_WALLET_RIPPLE_SECRET` (line 5) with your hot wallet ripple secret. 374 | 375 | - Find a parent BTP host on the current [Connector 376 | List](https://github.com/sharafian/moneyd#connector-list). You can also ask 377 | for a suitable parent on the [Gitter](https://gitter.im/interledger/Lobby). 378 | Replace `YOUR_PARENT_HOST` (line 13) with this host. 379 | 380 | - Replace the `sdb.amazonaws.com` URL with the SDB URL corresponding to your AWS region 381 | (the one you entered into `./terraform/variables.tf`). [Here's the list of the SimpleDB URLs 382 | for each AWS region.](https://docs.aws.amazon.com/general/latest/gr/rande.html#sdb_region) 383 | 384 | - Go to your AWS management dashboard and open the IAM service. If you do not have 385 | an AWS account, create one and add your billing details. 
386 | 387 | - In IAM, go to "Manage Users" and add a new user. Use an existing policy, and select 388 | "AdministratorAccess". Set the user's name to "connector". 389 | 390 | - Once the user is created, save the Access Key and Secret Key. Create a file called 391 | `~/terraform.sh`, and copy in the following: 392 | 393 | ``` 394 | #!/bin/bash 395 | 396 | AWS_ACCESS_KEY=XXXXXXX AWS_SECRET_KEY=XXXXXXXX terraform $* 397 | ``` 398 | 399 | - Replace the values in `~/terraform.sh` with the values you copied from IAM. 400 | 401 | - Install [Terraform](https://www.terraform.io/) on your machine. 402 | 403 | - Now it's time for you to deploy. Run: 404 | 405 | ``` 406 | cd terraform 407 | bash ~/terraform.sh init 408 | bash ~/terraform.sh apply 409 | ``` 410 | 411 | - Enter 'yes' when Terraform asks you to confirm. Wait for the deploy to 412 | finish. It should end by printing your server's IP address. If there was an 413 | error, 414 | 415 | - If you did not encounter any errors, then your connector is running! Follow 416 | [Access your Connector](#access-your-connector) to start using it. 417 | 418 | - If you encounter any issues, you can use the IP address that Terraform 419 | returned to SSH into the machine. Once you're inside the machine, you can 420 | use `sudo pm2 logs` to see the connector's logs. You can fix the issue in the 421 | configuration files on your local machine, then [redeploy](#redeploy). 422 | 423 | ## Access your Connector 424 | 425 | ### Use as your Moneyd 426 | 427 | You can access your deployed connector by tunnelling its 428 | `ilp-plugin-mini-accounts` instance to your local machine. Then any application 429 | can access it via port 7768, just as though you were running moneyd. 430 | 431 | You should have an IP address for your connector, once it's deployed. 
432 | To get access to your funds locally, just run the following command: 433 | 434 | ``` 435 | ssh -N -L 7768:localhost:7768 ubuntu@YOUR_IP_ADDRESS 436 | ``` 437 | 438 | Replace `YOUR_IP_ADDRESS` with your IP address. This command should produce no 439 | output; just keep the command running to keep the port-forward running. 440 | 441 | To test your ILP connection, try these [examples from moneyd's 442 | README.](https://github.com/sharafian/moneyd#sending-payments) 443 | 444 | ### Monitor with Moneyd-GUI 445 | 446 | The connector you deployed comes with a GUI to view routes, ping destinations, 447 | and send test payments. This GUI runs as a webserver. 448 | 449 | To access it, forward the GUI's port to your local machine. 450 | 451 | ``` 452 | ssh -N -L 7770:localhost:7770 ubuntu@YOUR_IP_ADDRESS 453 | ``` 454 | 455 | You should have an IP address for your connector, once it's deployed. Replace 456 | `YOUR_IP_ADDRESS` with this IP address. This command should produce no output; 457 | just keep the command running to keep the port-forward running. 458 | 459 | Open `http://localhost:7770` to see your connector's control panel. 
460 | 461 | #### Run Both 462 | 463 | If you want to forward both Moneyd and Moneyd GUI, the port-forward commands 464 | can be combined 465 | 466 | ``` 467 | ssh -N -L 7770:localhost:7770 -L 7768:localhost:7768 ubuntu@YOUR_IP_ADDRESS 468 | ``` 469 | -------------------------------------------------------------------------------- /tier-1/salt-test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | ENV SALT_VERSION=2017.7.1 4 | 5 | RUN apt-get update \ 6 | && apt-get install -y vim-nox curl \ 7 | && curl -L https://repo.saltstack.com/apt/ubuntu/16.04/amd64/archive/${SALT_VERSION}/SALTSTACK-GPG-KEY.pub | apt-key add - \ 8 | && echo "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/archive/${SALT_VERSION} xenial main" > /etc/apt/sources.list.d/saltstack.list \ 9 | && apt-get update \ 10 | && apt-get install -y libdbus-1-3 libnih-dbus1 sudo software-properties-common iputils-ping apt-transport-https debian-archive-keyring \ 11 | && apt-get install -y salt-minion=${SALT_VERSION}* \ 12 | 13 | # fix for getty consume 100% cpu 14 | && systemctl disable getty@tty1.service \ 15 | 16 | # fix missing resolvconf 17 | && cd /tmp \ 18 | && apt-get download resolvconf \ 19 | && dpkg --unpack resolvconf_*_all.deb \ 20 | && rm /var/lib/dpkg/info/resolvconf.postinst \ 21 | && dpkg --configure resolvconf \ 22 | && apt-get install -yf \ 23 | && apt-mark hold resolvconf \ 24 | 25 | # cleanup 26 | && rm -rf /var/lib/apt/lists/* \ 27 | && apt-get -y autoremove \ 28 | && apt-get clean 29 | 30 | CMD ["salt-call","--local","state.apply"] 31 | # CMD ["salt-call","--local","state.apply","-l","debug"] 32 | -------------------------------------------------------------------------------- /tier-1/salt-test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | web: 4 | build: . 
5 | volumes: 6 | - "../salt:/srv/salt" 7 | -------------------------------------------------------------------------------- /tier-1/salt/build-essential.sls: -------------------------------------------------------------------------------- 1 | build-essential: 2 | pkg.installed 3 | -------------------------------------------------------------------------------- /tier-1/salt/connector/files/launch.config.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const path = require('path') 4 | const address = 'YOUR_HOT_WALLET_RIPPLE_ADDRESS' 5 | const secret = 'YOUR_HOT_WALLET_RIPPLE_SECRET' 6 | 7 | const peerPlugin = { 8 | relation: 'peer', 9 | plugin: 'ilp-plugin-xrp-paychan', 10 | assetCode: 'XRP', 11 | assetScale: 9, 12 | balance: { 13 | maximum: '10000000', 14 | settleThreshold: '-5000000', 15 | settleTo: '0' 16 | }, 17 | options: { 18 | assetScale: 9, 19 | server: 'SERVER_URI_GIVEN_TO_YOU_BY_YOUR_PEER', 20 | rippledServer: 'wss://s1.ripple.com', 21 | secret, 22 | address, 23 | peerAddress: 'RIPPLE_ADDRESS_OF_PEER' 24 | } 25 | } 26 | 27 | const miniAccounts = { 28 | relation: 'child', 29 | plugin: 'ilp-plugin-mini-accounts', 30 | assetCode: 'XRP', 31 | assetScale: 9, 32 | options: { 33 | port: 7768 34 | } 35 | } 36 | 37 | const connectorApp = { 38 | name: 'connector', 39 | env: { 40 | DEBUG: 'ilp*,connector*', 41 | CONNECTOR_ILP_ADDRESS: 'MY ILP ADDRESS', 42 | CONNECTOR_ENV: 'production', 43 | CONNECTOR_BACKEND: 'one-to-one', 44 | CONNECTOR_ADMIN_API: true, 45 | CONNECTOR_ADMIN_API_PORT: 7769, 46 | CONNECTOR_SPREAD: '0', 47 | CONNECTOR_STORE: 'ilp-store-simpledb', 48 | CONNECTOR_STORE_CONFIG: JSON.stringify({ 49 | host: 'sdb.amazonaws.com', 50 | domain: 'connector', 51 | role: 'connector-instance' 52 | }), 53 | CONNECTOR_ACCOUNTS: JSON.stringify({ 54 | peer: peerPlugin, 55 | local: miniAccounts 56 | }) 57 | }, 58 | script: path.resolve(__dirname, 'src/index.js') 59 | } 60 | 61 | module.exports = { apps: [ 
connectorApp ] } 62 | -------------------------------------------------------------------------------- /tier-1/salt/connector/init.sls: -------------------------------------------------------------------------------- 1 | connector-dir: 2 | file.directory: 3 | - name: /srv/app 4 | - user: ubuntu 5 | 6 | connector-data-dir: 7 | file.directory: 8 | - name: /var/lib/connector 9 | - user: ubuntu 10 | 11 | connector-clone: 12 | git.latest: 13 | - name: https://github.com/interledgerjs/ilp-connector.git 14 | - user: ubuntu 15 | - target: /srv/app 16 | - rev: master 17 | - require: 18 | - pkg: build-essential 19 | - pkg: nodejs 20 | - pkg: git 21 | 22 | connector-install: 23 | cmd.run: 24 | - name: npm install --json 25 | - runas: ubuntu 26 | - cwd: /srv/app 27 | # npm.bootstrap: 28 | # - name: /srv/app 29 | 30 | connector-install-plugins: 31 | cmd.run: 32 | - name: npm install ilp-plugin-xrp-paychan ilp-plugin-mini-accounts ilp-store-simpledb 33 | - runas: ubuntu 34 | - cwd: /srv/app 35 | 36 | connector-launch-script: 37 | file.managed: 38 | - name: /srv/app/launch.config.js 39 | - user: ubuntu 40 | - source: 41 | - salt://connector/files/launch.config.js 42 | 43 | connector-start: 44 | cmd.run: 45 | - name: pm2 start launch.config.js 46 | - runas: ubuntu 47 | - cwd: /srv/app 48 | -------------------------------------------------------------------------------- /tier-1/salt/git.sls: -------------------------------------------------------------------------------- 1 | git: 2 | pkg.installed 3 | -------------------------------------------------------------------------------- /tier-1/salt/gui.sls: -------------------------------------------------------------------------------- 1 | gui-dir: 2 | file.directory: 3 | - name: /srv/gui 4 | - user: ubuntu 5 | 6 | gui-clone: 7 | git.latest: 8 | - name: https://github.com/sharafian/moneyd-gui.git 9 | - target: /srv/gui 10 | - rev: master 11 | - user: ubuntu 12 | - require: 13 | - pkg: build-essential 14 | - pkg: nodejs 15 | - pkg: git 
16 | 17 | gui-install: 18 | cmd.run: 19 | - name: npm install --unsafe-perm --json --production 20 | - runas: ubuntu 21 | - cwd: /srv/gui 22 | 23 | gui-start: 24 | cmd.run: 25 | - name: pm2 start index.js 26 | - runas: ubuntu 27 | - cwd: /srv/gui 28 | -------------------------------------------------------------------------------- /tier-1/salt/httpie.sls: -------------------------------------------------------------------------------- 1 | httpie: 2 | pkg.installed 3 | -------------------------------------------------------------------------------- /tier-1/salt/node.sls: -------------------------------------------------------------------------------- 1 | nodejs: 2 | pkgrepo.managed: 3 | - humanname: Node.js 4 | - name: deb https://deb.nodesource.com/node_8.x xenial main 5 | - dist: xenial 6 | - file: /etc/apt/sources.list.d/nodesource.list 7 | - require_in: 8 | - pkg: nodejs 9 | - gpgcheck: 1 10 | - key_url: https://deb.nodesource.com/gpgkey/nodesource.gpg.key 11 | pkg.installed: [] 12 | -------------------------------------------------------------------------------- /tier-1/salt/pm2.sls: -------------------------------------------------------------------------------- 1 | pm2: 2 | npm.installed: [] 3 | -------------------------------------------------------------------------------- /tier-1/salt/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | - node 4 | - build-essential 5 | - git 6 | - httpie 7 | - pm2 8 | - connector 9 | - gui 10 | -------------------------------------------------------------------------------- /tier-1/terraform/main.tf: -------------------------------------------------------------------------------- 1 | # Specify the provider and access details 2 | provider "aws" { 3 | region = "${var.aws_region}" 4 | } 5 | 6 | # Create a VPC to launch our instances into 7 | resource "aws_vpc" "default" { 8 | cidr_block = "10.0.0.0/16" 9 | } 10 | 11 | # Create an internet gateway to give our subnet 
access to the outside world 12 | resource "aws_internet_gateway" "default" { 13 | vpc_id = "${aws_vpc.default.id}" 14 | } 15 | 16 | # Grant the VPC internet access on its main route table 17 | resource "aws_route" "internet_access" { 18 | route_table_id = "${aws_vpc.default.main_route_table_id}" 19 | destination_cidr_block = "0.0.0.0/0" 20 | gateway_id = "${aws_internet_gateway.default.id}" 21 | } 22 | 23 | # Create a subnet to launch our instances into 24 | resource "aws_subnet" "default" { 25 | vpc_id = "${aws_vpc.default.id}" 26 | cidr_block = "10.0.1.0/24" 27 | map_public_ip_on_launch = true 28 | } 29 | 30 | # A security group for the ELB so it is accessible via the web 31 | resource "aws_security_group" "elb" { 32 | name = "connector_elb" 33 | description = "Used for btp.${var.my_domain} load-balancer" 34 | vpc_id = "${aws_vpc.default.id}" 35 | 36 | # HTTP access from anywhere 37 | ingress { 38 | from_port = 80 39 | to_port = 80 40 | protocol = "tcp" 41 | cidr_blocks = ["0.0.0.0/0"] 42 | } 43 | 44 | # outbound internet access 45 | egress { 46 | from_port = 0 47 | to_port = 0 48 | protocol = "-1" 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 | } 52 | 53 | # Our default security group to access 54 | # the instances over SSH and HTTP 55 | resource "aws_security_group" "default" { 56 | name = "connector" 57 | description = "Used for btp.${var.my_domain}" 58 | vpc_id = "${aws_vpc.default.id}" 59 | 60 | # SSH access from anywhere 61 | ingress { 62 | from_port = 22 63 | to_port = 22 64 | protocol = "tcp" 65 | cidr_blocks = ["0.0.0.0/0"] 66 | } 67 | 68 | # HTTP access from the VPC 69 | ingress { 70 | from_port = 8080 71 | to_port = 8080 72 | protocol = "tcp" 73 | cidr_blocks = ["10.0.0.0/16"] 74 | } 75 | 76 | # outbound internet access 77 | egress { 78 | from_port = 0 79 | to_port = 0 80 | protocol = "-1" 81 | cidr_blocks = ["0.0.0.0/0"] 82 | } 83 | } 84 | 85 | resource "aws_elb" "web" { 86 | name = "connector-elb" 87 | 88 | subnets = ["${aws_subnet.default.id}"] 89 | 
security_groups = ["${aws_security_group.elb.id}"] 90 | instances = ["${aws_instance.web.id}"] 91 | 92 | listener { 93 | instance_port = 8080 94 | instance_protocol = "tcp" 95 | lb_port = 80 96 | lb_protocol = "tcp" 97 | } 98 | } 99 | 100 | resource "aws_key_pair" "auth" { 101 | key_name = "${var.key_name}" 102 | public_key = "${file(var.public_key_path)}" 103 | } 104 | 105 | resource "aws_simpledb_domain" "connector" { 106 | name = "connector" 107 | } 108 | 109 | resource "aws_iam_policy" "connector-policy" { 110 | name = "connector-policy" 111 | policy = < /etc/apt/sources.list.d/saltstack.list \ 9 | && apt-get update \ 10 | && apt-get install -y libdbus-1-3 libnih-dbus1 sudo software-properties-common iputils-ping apt-transport-https debian-archive-keyring \ 11 | && apt-get install -y salt-minion=${SALT_VERSION}* \ 12 | 13 | # fix for getty consume 100% cpu 14 | && systemctl disable getty@tty1.service \ 15 | 16 | # fix missing resolvconf 17 | && cd /tmp \ 18 | && apt-get download resolvconf \ 19 | && dpkg --unpack resolvconf_*_all.deb \ 20 | && rm /var/lib/dpkg/info/resolvconf.postinst \ 21 | && dpkg --configure resolvconf \ 22 | && apt-get install -yf \ 23 | && apt-mark hold resolvconf \ 24 | 25 | # cleanup 26 | && rm -rf /var/lib/apt/lists/* \ 27 | && apt-get -y autoremove \ 28 | && apt-get clean 29 | 30 | CMD ["salt-call","--local","state.apply"] 31 | # CMD ["salt-call","--local","state.apply","-l","debug"] 32 | -------------------------------------------------------------------------------- /tier-2/salt-test/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | web: 4 | build: . 
5 | volumes: 6 | - "../salt:/srv/salt" 7 | -------------------------------------------------------------------------------- /tier-2/salt/build-essential.sls: -------------------------------------------------------------------------------- 1 | build-essential: 2 | pkg.installed 3 | -------------------------------------------------------------------------------- /tier-2/salt/connector/files/launch.config.js: -------------------------------------------------------------------------------- 1 | 'use strict' 2 | 3 | const path = require('path') 4 | const address = 'YOUR_HOT_WALLET_RIPPLE_ADDRESS' 5 | const secret = 'YOUR_HOT_WALLET_RIPPLE_SECRET' 6 | 7 | const parentPlugin = { 8 | relation: 'parent', 9 | plugin: 'ilp-plugin-xrp-asym-client', 10 | assetCode: 'XRP', 11 | assetScale: 9, 12 | options: { 13 | assetScale: 9, 14 | server: 'btp+wss://YOUR_PARENT_HOST', 15 | address, 16 | secret 17 | } 18 | } 19 | 20 | const miniAccounts = { 21 | relation: 'child', 22 | plugin: 'ilp-plugin-mini-accounts', 23 | assetCode: 'XRP', 24 | assetScale: 9, 25 | options: { 26 | port: 7768 27 | } 28 | } 29 | 30 | const connectorApp = { 31 | name: 'connector', 32 | env: { 33 | DEBUG: 'ilp*,connector*', 34 | CONNECTOR_ENV: 'production', 35 | CONNECTOR_BACKEND: 'one-to-one', 36 | CONNECTOR_ADMIN_API: true, 37 | CONNECTOR_ADMIN_API_PORT: 7769, 38 | CONNECTOR_SPREAD: '0', 39 | CONNECTOR_STORE: 'ilp-store-simpledb', 40 | CONNECTOR_STORE_CONFIG: JSON.stringify({ 41 | host: 'sdb.amazonaws.com', 42 | domain: 'connector', 43 | role: 'connector-instance' 44 | }), 45 | CONNECTOR_ACCOUNTS: JSON.stringify({ 46 | parent: parentPlugin, 47 | local: miniAccounts 48 | }) 49 | }, 50 | script: path.resolve(__dirname, 'src/index.js') 51 | } 52 | 53 | module.exports = { apps: [ connectorApp ] } 54 | -------------------------------------------------------------------------------- /tier-2/salt/connector/init.sls: -------------------------------------------------------------------------------- 1 | connector-dir: 2 | 
file.directory: 3 | - name: /srv/app 4 | - user: ubuntu 5 | 6 | connector-data-dir: 7 | file.directory: 8 | - name: /var/lib/connector 9 | - user: ubuntu 10 | 11 | connector-clone: 12 | git.latest: 13 | - name: https://github.com/interledgerjs/ilp-connector.git 14 | - user: ubuntu 15 | - target: /srv/app 16 | - rev: master 17 | - require: 18 | - pkg: build-essential 19 | - pkg: nodejs 20 | - pkg: git 21 | 22 | connector-install: 23 | cmd.run: 24 | - name: npm install --json 25 | - runas: ubuntu 26 | - cwd: /srv/app 27 | # npm.bootstrap: 28 | # - name: /srv/app 29 | 30 | connector-install-plugins: 31 | cmd.run: 32 | - name: npm install ilp-plugin-xrp-asym-client ilp-plugin-mini-accounts ilp-store-simpledb 33 | - runas: ubuntu 34 | - cwd: /srv/app 35 | 36 | connector-launch-script: 37 | file.managed: 38 | - name: /srv/app/launch.config.js 39 | - user: ubuntu 40 | - source: 41 | - salt://connector/files/launch.config.js 42 | 43 | connector-start: 44 | cmd.run: 45 | - name: pm2 start launch.config.js 46 | - runas: ubuntu 47 | - cwd: /srv/app 48 | -------------------------------------------------------------------------------- /tier-2/salt/git.sls: -------------------------------------------------------------------------------- 1 | git: 2 | pkg.installed 3 | -------------------------------------------------------------------------------- /tier-2/salt/gui.sls: -------------------------------------------------------------------------------- 1 | gui-dir: 2 | file.directory: 3 | - name: /srv/gui 4 | - user: ubuntu  # fix: file.directory takes `user`, not `runas` 5 | 6 | gui-clone: 7 | git.latest: 8 | - name: https://github.com/sharafian/moneyd-gui.git 9 | - target: /srv/gui 10 | - rev: master 11 | - user: ubuntu  # fix: git.latest takes `user`, not `runas` 12 | - require: 13 | - pkg: build-essential 14 | - pkg: nodejs 15 | - pkg: git 16 | 17 | gui-install: 18 | cmd.run: 19 | - name: npm install --unsafe-perm --json --production 20 | - runas: ubuntu 21 | - cwd: /srv/gui 22 | 23 | gui-start: 24 | cmd.run: 25 | - name: pm2 start index.js 26 | - runas: ubuntu 27 | - 
cwd: /srv/gui 28 | -------------------------------------------------------------------------------- /tier-2/salt/httpie.sls: -------------------------------------------------------------------------------- 1 | httpie: 2 | pkg.installed 3 | -------------------------------------------------------------------------------- /tier-2/salt/node.sls: -------------------------------------------------------------------------------- 1 | nodejs: 2 | pkgrepo.managed: 3 | - humanname: Node.js 4 | - name: deb https://deb.nodesource.com/node_8.x xenial main 5 | - dist: xenial 6 | - file: /etc/apt/sources.list.d/nodesource.list 7 | - require_in: 8 | - pkg: nodejs 9 | - gpgcheck: 1 10 | - key_url: https://deb.nodesource.com/gpgkey/nodesource.gpg.key 11 | pkg.installed: [] 12 | -------------------------------------------------------------------------------- /tier-2/salt/pm2.sls: -------------------------------------------------------------------------------- 1 | pm2: 2 | npm.installed: [] 3 | -------------------------------------------------------------------------------- /tier-2/salt/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | - node 4 | - build-essential 5 | - git 6 | - httpie 7 | - pm2 8 | - connector 9 | - gui 10 | -------------------------------------------------------------------------------- /tier-2/terraform/main.tf: -------------------------------------------------------------------------------- 1 | # Specify the provider and access details 2 | provider "aws" { 3 | region = "${var.aws_region}" 4 | } 5 | 6 | # Create a VPC to launch our instances into 7 | resource "aws_vpc" "default" { 8 | cidr_block = "10.0.0.0/16" 9 | } 10 | 11 | # Create an internet gateway to give our subnet access to the outside world 12 | resource "aws_internet_gateway" "default" { 13 | vpc_id = "${aws_vpc.default.id}" 14 | } 15 | 16 | # Grant the VPC internet access on its main route table 17 | resource "aws_route" "internet_access" { 
18 | route_table_id = "${aws_vpc.default.main_route_table_id}" 19 | destination_cidr_block = "0.0.0.0/0" 20 | gateway_id = "${aws_internet_gateway.default.id}" 21 | } 22 | 23 | # Create a subnet to launch our instances into 24 | resource "aws_subnet" "default" { 25 | vpc_id = "${aws_vpc.default.id}" 26 | cidr_block = "10.0.1.0/24" 27 | map_public_ip_on_launch = true 28 | } 29 | 30 | # A security group for the ELB so it is accessible via the web 31 | resource "aws_security_group" "elb" { 32 | name = "connector_elb" 33 | description = "Used for btp.${var.my_domain} load-balancer" 34 | vpc_id = "${aws_vpc.default.id}" 35 | 36 | # HTTP access from anywhere 37 | ingress { 38 | from_port = 80 39 | to_port = 80 40 | protocol = "tcp" 41 | cidr_blocks = ["0.0.0.0/0"] 42 | } 43 | 44 | # outbound internet access 45 | egress { 46 | from_port = 0 47 | to_port = 0 48 | protocol = "-1" 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 | } 52 | 53 | # Our default security group to access 54 | # the instances over SSH and HTTP 55 | resource "aws_security_group" "default" { 56 | name = "connector" 57 | description = "Used for btp.${var.my_domain}" 58 | vpc_id = "${aws_vpc.default.id}" 59 | 60 | # SSH access from anywhere 61 | ingress { 62 | from_port = 22 63 | to_port = 22 64 | protocol = "tcp" 65 | cidr_blocks = ["0.0.0.0/0"] 66 | } 67 | 68 | # HTTP access from the VPC 69 | ingress { 70 | from_port = 8080 71 | to_port = 8080 72 | protocol = "tcp" 73 | cidr_blocks = ["10.0.0.0/16"] 74 | } 75 | 76 | # outbound internet access 77 | egress { 78 | from_port = 0 79 | to_port = 0 80 | protocol = "-1" 81 | cidr_blocks = ["0.0.0.0/0"] 82 | } 83 | } 84 | 85 | resource "aws_elb" "web" { 86 | name = "connector-elb" 87 | 88 | subnets = ["${aws_subnet.default.id}"] 89 | security_groups = ["${aws_security_group.elb.id}"] 90 | instances = ["${aws_instance.web.id}"] 91 | 92 | listener { 93 | instance_port = 8080 94 | instance_protocol = "tcp" 95 | lb_port = 80 96 | lb_protocol = "tcp" 97 | } 98 | } 99 | 
100 | resource "aws_key_pair" "auth" { 101 | key_name = "${var.key_name}" 102 | public_key = "${file(var.public_key_path)}" 103 | } 104 | 105 | resource "aws_simpledb_domain" "connector" { 106 | name = "connector" 107 | } 108 | 109 | resource "aws_iam_policy" "connector-policy" { 110 | name = "connector-policy" 111 | policy = <