├── .dockerignore ├── CHANGELOG.md ├── Dockerfile ├── LICENSE ├── README.md ├── devstep.yml ├── docs ├── addons │ ├── docker.md │ ├── heroku-toolbelt.md │ ├── memcached.md │ ├── oracle-java.md │ ├── postgresql.md │ └── redis.md ├── buildpacks │ ├── bats.md │ ├── custom.md │ ├── golang.md │ ├── inline.md │ ├── nodejs.md │ ├── phantomjs.md │ ├── php.md │ ├── python.md │ └── ruby.md ├── cli │ ├── aliases-and-binstubs.md │ ├── commands.md │ ├── configuration.md │ ├── installation.md │ ├── plugins.md │ └── tips.md ├── getting-started.md ├── index.md ├── introduction.md └── template.html ├── image ├── fix-permissions.sh └── prepare.sh ├── mk-image └── stack ├── addons ├── docker │ └── bin │ │ ├── configure │ │ ├── start-daemon │ │ └── wrapdocker ├── heroku-toolbelt │ └── bin │ │ └── configure ├── memcached │ └── bin │ │ ├── configure │ │ └── start-server ├── oracle-java-8 │ └── bin │ │ └── configure ├── postgresql │ ├── bin │ │ ├── configure │ │ ├── detect │ │ └── start-server │ └── conf │ │ ├── pg_hba.conf │ │ └── postgresql.conf └── redis │ └── bin │ ├── configure │ ├── detect │ └── start-server ├── bashrc ├── bin ├── build-project ├── configure-addons ├── create-cache-symlinks ├── entrypoint ├── exec-entrypoint ├── fix-permissions ├── forward-linked-ports ├── hack └── init ├── buildpacks ├── bats │ └── bin │ │ ├── compile │ │ └── detect ├── golang │ └── bin │ │ ├── compile │ │ └── detect ├── inline │ └── bin │ │ ├── compile │ │ ├── detect │ │ └── provision ├── nodejs │ ├── bin │ │ ├── compile │ │ ├── detect │ │ └── install-dependencies │ └── lib │ │ ├── binaries.sh │ │ ├── dependencies.sh │ │ └── environment.sh ├── phantomjs │ └── bin │ │ ├── compile │ │ └── detect ├── php │ └── bin │ │ ├── compile │ │ ├── detect │ │ └── install-dependencies ├── python │ └── bin │ │ ├── compile │ │ ├── detect │ │ ├── install-dependencies │ │ └── steps │ │ ├── cryptography │ │ ├── gdal │ │ ├── pip-install │ │ ├── pip-uninstall │ │ ├── pylibmc │ │ └── python └── ruby │ └── bin │ ├── 
compile │ └── detect └── load-env.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | docs 3 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## [0.4.0](https://github.com/fgrehm/devstep/compare/v0.3.1...v0.4.0) (2015-07-06) 2 | 3 | BREAKING CHANGES: 4 | 5 | - Switched to `heroku/cedar:14` [image](https://registry.hub.docker.com/u/heroku/cedar/) 6 | since it [has been brought up to speed](https://github.com/heroku/stack-images/pull/15) 7 | with `progrium/cedarish`. 8 | - Autobuild image support has been removed [[GH-93]] 9 | 10 | [GH-93]: https://github.com/fgrehm/devstep/issues/93 11 | 12 | IMPROVEMENTS: 13 | 14 | - buildpacks/golang: Download test dependencies by default 15 | - buildpacks/golang: Backport recent Golang buildpack updates 16 | - buildpacks/golang: Bump default Go to 1.4.2 17 | - buildpacks/php: Backport recent oficial Heroku buildpack updates 18 | - buildpacks/php: Enable support for HHVM 19 | - buildpacks/php: Composer's `vendor` dir is now kept inside images instead of the host machine when using the CLI 20 | - buildpacks/python: Backport recent oficial Heroku buildpack updates 21 | - buildpacks/python: Bump default Python to 2.7.10 22 | - buildpacks/nodejs: Backport recent oficial Heroku buildpack updates (including support for iojs) 23 | - buildpacks/nodejs: `node_modules` are now kept inside images instead of the host machine when using the CLI 24 | - buildpacks/ruby: Bump default Ruby to 2.2.2 25 | - buildpacks/ruby: Bump default Bundler to 1.10.5 26 | 27 | ## [0.3.1](https://github.com/fgrehm/devstep/compare/v0.3.0...v0.3.1) (2015-03-04) 28 | 29 | BUG FIXES: 30 | 31 | - baseimage: Install `nodejs-legacy` to fix Yeoman usage [[GH-91]] 32 | - addons/postgres: Fix installation script [[GH-91]] 33 | 34 | [GH-91]: https://github.com/fgrehm/devstep/pull/91 
35 | [GH-92]: https://github.com/fgrehm/devstep/pull/92 36 | 37 | ## [0.3.0](https://github.com/fgrehm/devstep/compare/v0.2.0...v0.3.0) (2015-02-12) 38 | 39 | BREAKING CHANGES: 40 | 41 | - Switched to the latest `progrium/cedarish:cedar14` image which uses an unmodified 42 | Heroku `cedar14.sh` base stack source. More info [here](https://github.com/progrium/cedarish/tree/master/cedar14) 43 | - Organization of Devstep's "stack" within the Docker image got changed, please see 44 | commits associated with [GH-63] for more information 45 | 46 | [GH-63]: https://github.com/fgrehm/devstep/issues/63 47 | 48 | FEATURES: 49 | 50 | - [Inline buildpack now supports reading commands from `devstep.yml`](http://fgrehm.viewdocs.io/devstep/buildpacks/inline) 51 | - [New Oracle Java 8 addon](http://fgrehm.viewdocs.io/devstep/addons/oracle-java) 52 | - [New Heroku toolbelt addon](http://fgrehm.viewdocs.io/devstep/addons/heroku-toolbelt) 53 | 54 | IMPROVEMENTS: 55 | 56 | - Reduced output by default and added support for `DEVSTEP_LOG` for setting the log level 57 | - addons/docker: Lock installation to 1.5.0 instead of latest 58 | - addons/docker: Support specifying a Docker version to be installed with `DEVSTEP_DOCKER_VERSION` env var 59 | - buildpacks/golang: Bump default version to 1.4.1 60 | - buildpacks/nodejs: Bump default Node to 0.10.35 61 | - buildpacks/php: Backport recent oficial Heroku buildpack updates 62 | - buildpacks/php: Download buildpack dependencies on demand 63 | - buildpacks/python: Backport recent oficial Heroku buildpack updates 64 | - buildpacks/python: Download buildpack dependencies on demand 65 | - buildpacks/ruby: Bump default Bundler to 1.8.0 66 | - buildpacks/ruby: Bump default Ruby to 2.2.0 67 | 68 | BUG FIXES: 69 | 70 | - buildpacks/nodejs: Skip Node.js installation if already installed 71 | 72 | ## [0.2.0](https://github.com/fgrehm/devstep/compare/v0.1.0...v0.2.0) (2014-09-24) 73 | 74 | BREAKING CHANGES: 75 | 76 | - [New 
CLI](http://fgrehm.viewdocs.io/devstep/cli/installation) and [configuration format](http://fgrehm.viewdocs.io/devstep/cli/configuration) 77 | - Updated the base Docker image to latest [progrium/cedarish:cedar14](https://github.com/progrium/cedarish/tree/cedar14), reducing the image size (from `1.168GB` to `867.7MB`). 78 | 79 | FEATURES: 80 | 81 | - addons/postgresql: Support for configurable data directory with `POSTGRESQL_DATA` [[GH-67]] 82 | 83 | [GH-67]: https://github.com/fgrehm/devstep/issues/67 84 | 85 | IMPROVEMENTS: 86 | 87 | - baseimage: "Shorten" `PS1` [[GH-71]] 88 | - buildpacks/golang: Update default installed version to 1.3.1 [[GH-72]] 89 | - buildpacks/ruby: Bump default Ruby to 2.1.3 90 | - buildpacks/ruby: Support loading rubies from `.ruby-version` [[GH-41]] 91 | - buildpacks/ruby: Remove dependency on RVM and make use of use Heroku's rubies [[GH-69]] 92 | 93 | [GH-41]: https://github.com/fgrehm/devstep/issues/41 94 | [GH-69]: https://github.com/fgrehm/devstep/issues/69 95 | [GH-71]: https://github.com/fgrehm/devstep/issues/71 96 | [GH-72]: https://github.com/fgrehm/devstep/issues/72 97 | 98 | ## [0.1.0](https://github.com/fgrehm/devstep/compare/v0.0.1...v0.1.0) (2014-08-22) 99 | 100 | BREAKING CHANGES: 101 | 102 | - Removed support for the the `devstep-sa` image, it will be made available again if there is enough demand 103 | - baseimage: Updated to Ubuntu 14.04 along with latest [progrium/cedarish](https://github.com/progrium/cedarish) 104 | - init: Removed workaround for [docker#5510], (this will break things on Docker pre 1.0.0) [[GH-48]] 105 | - init: Removed support for executing `/etc/rc.local` during startup 106 | - buildpacks/all: Keep cached packages "namespaced" by the buildpack, which means that the cache created with Devstep 0.0.1 won't be used 107 | - buildpacks/php: No longer starts a PHP server by default 108 | 109 | [docker#5510]: https://github.com/docker/docker/issues/5510 110 | [GH-48]: 
https://github.com/fgrehm/devstep/issues/48 111 | 112 | FEATURES: 113 | 114 | - baseimage: Added `reload-env` shortcut alias to make things easier after bootstrapping 115 | - baseimage: Install bash completion 116 | 117 | IMPROVEMENTS: 118 | 119 | - addons/postgresql: Install 9.3 120 | - builder: Error out in case the root directory is specified to the builder script [[GH-57]] 121 | - buildpacks/php: Backport recent oficial Heroku buildpack updates 122 | - buildpacks/python: Backport recent oficial Heroku buildpack updates 123 | - buildpacks/python: Support for pip packages caching 124 | - buildpacks/ruby: Remove `--binary` flag when installing rubies so that any ruby can be installed. 125 | - buildpacks/ruby: Make use of system libraries when installing nokogiri 126 | - buildpacks/ruby: Use stable versions of RVM instead of latest master 127 | - buildpacks/ruby: Keep a cache of gems tarballs 128 | 129 | [GH-57]: https://github.com/fgrehm/devstep/issues/57 130 | 131 | BUG FIXES: 132 | 133 | - addons/docker: Make it work again [[GH-49]] 134 | - buildpacks/golang: Fix ownership of bind mounted dirs under `GOPATH`s during build [[GH-56]] 135 | - buildpacks/golang: Fix check for whether go is installed [[GH-55]] 136 | - buildpacks/golang: Fix `GOPATH` symlinking when the remote URL begins with a username (like `git@`) [[GH-52]] 137 | - buildpacks/python: Make it work with python 2.7.5 [[GH-65]] 138 | 139 | [GH-49]: https://github.com/fgrehm/devstep/issues/49 140 | [GH-52]: https://github.com/fgrehm/devstep/issues/52 141 | [GH-55]: https://github.com/fgrehm/devstep/issues/55 142 | [GH-56]: https://github.com/fgrehm/devstep/issues/56 143 | [GH-65]: https://github.com/fgrehm/devstep/issues/65 144 | 145 | ## 0.0.1 (June 29, 2014) 146 | 147 | First public release 148 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM heroku/cedar:14 2 | 
MAINTAINER Fabio Rehm "fgrehm@gmail.com" 3 | 4 | ENV HOME=/home/devstep \ 5 | DEVSTEP_PATH=/opt/devstep \ 6 | DEVSTEP_BIN=/opt/devstep/bin \ 7 | DEVSTEP_CONF=/etc/devstep \ 8 | LANG=en_US.UTF-8 \ 9 | LC_ALL=en_US.UTF-8 \ 10 | LC_CTYPE=en_US.UTF-8 11 | 12 | ##################################################################### 13 | # Copy over our build scripts 14 | ADD image/ /tmp/build 15 | 16 | ##################################################################### 17 | # Install required packages and do some additional setup 18 | RUN /tmp/build/prepare.sh 19 | 20 | ##################################################################### 21 | # Devstep goodies (ADDed at the end to increase image "cacheability") 22 | ADD stack $DEVSTEP_PATH 23 | RUN for script in $DEVSTEP_PATH/buildpacks/*/bin/install-dependencies; do \ 24 | $script; \ 25 | done 26 | 27 | ##################################################################### 28 | # Fix permissions and set up init 29 | RUN /tmp/build/fix-permissions.sh 30 | 31 | ##################################################################### 32 | # Setup default user 33 | USER developer 34 | ENV USER developer 35 | 36 | ##################################################################### 37 | # Use our init 38 | ENTRYPOINT ["/opt/devstep/bin/entrypoint"] 39 | 40 | # Start a bash session by default 41 | CMD ["/bin/bash"] 42 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 Fabio Rehm 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to 
whom the Software is furnished to do so, 10 | subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 17 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 18 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 19 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 20 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Devstep 2 | 3 | Dead simple, no frills development environments based around a simple goal: 4 | 5 | > I want to `git clone` and run a single command to hack on any software project. 6 | 7 | For more information please check http://fgrehm.viewdocs.io/devstep 8 | 9 | ## :warning: Unmaintained :warning: 10 | 11 | This project is looking for maintainers, please send @fgrehm an email if you are interested in maintaining it. 
12 | -------------------------------------------------------------------------------- /devstep.yml: -------------------------------------------------------------------------------- 1 | privileged: true 2 | volumes: 3 | # FIXME: This only works on my machine 4 | - '{{env "HOME"}}/devstep/development/docker:/var/lib/docker' 5 | - '{{env "HOME"}}/projects/oss/devstep-examples:/devstep-examples' 6 | - '{{env "HOME"}}/projects/oss/devstep-cli/build/linux_amd64:/home/devstep/bin/devstep' 7 | commands: 8 | make: 9 | # No custom options 10 | provision: 11 | - ['configure-addons', 'docker'] 12 | -------------------------------------------------------------------------------- /docs/addons/docker.md: -------------------------------------------------------------------------------- 1 | # Docker Addon 2 | -------------- 3 | 4 | This addon will install Docker 1.3.0 from https://get.docker.io/builds/Linux/x86_64 5 | and will set things up for running nested Docker containers based on https://github.com/jpetazzo/dind. 6 | 7 | In order to use it, you need to provide the `--privileged` flag and, because 8 | `/var/lib/docker` cannot be on AUFS, you also need to make it a volume with `-v /var/lib/docker`. 9 | 10 | To install it you can run `configure-addons docker` from within the container. 11 | 12 | To specify a Docker version, use the `DEVSTEP_DOCKER_VERSION` environmental 13 | variable. 14 | -------------------------------------------------------------------------------- /docs/addons/heroku-toolbelt.md: -------------------------------------------------------------------------------- 1 | # Heroku Toolbelt Addon 2 | ----------------------- 3 | 4 | This addon will install the latest [Heroku Toolbelt](https://toolbelt.heroku.com/) 5 | version available for the Ubuntu 14.04 release. 6 | 7 | To install it you can run `configure-addons heroku-toolbelt` from within the container.
8 | -------------------------------------------------------------------------------- /docs/addons/memcached.md: -------------------------------------------------------------------------------- 1 | # Memcached Addon 2 | ----------------- 3 | 4 | This addon will install the latest Memcached version available for the Ubuntu 14.04 5 | release and it will set things up in a way that it is automatically started along 6 | with your containers. 7 | 8 | To install it you can run `configure-addons memcached` from within the container. 9 | -------------------------------------------------------------------------------- /docs/addons/oracle-java.md: -------------------------------------------------------------------------------- 1 | # Oracle Java Addon 2 | ------------------- 3 | 4 | This addon will install the latest version available for the Ubuntu 14.04 release 5 | and will leverage Devstep's caching mechanism so that the downloaded files are 6 | reused between environments. 7 | 8 | To install it you can run `configure-addons oracle-java-8` from within the container. 9 | -------------------------------------------------------------------------------- /docs/addons/postgresql.md: -------------------------------------------------------------------------------- 1 | # PostgreSQL Addon 2 | ------------------ 3 | 4 | This addon will install the latest PostgreSQL version available for the Ubuntu 14.04 5 | release and it will set things up in a way that it is automatically started along 6 | with your containers. 7 | 8 | To install it you can run `configure-addons postgresql` from within the container. 
9 | -------------------------------------------------------------------------------- /docs/addons/redis.md: -------------------------------------------------------------------------------- 1 | # Redis Addon 2 | ------------- 3 | 4 | This addon will install the latest Redis version available for the Ubuntu 14.04 5 | release and it will set things up in a way that it is automatically started along 6 | with your containers. 7 | 8 | To install it you can run `configure-addons redis` from within the container. 9 | -------------------------------------------------------------------------------- /docs/buildpacks/bats.md: -------------------------------------------------------------------------------- 1 | # Bats buildpack 2 | ---------------- 3 | 4 | This buildpack will install the latest release of [Bats](https://github.com/sstephenson/bats) 5 | if a file with the `.bats` extension is found. 6 | -------------------------------------------------------------------------------- /docs/buildpacks/custom.md: -------------------------------------------------------------------------------- 1 | # Using a custom buildpack 2 | -------------------------- 3 | 4 | In order to use a custom buildpack you can build your own images with it, or 5 | you can bind mount its directory with Devstep containers. 
6 | 7 | For example, you can place your buildpack sources at `$HOME/projects/my-buildpack` 8 | and add the following line to your `$HOME/devstep.yml` so that it is available to 9 | all Devstep environments: 10 | 11 | ```yaml 12 | volumes: 13 | - '{{env "HOME"}}/projects/my-buildpack:/opt/devstep/buildpacks/my-buildpack' 14 | ``` 15 | 16 | If you want to use a custom base image, you can add the following line to your 17 | project's `devstep.yml` or `$HOME/devstep.yml`: 18 | 19 | ```sh 20 | source_image: 'my-user/an-image:a-tag' 21 | ``` 22 | 23 | For more information on creating buildpacks, please have a look at 24 | [Heroku's documentation](https://devcenter.heroku.com/articles/buildpacks) and 25 | [Devstep's built in buildpacks sources](https://github.com/fgrehm/devstep/tree/master/buildpacks) 26 | for inspiration. 27 | -------------------------------------------------------------------------------- /docs/buildpacks/golang.md: -------------------------------------------------------------------------------- 1 | # Go buildpack 2 | -------------- 3 | 4 | This buildpack is based on the [Go Heroku buildpack](https://github.com/kr/heroku-buildpack-go) 5 | and will install [Go](http://golang.org/) if a file with the `.go` extension 6 | is found. 7 | 8 | If a `Godep` dir is found, this buildpack will download and install [godep](https://github.com/kr/godep) 9 | into your `$PATH` and will parse the `GoVersion` and `ImportPath` attributes 10 | when setting things up. 11 | 12 | Since currently devstep [does not support](https://github.com/fgrehm/devstep/issues/51) 13 | setting the workspace directory used inside the container, this buildpack will 14 | attempt to parse your project's import path for you using the following approach: 15 | 16 | 1. `ImportPath` from Godep configs 17 | 2. `.godir` file 18 | 3. Remote github repository URL 19 | 4. 
`GO_PROJECT_NAME` environmental variable 20 | 21 | After identifying the import path, the buildpack will symlink your project sources 22 | into the appropriate path under the `$GOPATH/src` dir. 23 | 24 | If godep is configured, the buildpack will attempt a `godep go build` for you, 25 | otherwise it will download project's dependencies with `go get` so you can 26 | start hacking right away. 27 | 28 | This buildpack will also fix `$GOPATH/src` ownership so that you can safely 29 | mount a local checkout of a project dependency into the container without 30 | running into permission issues. 31 | 32 | To install a specific Go version, please use the `GOVERSION` environmental 33 | variable. 34 | -------------------------------------------------------------------------------- /docs/buildpacks/inline.md: -------------------------------------------------------------------------------- 1 | # Inline buildpack 2 | ------------------ 3 | 4 | This is a buildpack for projects that wish to build themselves and is partially 5 | based on https://github.com/kr/heroku-buildpack-inline 6 | 7 | First, it checks if there is a `provision` directive on `devstep.yml` under 8 | the project's root and it uses the instructions provided during container 9 | provisioning. 10 | 11 | For example: 12 | 13 | ```yaml 14 | provision: 15 | - ['configure-addons', 'redis'] 16 | - ['configure-addons', 'heroku-toolbelt'] 17 | ``` 18 | 19 | Will configure the Redis and Heroku toolbelt addons. 20 | 21 | If no `provision` instructions are found, the buildpack will look for an 22 | executable file under `bin/compile` of the project root and will run the script 23 | if found. 
24 | -------------------------------------------------------------------------------- /docs/buildpacks/nodejs.md: -------------------------------------------------------------------------------- 1 | # Node.js buildpack 2 | ------------------- 3 | 4 | This buildpack is based on the [official Heroku buildpack](https://github.com/heroku/heroku-buildpack-nodejs) 5 | and will install [Node.js](http://nodejs.org/) and configure project's dependencies 6 | defined on a `package.json` manifest. 7 | 8 | ## How it Works 9 | 10 | Here's an overview of what this buildpack does: 11 | 12 | - Uses the [semver.io](https://semver.io) webservice to find the latest version of node that satisfies the [engines.node semver range](https://npmjs.org/doc/json.html#engines) in your `package.json`. 13 | - Allows any recent version of node to be used, including [pre-release versions](https://semver.io/node.json). 14 | - Uses an [S3 caching proxy](https://github.com/heroku/s3pository#readme) of nodejs.org for faster downloads of the node binary. 15 | - Discourages use of dangerous semver ranges like `*` and `>0.10`. 16 | - Uses the version of `npm` that comes bundled with `node`. 17 | - Puts `node` and `npm` on the `PATH` so they can be executed on a hacking session. 18 | - Always runs `npm install` to ensure [npm script hooks](https://npmjs.org/doc/misc/npm-scripts.html) are executed. 19 | 20 | For more technical details, see the [compile script](buildpacks/nodejs/bin/compile). 21 | 22 | ## Documentation 23 | 24 | For more information about using Node.js, please refer to the [Heroku Node.js Support](https://devcenter.heroku.com/articles/nodejs-support) 25 | page. 
26 | -------------------------------------------------------------------------------- /docs/buildpacks/phantomjs.md: -------------------------------------------------------------------------------- 1 | # PhantomJS buildpack 2 | --------------------- 3 | 4 | This buildpack will install the latest release of [PhantomJS](http://phantomjs.org) 5 | and currently does not provide automatic detection. 6 | 7 | In order to use it, you'll need to manually invoke it with `build-project -b phantomjs` 8 | from within your image or from a `Dockerfile`. 9 | 10 | By default it will install the 1.9.7 version but a specific version can be installed 11 | by specifying the `PHANTOMJS_VERSION` env var during the build. For example: 12 | `PHANTOMJS_VERSION='1.9.6' build-project -b phantomjs` will install PhantomJS 1.9.6. 13 | -------------------------------------------------------------------------------- /docs/buildpacks/php.md: -------------------------------------------------------------------------------- 1 | # PHP buildpack 2 | --------------- 3 | 4 | This buildpack is based on the [PHP Heroku buildpack](https://github.com/heroku/heroku-buildpack-php) 5 | and will install [PHP](https://php.net/) if an `index.php` or `composer.json` file 6 | is found. 7 | 8 | If a `composer.json` file is found, this buildpack will download and install [composer](https://getcomposer.org/) 9 | into your `$PATH` unless a `composer.phar` is found alongside your project. With 10 | composer in place, the project's dependencies will be installed and a script to 11 | start a server for your app will also be configured. 12 | 13 | For more information on setting things up, please read the [Heroku Docs](https://devcenter.heroku.com/categories/php) 14 | or ask for help on [Devstep's issue tracker](https://github.com/fgrehm/devstep/issues). 15 | 16 | Instructions for running the server (either Apache2 or NGINX) will be provided 17 | at the end of the build process.
18 | -------------------------------------------------------------------------------- /docs/buildpacks/python.md: -------------------------------------------------------------------------------- 1 | # Python buildpack 2 | ------------------ 3 | 4 | This buildpack is based on the [Python Heroku buildpack](https://github.com/heroku/heroku-buildpack-python) 5 | powered by [pip](http://www.pip-installer.org/). It will install [Python](https://www.python.org) 6 | if a `requirements.txt` or `setup.py` file is found. 7 | 8 | ## Specify a Runtime 9 | 10 | You can also provide arbitrary releases of Python with a `runtime.txt` file. 11 | 12 | $ cat runtime.txt 13 | python-3.4.1 14 | 15 | Runtime options include: 16 | 17 | - python-2.7.10 (default) 18 | - python-3.4.1 19 | - pypy-1.9 (experimental) 20 | 21 | Other [unsupported runtimes](https://github.com/heroku/heroku-buildpack-python/tree/master/builds/runtimes) 22 | are available as well and are not guaranteed to play well with devstep. 23 | -------------------------------------------------------------------------------- /docs/buildpacks/ruby.md: -------------------------------------------------------------------------------- 1 | # Ruby buildpack 2 | ---------------- 3 | 4 | This buildpack will install [Ruby](https://www.ruby-lang.org/en) 1.9.3+ and will 5 | use [Bundler](http://bundler.io/) for dependency management. It will be used if 6 | a `Gemfile` is found. 7 | 8 | The installed Ruby will be the same one that gets installed on Heroku's Cedar 14 9 | stack with a fallback to a Ruby from the Cedar stack. By default Ruby 2.2.2 and 10 | Bundler 1.10.5 will be installed. 11 | 12 | To specify a Ruby version, use the [`ruby` directive](http://bundler.io/v1.6/gemfile_ruby.html) 13 | of your project's `Gemfile`, the `DEVSTEP_RUBY_VERSION` environmental variable or 14 | a `.ruby-version` on your project's root. 15 | 16 | To specify a Bundler version, use the `DEVSTEP_BUNDLER_VERSION` environmental 17 | variable.
18 | -------------------------------------------------------------------------------- /docs/cli/aliases-and-binstubs.md: -------------------------------------------------------------------------------- 1 | # CLI Aliases and Binstubs 2 | 3 | The [`commands` configurations set from `devstep.yml` files](configuration) 4 | can be used to enable some powerful aliases in case you need to frequently 5 | run one off commands against a previously built project image. 6 | 7 | For example, you can keep a terminal session with a Rails server running on a 8 | tab and on a separate tab you can keep a `devstep hack` session open for 9 | development tasks so you can easily run tests or execute some `rake` tasks. 10 | 11 | By specifying the following command on your `devstep.yml` you'll be able to 12 | `devstep run server` instead of `devstep run -p 3000:3000 -- bundle exec rails server`: 13 | 14 | ```yaml 15 | commands: 16 | server: 17 | cmd: ["bundle", "exec", "rails", "server"] 18 | publish: ["3000:3000"] 19 | ``` 20 | 21 | ## Using binstubs 22 | 23 | Given the configuration above, you might also want to skip the `devstep run` 24 | part from `devstep run server` while keeping the specified configs like published 25 | ports or volumes specified on `devstep.yml` files. 26 | 27 | By running the `devstep binstubs` command from your project's root, a bash script 28 | will be created under `.devstep/bin` for each specified command, so you are an `export PATH=".devstep/bin:$PATH"` 29 | away from running just `server` using the configs outlined above. 30 | -------------------------------------------------------------------------------- /docs/cli/commands.md: -------------------------------------------------------------------------------- 1 | # CLI Commands 2 | -------------- 3 | 4 | 1. [Hack](#user-content-hack) 5 | 1. [Build](#user-content-build) 6 | 1. [Bootstrap](#user-content-bootstrap) 7 | 1.
[Other commands](#user-content-other-commands) 8 | 9 | -------------- 10 | 11 | ## **Hack** 12 | 13 | **Command: `devstep hack [OPTIONS]`** 14 | 15 | This is the easiest way to get started with Devstep. By running the command 16 | from your project's root, Devstep will: 17 | 18 | 1. Create a Docker container based on `fgrehm/devstep:v0.4.0` with project 19 | sources bind mounted at `/workspace`. 20 | 2. Detect and install project's dependencies on the new container using the 21 | available buildpacks. 22 | 3. Start a `bash` session with everything in place for you to do your work. 23 | 24 | Once you `exit` the `bash` session, the container will be garbage collected 25 | (aka `docker rm`ed). 26 | 27 | **Options** 28 | 29 | * `-w, --working_dir` - Working directory inside the container 30 | * `-p, --publish` - Publish a container's port to the host (hostPort:containerPort) 31 | * `--link` - Add link to another container (name:alias) 32 | * `-e, --env` - Set environment variables 33 | * `--privileged` - Give extended privileges to this container 34 | 35 | **Example** 36 | 37 | ```sh 38 | devstep hack -p 80:8080 --link postgres:db --link memcached:mc -e DEVSTEP_BUNDLER_VERSION='1.6.0' 39 | ``` 40 | 41 | ## **Build** 42 | 43 | **Command: `devstep build`** 44 | 45 | By running the command from your project's root, Devstep will: 46 | 47 | 1. Create a Docker container based on `fgrehm/devstep:v0.4.0` with project 48 | sources bind mounted at `/workspace`. 49 | 2. Detect and install project's dependencies on the new container using the 50 | available buildpacks. 51 | 3. `docker commit` the container to a `devstep/:` image. 52 | 4. `docker tag` the new image as `devstep/:latest`. 53 | 54 | The `devstep/` images act like snapshots of your project dependencies 55 | and will be used as the source image for subsequent `devstep` commands instead 56 | of the `fgrehm/devstep:v0.4.0` image. 
57 | 58 | For example, running a `devstep hack` after building the image will use `devstep/:latest` 59 | as the base container for new "hacking sessions" so that you don't have to build 60 | your project's environment from scratch. The same applies for a new `devstep build`, 61 | which will build on top of the latest image reducing the overall build time when 62 | compared to reconfiguring the environment from scratch using 63 | `Dockerfile`s. 64 | 65 | Because the `build` command bind mounts your project sources on the Docker container 66 | during the configuration process, you'll have to provide the full path to sources 67 | on the host machine as a Docker volume in order to work on the project. 68 | `devstep hack` can take care of that for you or you can manually: 69 | 70 | ```sh 71 | docker run -ti -v `pwd`:/workspace devstep/ 72 | ``` 73 | 74 | ## **Bootstrap** 75 | 76 | **Command: `devstep bootstrap [OPTIONS]`** 77 | 78 | As you might have guessed, this command can be used to bootstrap new projects without 79 | cluttering your machine with development tools just to scaffold a new project. 80 | 81 | That command will start an interactive `bash` session with the current directory 82 | bind mounted as `/workspace` on the container and you'll have to manually configure 83 | the tools required to scaffold your new project. You can even force a specific 84 | buildpack to run from there. 85 | 86 | **Options** 87 | 88 | * `-r, --repository` - Repository name used when committing the Docker image.
89 | 90 | **Example** 91 | 92 | For example, scaffolding a new Rails project means: 93 | 94 | ```sh 95 | cd $HOME/projects # or whatever directory you keep your projects 96 | devstep bootstrap -r my_app 97 | 98 | build-project -b ruby 99 | reload-env 100 | gem install rails 101 | rails new my_app 102 | 103 | exit # Or do some extra setup before exiting 104 | ``` 105 | 106 | Once you `exit` the container, you should end up with a `devstep/my_app` image 107 | and a brand new Rails app under `$HOME/projects/my_app` on your machine. To 108 | abort the bootstrap process, just `exit 1` from within the container and answer 109 | 'No' when asked for confirmation for committing the image. 110 | 111 | As with `devstep build`, subsequent `devstep` commands like `build` and `hack` 112 | will use `devstep/my_app:latest` as the source image. Project sources 113 | also won't get stored inside the Docker image and you'll need to provide the 114 | full path to its sources on the host machine as a Docker volume in order to 115 | work on the project. `devstep hack` can take care of that for you or you can 116 | manually: 117 | 118 | ```sh 119 | docker run -ti -v `pwd`:/workspace devstep/ 120 | ``` 121 | 122 | To bootstrap projects for other platforms and frameworks you can follow a similar 123 | approach, replacing the Ruby / Rails specifics with the platform / framework 124 | of choice. 
125 | 126 | 127 | ## **Other commands** 128 | 129 | * `info` - Show information about the current environment 130 | * `run` - Run a one off command against the current base image 131 | * `exec` - Run a one off command against the last container created for the current project 132 | * `binstubs` - Generate binstubs for the commands specified on devstep.yml 133 | * `clean` - Remove previously built images for the current environment 134 | * `pristine` - Rebuild project image from scratch 135 | * `help, h` - Shows a list of commands or help for one command 136 | 137 | For the most up-to-date list of supported commands, run `devstep --help`. 138 | -------------------------------------------------------------------------------- /docs/cli/configuration.md: -------------------------------------------------------------------------------- 1 | # CLI Configuration 2 | ------------------- 3 | 4 | Devstep's CLI has a configuration mechanism in the form of [YAML](http://www.yaml.org/) 5 | files that can be used to customize its behavior globally or for a specific project. 6 | 7 | The available options are described below: 8 | 9 | ```yaml 10 | # The Docker repository to keep images built by devstep 11 | # DEFAULT: 'devstep/' 12 | repository: 'repo/name' 13 | 14 | # The image used by devstep when building environments from scratch 15 | # DEFAULT: 'fgrehm/devstep:v0.4.0' 16 | source_image: 'source/image:tag' 17 | 18 | # The host cache dir that gets mounted inside the container at `/home/devstep/cache` 19 | # for speeding up the dependencies installation process. 20 | # DEFAULT: '/tmp/devstep/cache' 21 | cache_dir: '{{env "HOME"}}/devstep/cache' 22 | 23 | # The directory where project sources should be mounted inside the container. 24 | # DEFAULT: '/workspace' 25 | working_dir: '/home/devstep/gocode/src/github.com/fgrehm/devstep-cli' 26 | 27 | # Link to other existing containers (like a database for example). 
28 | # Please note that devstep won't start the associated containers automatically 29 | # and an error will be raised in case the linked container does not exist or 30 | # if it is not running. 31 | # DEFAULT: 32 | links: 33 | - "postgres:db" 34 | - "memcached:mc" 35 | 36 | # Additional Docker volumes to share with the container. 37 | # DEFAULT: 38 | volumes: 39 | - "/path/on/host:/path/on/guest" 40 | 41 | # Environment variables. 42 | # DEFAULT: 43 | environment: 44 | RAILS_ENV: "development" 45 | 46 | # Custom command aliases that can be used with `devstep run` to save some 47 | # typing. It is also used for generating project specific binstubs. 48 | # DEFAULT: 49 | commands: 50 | # This can be run with `devstep run server` 51 | server: 52 | cmd: ["rails", "server"] 53 | # Here you can use some of the configs described above 54 | publish: ["3000:3000"] 55 | volumes: 56 | - '{{env "HOME"}}/certs/some-certificate.crt:/home/devstep/some-certificate.crt' 57 | - '{{env "HOME"}}/projects/some-gem-sources:/home/devstep/some-gem-sources' 58 | links: 59 | - 'redis:redis' 60 | environment: 61 | RAILS_ENV: "hacking" 62 | ruby: 63 | # No custom options, used only for generating binstubs 64 | 65 | # Custom provisioning steps that can be used when the available buildpacks are not 66 | # enough. Use it to configure addons or run additional commands during the build. 67 | # DEFAULT: 68 | provision: 69 | - ['configure-addons', 'redis'] 70 | ``` 71 | 72 | During a `devstep` command run, the CLI will start by loading global config 73 | options from `$HOME/devstep.yml` and project specific options from a `devstep.yml` 74 | file located on the directory where the command is run and will merge them before 75 | creating containers. 76 | 77 | To figure out what are the configured values for a specific project after 78 | merging settings you can run `devstep info`. 
79 | -------------------------------------------------------------------------------- /docs/cli/installation.md: -------------------------------------------------------------------------------- 1 | # CLI Installation 2 | ------------------ 3 | 4 | The CLI is [written in Golang](https://github.com/fgrehm/devstep-cli) and precompiled 5 | binaries are available for each GitHub tagged release. Installing it is a matter 6 | of downloading it from GitHub, placing the binary on a directory available on your 7 | `PATH` and making it executable. 8 | 9 | This one liner can handle it for you assuming that `$HOME/bin` is available 10 | on your `PATH`: 11 | 12 | ```sh 13 | L=$HOME/bin/devstep && curl -sL https://github.com/fgrehm/devstep-cli/releases/download/v0.4.0/linux_amd64 > $L && chmod +x $L 14 | ``` 15 | 16 | Please note that the CLI is currently limited to connecting to a local `/var/run/docker.sock` 17 | socket only and the user that runs `devstep` commands will need [non-root access to it](http://docs.docker.io/installation/ubuntulinux/#giving-non-root-access). 18 | Support for execution over TCP is likely to be added at some point. 19 | 20 | 21 | > **IMPORTANT**: A `developer` user will be used by Devstep and it assumes your 22 | user and group ids are equal to `1000` when using the CLI or the container's init 23 | process will be aborted. This is to guarantee that files created within Docker 24 | containers have the appropriate permissions so that you can manipulate them 25 | on the host without the need to use `sudo`. This is currently a Devstep limitation 26 | that will be worked around in case there is enough demand or will be fixed once 27 | Docker adds support for user namespaces. 28 | 29 | > The `1000` id was chosen because it is the default uid / gid of Ubuntu Desktop users 30 | that are created during the installation process. 
To work around this limitation 31 | you can build your own image with the appropriate ids and add a `source_image: ':'` 32 | line to your `~/devstep.yml` so that the image is used as a source for your projects. 33 | 34 | ## Bash autocomplete 35 | 36 | An autocompletion script can be installed using the one liner below: 37 | 38 | ```sh 39 | curl -sL https://github.com/codegangsta/cli/raw/master/autocomplete/bash_autocomplete | sed 's/$PROG/devstep/' | sudo tee /etc/bash_completion.d/devstep 40 | ``` 41 | -------------------------------------------------------------------------------- /docs/cli/plugins.md: -------------------------------------------------------------------------------- 1 | # CLI Plugins 2 | ----------- 3 | 4 | Devstep's CLI has an experimental support for plugins in the form of JavaScript 5 | files that can be used to hook into the CLI runtime to modify its configuration 6 | at specific points during commands execution. 7 | 8 | Plugins should be installed to `$HOME/devstep/plugins//plugin.js` 9 | on the machine that is executing `devstep` commands and the only requirement is 10 | that a plugin folder should have a `plugin.js` file. 11 | 12 | ## Plugin API 13 | 14 | The current functionality is very rudimentary and is likely to be changed so right 15 | now it is best explained by the [squid3-ssl proxy](https://github.com/fgrehm/devstep-squid3-ssl) 16 | plugin source which is currently the only plugin available: 17 | 18 | ```js 19 | // `_currentPluginPath` is the host path where the JavaScript file is located 20 | // and is provided by Devstep's CLI plugin runtime, we keep its value on a 21 | // separate variable because its value gets changed for each plugin that 22 | // gets loaded. 23 | squidRoot = _currentPluginPath; 24 | 25 | // squidShared is the path were squid will keep both downloaded files on the host 26 | // machine and also the generated self signed certificate so that Devstep 27 | // containers can trust. 
28 | squidShared = squidRoot + "/shared"; 29 | 30 | // Hook into the `configLoaded` event that gets triggered right after configuration 31 | // files are loaded (eg: `$HOME/devstep.yml` and `CURRENT_DIR/devstep.yml`) 32 | devstep.on('configLoaded', function(config) { 33 | config 34 | // Link CLI created containers with the squid container 35 | .addLink('squid3:squid3.dev') 36 | // Share the certificate file with Devstep containers 37 | .addVolume(squidShared + '/certs/squid3.dev.crt:/usr/share/ca-certificates/squid3.dev.crt') 38 | .setEnv('HTTPS_PROXY_CERT', 'squid3.dev.crt'); 39 | // Inject the script that will trust the squid container certificate 40 | .addVolume(squidRoot + '/proxy.sh:/etc/devstep/init.d/proxy.sh') 41 | 42 | // Sets environmental variables so that programs make use of the cache 43 | .setEnv('http_proxy', 'http://squid3.dev:3128') 44 | .setEnv('https_proxy', 'http://squid3.dev:3128') 45 | }); 46 | ``` 47 | 48 | The code above is the equivalent of passing in `-e`, `-v` and `--link` parameters 49 | to `devstep` commands. 50 | 51 | > The current functionality provided by the plugin runtime is pretty rudimentary 52 | so if you have ideas for other plugins that you think would be useful, feel free to 53 | reach out on the [CLI issue tracker](https://github.com/fgrehm/devstep-cli/issues/new) 54 | or on [Gitter](https://gitter.im/fgrehm/devstep) so that it can be further discussed 55 | as it will likely involve changes on the CLI itself. 56 | -------------------------------------------------------------------------------- /docs/cli/tips.md: -------------------------------------------------------------------------------- 1 | # CLI Tips and Tricks 2 | --------------------- 3 | 4 | This is a list of tips that can make you more productive on your daily work. 
5 | 6 | ## Using SSH keys inside the container 7 | 8 | You can either configure [SSH agent forwarding](https://developer.github.com/guides/using-ssh-agent-forwarding/) 9 | with: 10 | 11 | ```yaml 12 | volumes: 13 | - '{{env "SSH_AUTH_SOCK"}}:/tmp/ssh-auth-sock' 14 | environment: 15 | SSH_AUTH_SOCK: "/tmp/ssh-auth-sock" 16 | ``` 17 | 18 | Or you can just share your SSH keys with the container using: 19 | 20 | ```yaml 21 | volumes: 22 | - '{{env "HOME"}}/.ssh:/home/devstep/.ssh' 23 | ``` 24 | 25 | ## Making project's dependencies cache persist between host restarts 26 | 27 | Since Devstep's cache is kept at `/tmp/devstep/cache` on the host by default, 28 | it is likely that the OS will have it cleaned when it gets restarted. In order 29 | to make it persistent, just set it to a folder that doesn't have that behavior 30 | (like some dir under your `$HOME`). 31 | 32 | For example, you can add the line below to your `$HOME/devstep.yml` to configure 33 | cached packages to be kept on `$HOME/devstep/cache`: 34 | 35 | ```yaml 36 | cache_dir: '{{env "HOME"}}/devstep/cache' 37 | ``` 38 | 39 | ## Reuse Git configurations from inside containers 40 | 41 | ```yaml 42 | volumes: 43 | - '{{env "HOME"}}/.gitconfig:/home/devstep/.gitconfig' 44 | ``` 45 | 46 | ## Sharing RubyGems credentials with containers 47 | 48 | If you are a RubyGem author, you will want to publish the gem to https://rubygems.org 49 | at some point. To avoid logging in all the time when you need to do that just 50 | share an existing credentials file with the containers using: 51 | 52 | ```yaml 53 | volumes: 54 | - '{{env "HOME"}}/.gem/credentials:/home/devstep/.gem/credentials' 55 | ``` 56 | 57 | ## Sharing Heroku credentials with containers 58 | 59 | If you deploy apps to Heroku, you will need to eventually use the Heroku Client 60 | to interact with it. 
To avoid logging in all the time when you need to do that 61 | just share the credentials file with the containers using: 62 | 63 | ```yaml 64 | volumes: 65 | - '{{env "HOME"}}/.netrc:/home/devstep/.netrc' 66 | ``` 67 | -------------------------------------------------------------------------------- /docs/getting-started.md: -------------------------------------------------------------------------------- 1 | # Getting Started 2 | ----------------- 3 | 4 | Devstep comes in two flavors, you can either use the provided CLI or you can build 5 | on top of the provided images from `Dockerfile`s. 6 | 7 | Regardless of the flavor you choose, it is a good idea to `docker pull fgrehm/devstep:v0.4.0` 8 | before creating your first container / image for a better user experience. Docker 9 | will download that image as needed when using `Dockerfile`s but the Devstep CLI won't. 10 | 11 | ## Sanity check 12 | --------------- 13 | 14 | This project is being developed and tested on an Ubuntu 14.04 host with Docker 15 | 1.7.0, while it is likely to work on other distros / Docker versions / 16 | [boot2docker](http://boot2docker.io/), I'm not sure how it will behave on the wild. 17 | 18 | Please note that the CLI is currently limited to connecting to a local `/var/run/docker.sock` 19 | socket only and the user that runs `devstep` commands will need [non-root access to it](http://docs.docker.io/installation/ubuntulinux/#giving-non-root-access). 20 | Support for execution over TCP is likely to be added at some point in the future. 21 | 22 | ## Getting started with the CLI 23 | ------------------------------- 24 | 25 | > **IMPORTANT**: A `developer` user will be used by Devstep and it assumes your 26 | user and group ids are equal to `1000` when using the CLI or the container's init 27 | process will be aborted. 
This is to guarantee that files created within Docker 28 | containers have the appropriate permissions so that you can manipulate them 29 | on the host without the need to use `sudo`. This is currently a Devstep limitation 30 | that will be worked around in case there is enough demand or will be fixed once 31 | Docker adds support for user namespaces. 32 | 33 | > The `1000` id was chosen because it is the default uid / gid of Ubuntu Desktop users 34 | that are created during the installation process. To work around this limitation 35 | you can build your own image with the appropriate ids and add a `source_image: ':'` 36 | line to your `~/devstep.yml` so that the image is used as a source for your projects. 37 | 38 | To install the CLI, you can run the one liner below and read on for more: 39 | 40 | ```sh 41 | L=$HOME/bin/devstep && curl -sL https://github.com/fgrehm/devstep-cli/releases/download/v0.4.0/linux_amd64 > $L && chmod +x $L 42 | ``` 43 | 44 | _The snippet above assumes `$HOME/bin` is on your `PATH`, change `$HOME/bin` to 45 | an appropriate path in case your system is not configured like that._ 46 | 47 | ### Doing a quick hack on a project 48 | 49 | With the CLI and Docker in place, just `cd` into your project and run `devstep hack`, 50 | it should be all you need to start working on your project. Devstep will create 51 | a Docker container, will install your project dependencies in it and at the end 52 | you'll be dropped on a `bash` session inside the container with project sources 53 | available at `/workspace`. 54 | 55 | From inside the container, you can do your work as you would on your own machine. 56 | For example, you can use `rake test` to run your Ruby tests or `go build` to 57 | compile Golang apps while editing files from your machine using your favorite IDE. 
58 | 59 | When you are done hacking, just `exit` the container and it will be "garbage 60 | collected" (aka `docker rm`ed) and no project specific dependencies will be kept 61 | on your machine. 62 | 63 | ### Taking snapshots of the environment to reduce startup time 64 | 65 | Building an environment from scratch all the time you need to work on a project 66 | is not very productive. To alleviate that pain you can use the `devstep build` 67 | command which will create a Docker image with all dependencies required to hack 68 | on your project so that further `devstep hack`s have a reduced startup time. 69 | 70 | When your project dependencies are changed (like when a new RubyGem is needed 71 | for a Ruby app), you can run `devstep build` again and it will reuse the previously 72 | built image as a starting point for building the new environment instead of 73 | starting from scratch, so use it for projects that you hack on every day. 74 | 75 | ### Accessing web apps from the host machine 76 | 77 | The `devstep hack` command accepts an additional `-p` parameter that accepts ports 78 | in the same way that the [Docker CLI](https://docs.docker.com/reference/commandline/cli/#run) 79 | does. For example, if your app runs on the `8080` port, you can start your hacking 80 | sessions with: 81 | 82 | ```sh 83 | devstep hack -p 8080:8080 84 | ``` 85 | 86 | And it will redirect the `8080` port on your host to the `8080` port within the 87 | container so you can just hit `http://localhost:8080` on your browser to see your 88 | app running after it is up. 89 | 90 | ### Using databases or other services from within containers 91 | 92 | In order to connect your project to additional services you can either [link containers](http://docs.docker.com/userguide/dockerlinks/#container-linking) 93 | to the appropriate service if you want to manage it from outside or install and 94 | configure it by hand inside the container. 
94 | 95 | 96 | Connecting to services that run from other containers is as simple as passing 97 | in a `--link` argument to the `devstep hack` command. The provided base image is 98 | smart enough to detect that a link has been provided and will automatically forward 99 | a `localhost` port to the external service published port. 100 | 101 | For example, you can start a PostgreSQL service on the background with: 102 | 103 | ```sh 104 | docker run -d --name postgres postgres:9.3 105 | ``` 106 | 107 | And then start your hacking session with: 108 | 109 | ```sh 110 | devstep hack --link postgres:db 111 | ``` 112 | 113 | From inside the container you'll be able to access the external PostgreSQL service 114 | using the local `5432` port that is redirected to the same port [exposed](http://docs.docker.com/reference/builder/#expose) 115 | by that image. 116 | 117 | Installing services inside the container makes sense if, for example, you are 118 | developing a library that interacts with it. Please note that since the Docker 119 | image does not run Ubuntu's default init process (it uses [runit](http://smarden.org/runit/) 120 | instead), some additional steps will be required to start the service. Recipes 121 | for installing some services are available in the form of "addons" so you don't 122 | have to worry about that. 123 | 124 | For example, installing and configuring [memcached](http://memcached.org/) inside 125 | the container is a matter of running `configure-addons memcached` from there. 126 | 127 | ### Bootstrapping a new project (AKA solving the chicken or the egg problem) 128 | 129 | Assuming you are willing to use Docker / Devstep to avoid cluttering your machine 130 | with development tools, there's no point on installing Ruby / Python / ... on your 131 | computer in order to scaffold a new project. 
To make that process easier, you can 132 | use the `devstep bootstrap` command and manually trigger a buildpack build from 133 | there using the `build-project` command. 134 | 135 | For example, scaffolding a new Rails project means: 136 | 137 | ```sh 138 | cd $HOME/projects # or whatever directory you keep your projects 139 | devstep bootstrap -r devstep/my_app 140 | 141 | build-project -b ruby 142 | reload-env 143 | gem install rails 144 | rails new my_app 145 | 146 | exit # Or do some extra setup before exiting 147 | ``` 148 | 149 | Once you `exit` the container, you will end up with a `devstep/my_app` image 150 | and a brand new Rails app under `$HOME/projects/my_app` on the host machine. 151 | 152 | To bootstrap projects for other platforms and frameworks you can follow a similar 153 | approach, replacing the Ruby / Rails specifics with the platform / framework 154 | of choice. 155 | 156 | As with `devstep build`, subsequent `devstep` commands like `build` and `hack` 157 | will use `devstep/my_app:latest` as the source image so that the environment 158 | don't have to be rebuilt from scratch. 159 | 160 | ### Caching project's dependencies packages on the host 161 | 162 | As mentioned on the [introduction](introduction) section, Devstep is also capable 163 | of reducing disk space and initial configuration times by caching packages on the 164 | host using a strategy similar to [vagrant-cachier's cache buckets](http://fgrehm.viewdocs.io/vagrant-cachier/how-does-it-work). 165 | 166 | This behavior is enabled by default and will be further documented on the future. 167 | For now you need to know that the `/tmp/devstep/cache` dir on the host will be bind 168 | mounted to containers created by the CLI under `/home/devstep/cache` and most of your 169 | project's dependencies packages will be downloaded there. 
169 | Note that the dependencies 170 | themselves are extracted and kept inside the images built by the CLI and you can 171 | safely clean things up or disable the caching behavior at will. 172 | 173 | ## Building images using `Dockerfile`s 174 | -------------------------------------- 175 | 176 | In case your project requires additional dependencies to work you can use the provided 177 | `fgrehm/devstep` image as a starting point for your `Dockerfile`s. 178 | 179 | The `fgrehm/devstep` image is the base image used for Devstep environments and 180 | requires you to manually trigger the build: 181 | 182 | ```Dockerfile 183 | FROM fgrehm/devstep:v0.4.0 184 | 185 | # Add project to the image and build it 186 | ADD . /workspace 187 | WORKDIR /workspace 188 | RUN CLEANUP=1 /opt/devstep/bin/build-project /workspace 189 | ``` 190 | 191 | By using a `Dockerfile` to build your images (instead of using `devstep build`) 192 | you'll be able to skip mounting project's sources on the container when running 193 | it and a simple `docker run -it ` should do the trick. **_Keep in mind 194 | that changes made to project sources will be kept inside the container and 195 | you'll lose them when you `docker rm` it._** 196 | 197 | ## More information 198 | ------------------- 199 | 200 | If you reached this point it means you should have a good understanding of how 201 | Devstep works and what you can do with it. 
For more information please check out 202 | the links on the menu above and if you are still wondering how you can use the 203 | tool or benefit from it, please create a [new issue](https://github.com/fgrehm/devstep/issues/new) 204 | or reach out on [Gitter](https://gitter.im/fgrehm/devstep) so that we can have 205 | a chat :) 206 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Devstep 2 | 3 | Dead simple, no frills development environments based around a simple goal: 4 | 5 | > I want to `git clone` and run a single command to hack on any software project. 6 | 7 | [Devstep demo on Vimeo](https://vimeo.com/99482658) 8 | 9 |
10 | 11 | ## WAT?! 12 | 13 | Yeah, just give Devstep your code and let it take care of the rest (including 14 | automatic port forwarding to linked containers). Check out this demo of using 15 | Devstep with [Discourse](http://www.discourse.org/): 16 | 17 | [Discourse demo on Vimeo](http://vimeo.com/99212562) 18 | 19 |
19 | 20 | 21 | And before you get the wrong impression, Devstep is not just for the web, it is also suitable for building 22 | / developing libraries and CLI tools: 23 | 24 | [Sidekiq demo on Vimeo](https://vimeo.com/99487410) 25 | 26 | If you are new to Devstep, check out the [introduction](introduction) and [getting started](getting-started) 27 | guides. 28 | 29 | ## Project status 30 | 31 | This is mostly the result of many different hacks that suit my needs for developing 32 | Ruby on Rails and Golang apps. It has been working fine for my use cases since April / 33 | March 2014 and it also seems to play really well with other platforms based on my 34 | [testing](https://github.com/fgrehm/devstep-examples). If things break for you 35 | [please let me know](https://github.com/fgrehm/devstep/issues/new)! 36 | -------------------------------------------------------------------------------- /docs/introduction.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | -------------- 3 | 4 | Have you ever deployed an app to a platform like Heroku? How awesome it is to 5 | `git push` some code and see it running without worrying about the infrastructure 6 | it is going to run on? Now imagine that idea applied to any type of project, 7 | regardless of whether they are web apps or not. This is what Devstep is all about. 8 | 9 | At Devstep's heart, there is a self-sufficient [Docker image](http://docs.docker.com/introduction/understanding-docker/#docker-images) 10 | that leverages the [buildpack](https://devcenter.heroku.com/articles/buildpacks) 11 | abstraction for automatic detection and installation of project dependencies. The 12 | image comes with a script that takes your app's source as an input and installs 13 | everything that is required for hacking on it. 
14 | 15 | Be it a CLI tool, a plugin for some framework or a web app, it doesn't matter, 16 | Devstep can do the heavy lifting of preparing an isolated and disposable 17 | environment using as close to "zero configuration" as possible so that Developers 18 | can **focus on writing and delivering working software**. 19 | 20 | 21 | ## Benefits 22 | 23 | Configuring a base system from scratch to hack on a project (using Docker or not) 24 | is not an easy task for many people. Yes, there are plenty of platform specific 25 | images available for download on [Docker Hub](https://hub.docker.com/) but because 26 | Devstep's base image provides an environment that is [similar to Heroku's](https://github.com/progrium/cedarish), 27 | it should be capable of building and running a wide range of applications / tools 28 | / libraries from a single image without the need to worry about writing `Dockerfile`s. 29 | 30 | With Devstep's CLI, we can also reduce the disk space and initial configuration 31 | times by (optionally) caching packages on the host machine using a strategy similar 32 | to [vagrant-cachier's cache buckets](http://fgrehm.viewdocs.io/vagrant-cachier/how-does-it-work), 33 | where project dependencies packages are kept on the host while its contents are 34 | extracted inside the container. 35 | 36 | 37 | ## Usage 38 | 39 | Devstep can be used to build development environments in at least two different 40 | ways: from the provided CLI or from `Dockerfile`s. To run the images built, you 41 | can use the provided `devstep hack` command, use other tools (like [docker-compose](http://docs.docker.com/compose/)) 42 | or just `docker run` them by hand. 43 | 44 | 45 | ## What's included on the base [Docker image](https://registry.hub.docker.com/u/fgrehm/devstep/)? 46 | 47 | That image is based on [Heroku's `cedar:14` image](https://registry.hub.docker.com/u/heroku/cedar/) 48 | which makes up for the [Cedar-14](https://devcenter.heroku.com/articles/cedar) 49 | stack. 
So everything that is available to it (and as a consequence, available to 50 | Heroku apps) will be available for `fgrehm/devstep` environments. 51 | 52 | On top of `heroku/cedar:14`, we: 53 | 54 | * Create a `developer` user to avoid using `root` and creating files with wrong permissions during development. 55 | * Install some extra devel packages (like `libyaml-dev`) and other "nice to have" 56 | packages and utilities (like `tmux` and `vim`). 57 | * Configure PostgreSQL and MySQL clients. 58 | * Set the image `ENTRYPOINT` to our own init system and the default command to a `bash` login shell. 59 | * Configure a couple of startup scripts (like automatic port forwading to linked containers). 60 | * And add the supported buildpacks. 61 | 62 | _For more information please have a look at the [Dockerfile](https://github.com/fgrehm/devstep/blob/master/Dockerfile)._ 63 | 64 | 65 | ## Supported buildpacks 66 | 67 | * [Bats](buildpacks/bats) 68 | * [Golang](buildpacks/golang) 69 | * [Inline](buildpacks/inline) 70 | * [Node.js](buildpacks/nodejs) 71 | * [PhantomJS](buildpacks/phantomjs) 72 | * [Python](buildpacks/python) 73 | * [Ruby](buildpacks/ruby) 74 | 75 | 76 | ## Why standard Heroku buildpacks are not used? 77 | 78 | Because development environments have a few different needs than production 79 | environments and not all projects are web apps. For example, PHP apps are likely 80 | to have [opcache](http://www.php.net/manual/en/intro.opcache.php) enabled 81 | on production to improve app's performance but have it disabled during development 82 | and it is a good practice to have Ruby on Rails assets precompiled on production. 83 | But I did my best to stay as close as possible to the official buildpacks. 84 | 85 | -------------------------------------------- 86 | 87 | That's it for a brief introduction, from here you can have a look at the [getting started](getting-started) 88 | guide for a quick start on how to use Devstep. 
89 | -------------------------------------------------------------------------------- /docs/template.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | {{NAME}} :: viewdocs.io 5 | 6 | 7 | 8 | 9 | 10 | 11 | 80 | 81 | 82 | 86 | 87 | 88 |
89 | 151 | 152 |
153 | {{CONTENT}} 154 |
155 |
156 | 157 | 167 | 168 | 169 | 170 | 183 | 191 | 192 | 193 | -------------------------------------------------------------------------------- /image/fix-permissions.sh: -------------------------------------------------------------------------------- 1 | ##################################################################### 2 | # Fix permissions, set up init 3 | cp $DEVSTEP_PATH/bashrc $HOME/.bashrc && \ 4 | chown -R developer:developer $HOME && \ 5 | chown -R developer:developer $DEVSTEP_PATH && \ 6 | chown -R developer:developer $DEVSTEP_CONF && \ 7 | chmod u+s /usr/bin/sudo && \ 8 | ln -s $DEVSTEP_PATH/bin/fix-permissions $DEVSTEP_CONF/init.d/05-fix-permissions.sh && \ 9 | ln -s $DEVSTEP_PATH/bin/create-cache-symlinks $DEVSTEP_CONF/init.d/10-create-cache-symlinks.sh && \ 10 | ln -s $DEVSTEP_PATH/bin/forward-linked-ports $DEVSTEP_CONF/init.d/10-forward-linked-ports.sh && \ 11 | chmod +x $DEVSTEP_PATH/bin/* && \ 12 | chmod +x $DEVSTEP_CONF/init.d/* 13 | -------------------------------------------------------------------------------- /image/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | 6 | ##################################################################### 7 | # * Install and configure PostgreSQL and MySQL clients 8 | # * Install bash-completion to save us a few keystrokes 9 | # * Install vim because editing files with plain old vi sucks 10 | # * Install `htop` because it has a nicer UI than plain old `top` 11 | # * Install tmux so that we can run lots of shells within the same 12 | # bash session (without the need of running through SSH) 13 | # * Install nodejs for Rails apps, Bazaar and Mecurial for Golang projects 14 | # (will be installed on demand by buildpacks on a future release) 15 | # * Python and runit are dependencies for our init script based on 16 | # phusion/baseimage-docker 17 | apt-get update 18 | apt-get install -y --force-yes --no-install-recommends \ 19 | sudo \ 
20 | libreadline5 \ 21 | libmcrypt4 \ 22 | libffi-dev \ 23 | postgresql-client \ 24 | mysql-client \ 25 | sqlite3 \ 26 | libsqlite3-dev \ 27 | vim \ 28 | htop \ 29 | tmux \ 30 | mercurial \ 31 | bzr \ 32 | nodejs-legacy \ 33 | libssl0.9.8 \ 34 | software-properties-common \ 35 | bash-completion \ 36 | python \ 37 | runit 38 | 39 | ##################################################################### 40 | # Bring back apt .deb caching as they'll be either removed on the 41 | # build process by devstep itself or cached on host machine 42 | rm /etc/apt/apt.conf.d/docker-clean 43 | 44 | ##################################################################### 45 | # Clean things up to reduce the image size 46 | apt-get clean 47 | apt-get autoremove 48 | rm -rf /var/lib/apt/lists/* 49 | 50 | ##################################################################### 51 | # Devstep environment 52 | mkdir -p $HOME/cache 53 | mkdir -p $HOME/.profile.d 54 | mkdir -p $HOME/bin 55 | mkdir -p $HOME/log 56 | mkdir -p $DEVSTEP_PATH/bin 57 | mkdir -p $DEVSTEP_CONF/service 58 | mkdir -p $DEVSTEP_CONF/init.d 59 | 60 | ##################################################################### 61 | # Create a default user to avoid using the container as root, we set 62 | # the user and group ids to 1000 as it is the most common id for 63 | # single user Ubuntu machines. 64 | # The provided /usr/bin/fix-permissions script can be used at startup 65 | # to ensure the 'developer' user id / group id are the same as the 66 | # directory bind mounted into the container. 
67 | echo "developer:x:1000:1000:Developer,,,:/home/devstep:/bin/bash" >> /etc/passwd 68 | echo "developer:x:1000:" >> /etc/group 69 | echo "developer ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/developer 70 | echo -e "[client]\nprotocol=tcp\nuser=root" >> $HOME/.my.cnf 71 | echo "export PGHOST=localhost" >> $HOME/.profile.d/postgresql.sh 72 | echo "export PGUSER=postgres" >> $HOME/.profile.d/postgresql.sh 73 | chmod 0440 /etc/sudoers.d/developer 74 | 75 | ##################################################################### 76 | # * Download and install jq as it is being used by a few buildpacks 77 | # See http://stedolan.github.io/jq for more info 78 | curl -L -s http://stedolan.github.io/jq/download/linux64/jq > $DEVSTEP_PATH/bin/jq 79 | chmod +x $DEVSTEP_PATH/bin/jq 80 | -------------------------------------------------------------------------------- /mk-image: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | tag='v0.4.0' 6 | 7 | echo "===> Building fgrehm/devstep:${tag}" 8 | docker build -t fgrehm/devstep:${tag} . 9 | -------------------------------------------------------------------------------- /stack/addons/docker/bin/configure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # fail fast 4 | set -o pipefail # dont ignore exit codes when piping output 5 | # set -x # enable debugging 6 | 7 | CACHE_DIR=${2:-"${HOME}/cache/docker"} 8 | DOCKER_CMD="${HOME}/bin/docker" 9 | DEVSTEP_DOCKER_VERSION=${DEVSTEP_DOCKER_VERSION:-"1.5.0"} 10 | URL=https://get.docker.com/builds/Linux/x86_64/docker-${DEVSTEP_DOCKER_VERSION} 11 | 12 | mkdir -p $CACHE_DIR 13 | 14 | if ! [ -x "${DOCKER_CMD}" ]; then 15 | if ! 
[ -f "${CACHE_DIR}/docker-${DEVSTEP_DOCKER_VERSION}" ]; then 16 | echo "-----> [docker] Downloading ${DEVSTEP_DOCKER_VERSION} version from ${URL}" 17 | # -L is required: get.docker.com redirects to the real binary location 18 | curl -L -s $URL > ${CACHE_DIR}/docker-${DEVSTEP_DOCKER_VERSION} 19 | fi 20 | 21 | cp ${CACHE_DIR}/docker-${DEVSTEP_DOCKER_VERSION} ${DOCKER_CMD} 22 | chmod +x "${DOCKER_CMD}" 23 | fi 24 | 25 | if ! [ -x /sbin/iptables ] || ! [ -x /sbin/apparmor_parser ]; then 26 | echo "-----> [docker] Installing iptables and apparmor" 27 | sudo apt-get update -q && sudo apt-get install iptables apparmor -y --force-yes -q 28 | fi 29 | 30 | echo "-----> [docker] Installed $(${HOME}/bin/docker --version)" 31 | 32 | addon_basedir="$( cd -P "$( dirname "$0" )" && pwd )" 33 | 34 | cp ${addon_basedir}/wrapdocker "$DEVSTEP_CONF/init.d/00-wrapdocker.sh" 35 | chmod +x "$DEVSTEP_CONF/init.d/00-wrapdocker.sh" 36 | ${addon_basedir}/wrapdocker 37 | 38 | cp $addon_basedir/start-daemon $DEVSTEP_CONF/init.d/01-start-docker.sh 39 | chmod +x $DEVSTEP_CONF/init.d/01-start-docker.sh 40 | 41 | echo "-----> [docker] Starting daemon" 42 | $DEVSTEP_CONF/init.d/01-start-docker.sh &>/dev/null || true 43 | 44 | echo "-----> [docker] Finished configuration!" 45 | -------------------------------------------------------------------------------- /stack/addons/docker/bin/start-daemon: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # fail fast 4 | set -o pipefail # dont ignore exit codes when piping output 5 | # set -x # enable debugging 6 | 7 | # If a pidfile is still around (for example after a container restart), 8 | # delete it so that docker can start. 9 | mkdir -p ${HOME}/run 10 | rm -f ${HOME}/run/docker.pid 11 | 12 | !
[ -L /var/run/docker.sock ] && 13 | sudo ln -s ${HOME}/run/docker.sock /var/run/docker.sock 14 | 15 | # Run the daemon in the background; all output goes to the log file. 16 | sudo ${HOME}/bin/docker -d -p ${HOME}/run/docker.pid -G developer -H unix://${HOME}/run/docker.sock &> ${HOME}/log/docker.log & 17 | -------------------------------------------------------------------------------- /stack/addons/docker/bin/wrapdocker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # fail fast 4 | set -o pipefail # dont ignore exit codes when piping output 5 | # set -x # enable debugging 6 | 7 | # FROM https://github.com/jpetazzo/dind/blob/master/wrapdocker 8 | 9 | # First, make sure that cgroups are mounted correctly. 10 | CGROUP=/sys/fs/cgroup 11 | : ${LOG:=stdio} 12 | 13 | [ -d $CGROUP ] || 14 | sudo mkdir $CGROUP 15 | 16 | sudo mountpoint -q $CGROUP || 17 | sudo mount -n -t tmpfs -o uid=0,gid=0,mode=0755 cgroup $CGROUP || { 18 | echo "Could not make a tmpfs mount. Did you use --privileged?" 19 | exit 1 20 | } 21 | 22 | if [ -d /sys/kernel/security ] && ! sudo mountpoint -q /sys/kernel/security 23 | then 24 | sudo mount -t securityfs none /sys/kernel/security || { 25 | echo "Could not mount /sys/kernel/security." 26 | echo "AppArmor detection and --privileged mode might break." 27 | } 28 | fi 29 | 30 | # Mount the cgroup hierarchies exactly as they are in the parent system. 31 | for SUBSYS in $(cut -d: -f2 /proc/1/cgroup) 32 | do 33 | [ -d $CGROUP/$SUBSYS ] || sudo mkdir $CGROUP/$SUBSYS 34 | sudo mountpoint -q $CGROUP/$SUBSYS || 35 | sudo mount -n -t cgroup -o $SUBSYS cgroup $CGROUP/$SUBSYS 36 | 37 | # The two following sections address a bug which manifests itself 38 | # by a cryptic "lxc-start: no ns_cgroup option specified" when 39 | # trying to start containers within a container. 40 | # The bug seems to appear when the cgroup hierarchies are not 41 | # mounted on the exact same directories in the host, and in the 42 | # container.
43 | 44 | # Named, control-less cgroups are mounted with "-o name=foo" 45 | # (and appear as such under /proc//cgroup) but are usually 46 | # mounted on a directory named "foo" (without the "name=" prefix). 47 | # Systemd and OpenRC (and possibly others) both create such a 48 | # cgroup. To avoid the aforementioned bug, we symlink "foo" to 49 | # "name=foo". This shouldn't have any adverse effect. 50 | echo $SUBSYS | grep -q ^name= && { 51 | NAME=$(echo $SUBSYS | sed s/^name=//) 52 | if ! [ -L $CGROUP/$NAME ]; then 53 | sudo ln -s $SUBSYS $CGROUP/$NAME 54 | fi 55 | } 56 | 57 | # Likewise, on at least one system, it has been reported that 58 | # systemd would mount the CPU and CPU accounting controllers 59 | # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" 60 | # but on a directory called "cpu,cpuacct" (note the inversion 61 | # in the order of the groups). This tries to work around it. 62 | [ $SUBSYS = cpuacct,cpu ] && sudo ln -s $SUBSYS $CGROUP/cpu,cpuacct 63 | done 64 | 65 | # Note: as I write those lines, the LXC userland tools cannot setup 66 | # a "sub-container" properly if the "devices" cgroup is not in its 67 | # own hierarchy. Let's detect this and issue a warning. 68 | grep -q :devices: /proc/1/cgroup || 69 | echo "WARNING: the 'devices' cgroup should be in its own hierarchy." 70 | grep -qw devices /proc/1/cgroup || 71 | echo "WARNING: it looks like the 'devices' cgroup is not mounted." 72 | 73 | # Now, close extraneous file descriptors. 
74 | pushd /proc/self/fd >/dev/null 75 | for FD in * 76 | do 77 | case "$FD" in 78 | # Keep stdin/stdout/stderr 79 | [012]) 80 | ;; 81 | # Nuke everything else 82 | *) 83 | eval exec "$FD>&-" 84 | ;; 85 | esac 86 | done 87 | popd >/dev/null 88 | -------------------------------------------------------------------------------- /stack/addons/heroku-toolbelt/bin/configure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # fail fast 4 | set -o pipefail # dont ignore exit codes when piping output 5 | # set -x # enable debugging 6 | 7 | if ! $(which heroku &> /dev/null); then 8 | echo "-----> [heroku-toolbelt] Installing Heroku toolbelt..." 9 | wget -qO- https://toolbelt.heroku.com/install-ubuntu.sh | sudo sh &>/dev/null 10 | echo "-----> [heroku-toolbelt] Finished configuration!" 11 | fi 12 | -------------------------------------------------------------------------------- /stack/addons/memcached/bin/configure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # fail fast 4 | set -o pipefail # dont ignore exit codes when piping output 5 | # set -x # enable debugging 6 | 7 | if ! $(which memcached &> /dev/null); then 8 | echo "-----> [memcached] Installing server..." 9 | sudo apt-get update -q && sudo apt-get install memcached -y --force-yes -q 10 | fi 11 | 12 | addon_basedir="$( cd -P "$( dirname "$0" )" && pwd )" 13 | 14 | echo "-----> [memcached] Enabling autostart" 15 | mkdir -p ${DEVSTEP_CONF}/service/memcached 16 | cp $addon_basedir/start-server ${DEVSTEP_CONF}/service/memcached/run 17 | chmod +x ${DEVSTEP_CONF}/service/memcached/run 18 | 19 | echo "-----> [memcached] Starting server" 20 | sv start ${DEVSTEP_CONF}/service/memcached &>/dev/null || true 21 | 22 | echo "-----> [memcached] Finished configuration!" 
23 | -------------------------------------------------------------------------------- /stack/addons/memcached/bin/start-server: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | mkdir -p ${HOME}/log 4 | 5 | exec /usr/bin/memcached &> ${HOME}/log/memcached.log 6 | -------------------------------------------------------------------------------- /stack/addons/oracle-java-8/bin/configure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CACHE_DIR=${2:-"${HOME}/cache/oracle-java-8"} 4 | 5 | if $(which java &>/dev/null); then 6 | echo 'Java already installed, skipping' 7 | exit 0 8 | fi 9 | 10 | echo '-----> Installing Java 8...' 11 | 12 | mkdir -p $CACHE_DIR 13 | sudo ln -s $CACHE_DIR /var/cache/oracle-jdk8-installer 14 | 15 | ( 16 | sudo add-apt-repository -y ppa:webupd8team/java 17 | sudo apt-get update 18 | echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections 19 | sudo apt-get install -y oracle-java8-installer 20 | ) &>> /tmp/configure-oracle-java-8.log && \ 21 | rm /tmp/configure-oracle-java-8.log 22 | 23 | echo "-----> Installed $(java -version 2>&1 | head -n 1)" 24 | -------------------------------------------------------------------------------- /stack/addons/postgresql/bin/configure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # fail fast 4 | set -o pipefail # dont ignore exit codes when piping output 5 | # set -x # enable debugging 6 | 7 | if ! 
[ -f /usr/lib/postgresql/9.3/bin/postgres ]; then 8 | echo "-----> [postgres] Installing server" 9 | ( 10 | sudo apt-get update -q && 11 | sudo apt-get install postgresql-9.3 postgresql-contrib -y --force-yes -q 12 | ) &>> /tmp/configure-pg.log 13 | 14 | echo "export PGHOST=" >> ${HOME}/.profile.d/postgresql.sh 15 | source ${HOME}/.profile.d/postgresql.sh 16 | fi 17 | 18 | addon_basedir="$( cd -P "$( dirname "$0" )" && pwd )" 19 | 20 | echo "-----> [postgres] Enabling autostart" 21 | mkdir -p ${DEVSTEP_CONF}/service/postgresql 22 | 23 | sudo cp $(readlink -f $addon_basedir/../conf/postgresql.conf) /etc/postgresql/9.3/main/postgresql.conf 24 | sudo cp $(readlink -f $addon_basedir/../conf/pg_hba.conf) /etc/postgresql/9.3/main/pg_hba.conf 25 | 26 | cp $addon_basedir/start-server ${DEVSTEP_CONF}/service/postgresql/run 27 | chmod +x ${DEVSTEP_CONF}/service/postgresql/run 28 | 29 | echo "-----> [postgres] Starting server" 30 | sv start ${DEVSTEP_CONF}/service/postgresql &>/dev/null || true 31 | 32 | echo "-----> [postgres] Finished configuration!"
33 | -------------------------------------------------------------------------------- /stack/addons/postgresql/bin/detect: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Usage: bin/detect 3 | 4 | if [ -f "${1}/Gemfile.lock" ] && $(grep -q '^\s*pg\s\+' "${1}/Gemfile.lock"); then 5 | echo 'postgresql' 6 | exit 0 7 | fi 8 | 9 | echo 'no postgresql' 10 | exit 1 11 | -------------------------------------------------------------------------------- /stack/addons/postgresql/bin/start-server: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | # set -x 4 | 5 | POSTGRESQL_USER=${POSTGRESQL_USER:-"postgres"} 6 | POSTGRESQL_PASS=${POSTGRESQL_PASS:-"postgres"} 7 | POSTGRESQL_DB=${POSTGRESQL_DB:-"postgres"} 8 | POSTGRESQL_TEMPLATE=${POSTGRESQL_TEMPLATE:-"DEFAULT"} 9 | POSTGRESQL_DATA=${POSTGRESQL_DATA:-"/shared/postgresql"} 10 | 11 | POSTGRESQL_BIN=/usr/lib/postgresql/9.3/bin/postgres 12 | POSTGRESQL_CONFIG_FILE=/etc/postgresql/9.3/main/postgresql.conf 13 | 14 | if [ $POSTGRESQL_DATA != '/shared/postgresql' ]; then 15 | sudo sed -i 's|\/shared\/postgresql|'$POSTGRESQL_DATA'|g' $POSTGRESQL_CONFIG_FILE 16 | fi 17 | 18 | if [ ! -d $POSTGRESQL_DATA ] || [ ! 
-f $POSTGRESQL_DATA/PG_VERSION ]; then 19 | sudo mkdir -p $POSTGRESQL_DATA 20 | sudo chown -R developer:developer $POSTGRESQL_DATA 21 | /usr/lib/postgresql/9.3/bin/initdb -D $POSTGRESQL_DATA &>> ${HOME}/log/postgresql.log 22 | fi 23 | sudo chown -R developer:developer $POSTGRESQL_DATA 24 | 25 | sudo chown -R developer:developer /var/run/postgresql 26 | sudo chown developer:developer /etc/postgresql/9.3/main/*.conf 27 | 28 | mkdir -p ${HOME}/log 29 | 30 | POSTGRESQL_SINGLE="$POSTGRESQL_BIN --single --config-file=$POSTGRESQL_CONFIG_FILE postgres" 31 | $POSTGRESQL_SINGLE <<< "CREATE USER $POSTGRESQL_USER WITH SUPERUSER;" &> ${HOME}/log/postgresql.log || true 32 | $POSTGRESQL_SINGLE <<< "CREATE DATABASE $POSTGRESQL_DB OWNER $POSTGRESQL_USER TEMPLATE $POSTGRESQL_TEMPLATE;" &>> ${HOME}/log/postgresql.log || true 33 | $POSTGRESQL_SINGLE <<< "ALTER USER $POSTGRESQL_USER WITH PASSWORD '$POSTGRESQL_PASS';" &>> ${HOME}/log/postgresql.log || true 34 | 35 | # Based on https://gist.github.com/ffmike/877447 36 | POSTGRESQL_SINGLE="$POSTGRESQL_BIN --single --config-file=$POSTGRESQL_CONFIG_FILE postgres" 37 | $POSTGRESQL_SINGLE <<< "update pg_database set datallowconn = TRUE where datname = 'template0';" &>> ${HOME}/log/postgresql.log || true 38 | POSTGRESQL_SINGLE="$POSTGRESQL_BIN --single --config-file=$POSTGRESQL_CONFIG_FILE template0" 39 | $POSTGRESQL_SINGLE <<< "update pg_database set datistemplate = FALSE where datname = 'template1';" &>> ${HOME}/log/postgresql.log || true 40 | $POSTGRESQL_SINGLE <<< "drop database template1;" &> ${HOME}/log/postgresql.log || true 41 | $POSTGRESQL_SINGLE <<< "create database template1 with template = template0 encoding = 'UTF8';" &>> ${HOME}/log/postgresql.log || true 42 | $POSTGRESQL_SINGLE <<< "update pg_database set datistemplate = TRUE where datname = 'template1';" &>> ${HOME}/log/postgresql.log || true 43 | POSTGRESQL_SINGLE="$POSTGRESQL_BIN --single --config-file=$POSTGRESQL_CONFIG_FILE template1" 44 | $POSTGRESQL_SINGLE <<< "update 
pg_database set datallowconn = FALSE where datname = 'template0';" &>> ${HOME}/log/postgresql.log || true 45 | 46 | exec $POSTGRESQL_BIN --config-file=$POSTGRESQL_CONFIG_FILE &>> ${HOME}/log/postgresql.log 47 | -------------------------------------------------------------------------------- /stack/addons/postgresql/conf/pg_hba.conf: -------------------------------------------------------------------------------- 1 | # PostgreSQL Client Authentication Configuration File 2 | # =================================================== 3 | # 4 | # Refer to the "Client Authentication" section in the PostgreSQL 5 | # documentation for a complete description of this file. A short 6 | # synopsis follows. 7 | # 8 | # This file controls: which hosts are allowed to connect, how clients 9 | # are authenticated, which PostgreSQL user names they can use, which 10 | # databases they can access. Records take one of these forms: 11 | # 12 | # local DATABASE USER METHOD [OPTIONS] 13 | # host DATABASE USER ADDRESS METHOD [OPTIONS] 14 | # hostssl DATABASE USER ADDRESS METHOD [OPTIONS] 15 | # hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] 16 | # 17 | # (The uppercase items must be replaced by actual values.) 18 | # 19 | # The first field is the connection type: "local" is a Unix-domain 20 | # socket, "host" is either a plain or SSL-encrypted TCP/IP socket, 21 | # "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a 22 | # plain TCP/IP socket. 23 | # 24 | # DATABASE can be "all", "sameuser", "samerole", "replication", a 25 | # database name, or a comma-separated list thereof. The "all" 26 | # keyword does not match "replication". Access to replication 27 | # must be enabled in a separate record (see example below). 28 | # 29 | # USER can be "all", a user name, a group name prefixed with "+", or a 30 | # comma-separated list thereof. In both the DATABASE and USER fields 31 | # you can also write a file name prefixed with "@" to include names 32 | # from a separate file. 
33 | # 34 | # ADDRESS specifies the set of hosts the record matches. It can be a 35 | # host name, or it is made up of an IP address and a CIDR mask that is 36 | # an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that 37 | # specifies the number of significant bits in the mask. A host name 38 | # that starts with a dot (.) matches a suffix of the actual host name. 39 | # Alternatively, you can write an IP address and netmask in separate 40 | # columns to specify the set of hosts. Instead of a CIDR-address, you 41 | # can write "samehost" to match any of the server's own IP addresses, 42 | # or "samenet" to match any address in any subnet that the server is 43 | # directly connected to. 44 | # 45 | # METHOD can be "trust", "reject", "md5", "password", "gss", "sspi", 46 | # "krb5", "ident", "peer", "pam", "ldap", "radius" or "cert". Note that 47 | # "password" sends passwords in clear text; "md5" is preferred since 48 | # it sends encrypted passwords. 49 | # 50 | # OPTIONS are a set of options for the authentication in the format 51 | # NAME=VALUE. The available options depend on the different 52 | # authentication methods -- refer to the "Client Authentication" 53 | # section in the documentation for a list of which options are 54 | # available for which authentication methods. 55 | # 56 | # Database and user names containing spaces, commas, quotes and other 57 | # special characters must be quoted. Quoting one of the keywords 58 | # "all", "sameuser", "samerole" or "replication" makes the name lose 59 | # its special character, and just match a database or username with 60 | # that name. 61 | # 62 | # This file is read on server startup and when the postmaster receives 63 | # a SIGHUP signal. If you edit the file on a running system, you have 64 | # to SIGHUP the postmaster for the changes to take effect. You can 65 | # use "pg_ctl reload" to do that. 
66 | 67 | # Put your actual configuration here 68 | # ---------------------------------- 69 | # 70 | # If you want to allow non-local connections, you need to add more 71 | # "host" records. In that case you will also need to make PostgreSQL 72 | # listen on a non-local interface via the listen_addresses 73 | # configuration parameter, or via the -i or -h command line switches. 74 | 75 | 76 | 77 | 78 | # DO NOT DISABLE! 79 | # If you change this first entry you will need to make sure that the 80 | # database superuser can access the database using some other method. 81 | # Noninteractive access to all databases is required during automatic 82 | # maintenance (custom daily cronjobs, replication, and similar tasks). 83 | # 84 | # Database administrative login by Unix domain socket 85 | #local all postgres peer 86 | 87 | # TYPE DATABASE USER ADDRESS METHOD 88 | 89 | # "local" is for Unix domain socket connections only 90 | local all all trust 91 | # IPv4 local connections: 92 | host all all 127.0.0.1/32 trust 93 | # IPv6 local connections: 94 | host all all ::1/128 trust 95 | # Allow replication connections from localhost, by a user with the 96 | # replication privilege. 97 | #local replication postgres peer 98 | #host replication postgres 127.0.0.1/32 md5 99 | #host replication postgres ::1/128 md5 100 | host all all all trust 101 | -------------------------------------------------------------------------------- /stack/addons/postgresql/conf/postgresql.conf: -------------------------------------------------------------------------------- 1 | # ----------------------------- 2 | # PostgreSQL configuration file 3 | # ----------------------------- 4 | # 5 | # This file consists of lines of the form: 6 | # 7 | # name = value 8 | # 9 | # (The "=" is optional.) Whitespace may be used. Comments are introduced with 10 | # "#" anywhere on a line. The complete list of parameter names and allowed 11 | # values can be found in the PostgreSQL documentation. 
12 | # 13 | # The commented-out settings shown in this file represent the default values. 14 | # Re-commenting a setting is NOT sufficient to revert it to the default value; 15 | # you need to reload the server. 16 | # 17 | # This file is read on server startup and when the server receives a SIGHUP 18 | # signal. If you edit the file on a running system, you have to SIGHUP the 19 | # server for the changes to take effect, or use "pg_ctl reload". Some 20 | # parameters, which are marked below, require a server shutdown and restart to 21 | # take effect. 22 | # 23 | # Any parameter can also be given as a command-line option to the server, e.g., 24 | # "postgres -c log_connections=on". Some parameters can be changed at run time 25 | # with the "SET" SQL command. 26 | # 27 | # Memory units: kB = kilobytes Time units: ms = milliseconds 28 | # MB = megabytes s = seconds 29 | # GB = gigabytes min = minutes 30 | # h = hours 31 | # d = days 32 | 33 | 34 | #------------------------------------------------------------------------------ 35 | # FILE LOCATIONS 36 | #------------------------------------------------------------------------------ 37 | 38 | # The default values of these variables are driven from the -D command-line 39 | # option or PGDATA environment variable, represented here as ConfigDir. 40 | 41 | data_directory = '/shared/postgresql' # use data in another directory 42 | # (change requires restart) 43 | hba_file = '/etc/postgresql/9.3/main/pg_hba.conf' # host-based authentication file 44 | # (change requires restart) 45 | ident_file = '/etc/postgresql/9.3/main/pg_ident.conf' # ident configuration file 46 | # (change requires restart) 47 | 48 | # If external_pid_file is not explicitly set, no extra PID file is written. 
49 | external_pid_file = '/var/run/postgresql/9.3-main.pid' # write an extra PID file 50 | # (change requires restart) 51 | 52 | 53 | #------------------------------------------------------------------------------ 54 | # CONNECTIONS AND AUTHENTICATION 55 | #------------------------------------------------------------------------------ 56 | 57 | # - Connection Settings - 58 | 59 | listen_addresses = '*' # what IP address(es) to listen on; 60 | # comma-separated list of addresses; 61 | # defaults to 'localhost'; use '*' for all 62 | # (change requires restart) 63 | port = 5432 # (change requires restart) 64 | max_connections = 100 # (change requires restart) 65 | # Note: Increasing max_connections costs ~400 bytes of shared memory per 66 | # connection slot, plus lock space (see max_locks_per_transaction). 67 | #superuser_reserved_connections = 3 # (change requires restart) 68 | unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories 69 | # (change requires restart) 70 | #unix_socket_group = '' # (change requires restart) 71 | #unix_socket_permissions = 0777 # begin with 0 to use octal notation 72 | # (change requires restart) 73 | #bonjour = off # advertise server via Bonjour 74 | # (change requires restart) 75 | #bonjour_name = '' # defaults to the computer name 76 | # (change requires restart) 77 | 78 | # - Security and Authentication - 79 | 80 | #authentication_timeout = 1min # 1s-600s 81 | #ssl = off # (change requires restart) 82 | #ssl_ciphers = 'DEFAULT:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers 83 | # (change requires restart) 84 | #ssl_renegotiation_limit = 512MB # amount of data between renegotiations 85 | #ssl_cert_file = 'server.crt' # (change requires restart) 86 | #ssl_key_file = 'server.key' # (change requires restart) 87 | #ssl_ca_file = '' # (change requires restart) 88 | #ssl_crl_file = '' # (change requires restart) 89 | #password_encryption = on 90 | #db_user_namespace = off 91 | 92 | # Kerberos and GSSAPI 93 | 
#krb_server_keyfile = '' 94 | #krb_srvname = 'postgres' # (Kerberos only) 95 | #krb_caseins_users = off 96 | 97 | # - TCP Keepalives - 98 | # see "man 7 tcp" for details 99 | 100 | #tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; 101 | # 0 selects the system default 102 | #tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; 103 | # 0 selects the system default 104 | #tcp_keepalives_count = 0 # TCP_KEEPCNT; 105 | # 0 selects the system default 106 | 107 | 108 | #------------------------------------------------------------------------------ 109 | # RESOURCE USAGE (except WAL) 110 | #------------------------------------------------------------------------------ 111 | 112 | # - Memory - 113 | 114 | shared_buffers = 128MB # min 128kB 115 | # (change requires restart) 116 | #temp_buffers = 8MB # min 800kB 117 | #max_prepared_transactions = 0 # zero disables the feature 118 | # (change requires restart) 119 | # Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory 120 | # per transaction slot, plus lock space (see max_locks_per_transaction). 121 | # It is not advisable to set max_prepared_transactions nonzero unless you 122 | # actively intend to use prepared transactions. 
123 | #work_mem = 1MB # min 64kB 124 | #maintenance_work_mem = 16MB # min 1MB 125 | #max_stack_depth = 2MB # min 100kB 126 | 127 | # - Disk - 128 | 129 | #temp_file_limit = -1 # limits per-session temp file space 130 | # in kB, or -1 for no limit 131 | 132 | # - Kernel Resource Usage - 133 | 134 | #max_files_per_process = 1000 # min 25 135 | # (change requires restart) 136 | #shared_preload_libraries = '' # (change requires restart) 137 | 138 | # - Cost-Based Vacuum Delay - 139 | 140 | #vacuum_cost_delay = 0 # 0-100 milliseconds 141 | #vacuum_cost_page_hit = 1 # 0-10000 credits 142 | #vacuum_cost_page_miss = 10 # 0-10000 credits 143 | #vacuum_cost_page_dirty = 20 # 0-10000 credits 144 | #vacuum_cost_limit = 200 # 1-10000 credits 145 | 146 | # - Background Writer - 147 | 148 | #bgwriter_delay = 200ms # 10-10000ms between rounds 149 | #bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round 150 | #bgwriter_lru_multiplier = 2.0 # 0-10.0 multipler on buffers scanned/round 151 | 152 | # - Asynchronous Behavior - 153 | 154 | #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching 155 | 156 | 157 | #------------------------------------------------------------------------------ 158 | # WRITE AHEAD LOG 159 | #------------------------------------------------------------------------------ 160 | 161 | # - Settings - 162 | 163 | #wal_level = minimal # minimal, archive, or hot_standby 164 | # (change requires restart) 165 | #fsync = on # turns forced synchronization on or off 166 | #synchronous_commit = on # synchronization level; 167 | # off, local, remote_write, or on 168 | #wal_sync_method = fsync # the default is the first option 169 | # supported by the operating system: 170 | # open_datasync 171 | # fdatasync (default on Linux) 172 | # fsync 173 | # fsync_writethrough 174 | # open_sync 175 | #full_page_writes = on # recover from partial page writes 176 | #wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers 177 | # (change requires restart) 178 | 
#wal_writer_delay = 200ms # 1-10000 milliseconds 179 | 180 | #commit_delay = 0 # range 0-100000, in microseconds 181 | #commit_siblings = 5 # range 1-1000 182 | 183 | # - Checkpoints - 184 | 185 | #checkpoint_segments = 3 # in logfile segments, min 1, 16MB each 186 | #checkpoint_timeout = 5min # range 30s-1h 187 | #checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 188 | #checkpoint_warning = 30s # 0 disables 189 | 190 | # - Archiving - 191 | 192 | #archive_mode = off # allows archiving to be done 193 | # (change requires restart) 194 | #archive_command = '' # command to use to archive a logfile segment 195 | # placeholders: %p = path of file to archive 196 | # %f = file name only 197 | # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' 198 | #archive_timeout = 0 # force a logfile segment switch after this 199 | # number of seconds; 0 disables 200 | 201 | 202 | #------------------------------------------------------------------------------ 203 | # REPLICATION 204 | #------------------------------------------------------------------------------ 205 | 206 | # - Sending Server(s) - 207 | 208 | # Set these on the master and on any standby that will send replication data. 209 | 210 | #max_wal_senders = 0 # max number of walsender processes 211 | # (change requires restart) 212 | #wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables 213 | #wal_sender_timeout = 60s # in milliseconds; 0 disables 214 | 215 | # - Master Server - 216 | 217 | # These settings are ignored on a standby server. 218 | 219 | #synchronous_standby_names = '' # standby servers that provide sync rep 220 | # comma-separated list of application_name 221 | # from standby(s); '*' = all 222 | #vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed 223 | 224 | # - Standby Servers - 225 | 226 | # These settings are ignored on a master server. 
227 | 228 | #hot_standby = off # "on" allows queries during recovery 229 | # (change requires restart) 230 | #max_standby_archive_delay = 30s # max delay before canceling queries 231 | # when reading WAL from archive; 232 | # -1 allows indefinite delay 233 | #max_standby_streaming_delay = 30s # max delay before canceling queries 234 | # when reading streaming WAL; 235 | # -1 allows indefinite delay 236 | #wal_receiver_status_interval = 10s # send replies at least this often 237 | # 0 disables 238 | #hot_standby_feedback = off # send info from standby to prevent 239 | # query conflicts 240 | #wal_receiver_timeout = 60s # time that receiver waits for 241 | # communication from master 242 | # in milliseconds; 0 disables 243 | 244 | 245 | #------------------------------------------------------------------------------ 246 | # QUERY TUNING 247 | #------------------------------------------------------------------------------ 248 | 249 | # - Planner Method Configuration - 250 | 251 | #enable_bitmapscan = on 252 | #enable_hashagg = on 253 | #enable_hashjoin = on 254 | #enable_indexscan = on 255 | #enable_indexonlyscan = on 256 | #enable_material = on 257 | #enable_mergejoin = on 258 | #enable_nestloop = on 259 | #enable_seqscan = on 260 | #enable_sort = on 261 | #enable_tidscan = on 262 | 263 | # - Planner Cost Constants - 264 | 265 | #seq_page_cost = 1.0 # measured on an arbitrary scale 266 | #random_page_cost = 4.0 # same scale as above 267 | #cpu_tuple_cost = 0.01 # same scale as above 268 | #cpu_index_tuple_cost = 0.005 # same scale as above 269 | #cpu_operator_cost = 0.0025 # same scale as above 270 | #effective_cache_size = 128MB 271 | 272 | # - Genetic Query Optimizer - 273 | 274 | #geqo = on 275 | #geqo_threshold = 12 276 | #geqo_effort = 5 # range 1-10 277 | #geqo_pool_size = 0 # selects default based on effort 278 | #geqo_generations = 0 # selects default based on effort 279 | #geqo_selection_bias = 2.0 # range 1.5-2.0 280 | #geqo_seed = 0.0 # range 0.0-1.0 281 | 
282 | # - Other Planner Options - 283 | 284 | #default_statistics_target = 100 # range 1-10000 285 | #constraint_exclusion = partition # on, off, or partition 286 | #cursor_tuple_fraction = 0.1 # range 0.0-1.0 287 | #from_collapse_limit = 8 288 | #join_collapse_limit = 8 # 1 disables collapsing of explicit 289 | # JOIN clauses 290 | 291 | 292 | #------------------------------------------------------------------------------ 293 | # ERROR REPORTING AND LOGGING 294 | #------------------------------------------------------------------------------ 295 | 296 | # - Where to Log - 297 | 298 | #log_destination = 'stderr' # Valid values are combinations of 299 | # stderr, csvlog, syslog, and eventlog, 300 | # depending on platform. csvlog 301 | # requires logging_collector to be on. 302 | 303 | # This is used when logging to stderr: 304 | #logging_collector = off # Enable capturing of stderr and csvlog 305 | # into log files. Required to be on for 306 | # csvlogs. 307 | # (change requires restart) 308 | 309 | # These are only used if logging_collector is on: 310 | #log_directory = 'pg_log' # directory where log files are written, 311 | # can be absolute or relative to PGDATA 312 | #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, 313 | # can include strftime() escapes 314 | #log_file_mode = 0600 # creation mode for log files, 315 | # begin with 0 to use octal notation 316 | #log_truncate_on_rotation = off # If on, an existing log file with the 317 | # same name as the new log file will be 318 | # truncated rather than appended to. 319 | # But such truncation only occurs on 320 | # time-driven rotation, not on restarts 321 | # or size-driven rotation. Default is 322 | # off, meaning append to existing files 323 | # in all cases. 324 | #log_rotation_age = 1d # Automatic rotation of logfiles will 325 | # happen after that time. 0 disables. 326 | #log_rotation_size = 10MB # Automatic rotation of logfiles will 327 | # happen after that much log output. 
328 | # 0 disables. 329 | 330 | # These are relevant when logging to syslog: 331 | #syslog_facility = 'LOCAL0' 332 | #syslog_ident = 'postgres' 333 | 334 | # This is only relevant when logging to eventlog (win32): 335 | #event_source = 'PostgreSQL' 336 | 337 | # - When to Log - 338 | 339 | #client_min_messages = notice # values in order of decreasing detail: 340 | # debug5 341 | # debug4 342 | # debug3 343 | # debug2 344 | # debug1 345 | # log 346 | # notice 347 | # warning 348 | # error 349 | 350 | #log_min_messages = warning # values in order of decreasing detail: 351 | # debug5 352 | # debug4 353 | # debug3 354 | # debug2 355 | # debug1 356 | # info 357 | # notice 358 | # warning 359 | # error 360 | # log 361 | # fatal 362 | # panic 363 | 364 | #log_min_error_statement = error # values in order of decreasing detail: 365 | # debug5 366 | # debug4 367 | # debug3 368 | # debug2 369 | # debug1 370 | # info 371 | # notice 372 | # warning 373 | # error 374 | # log 375 | # fatal 376 | # panic (effectively off) 377 | 378 | #log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements 379 | # and their durations, > 0 logs only 380 | # statements running at least this number 381 | # of milliseconds 382 | 383 | 384 | # - What to Log - 385 | 386 | #debug_print_parse = off 387 | #debug_print_rewritten = off 388 | #debug_print_plan = off 389 | #debug_pretty_print = on 390 | #log_checkpoints = off 391 | #log_connections = off 392 | #log_disconnections = off 393 | #log_duration = off 394 | #log_error_verbosity = default # terse, default, or verbose messages 395 | #log_hostname = off 396 | log_line_prefix = '%t ' # special values: 397 | # %a = application name 398 | # %u = user name 399 | # %d = database name 400 | # %r = remote host and port 401 | # %h = remote host 402 | # %p = process ID 403 | # %t = timestamp without milliseconds 404 | # %m = timestamp with milliseconds 405 | # %i = command tag 406 | # %e = SQL state 407 | # %c = session ID 408 | # %l = session 
line number 409 | # %s = session start timestamp 410 | # %v = virtual transaction ID 411 | # %x = transaction ID (0 if none) 412 | # %q = stop here in non-session 413 | # processes 414 | # %% = '%' 415 | # e.g. '<%u%%%d> ' 416 | #log_lock_waits = off # log lock waits >= deadlock_timeout 417 | #log_statement = 'none' # none, ddl, mod, all 418 | #log_temp_files = -1 # log temporary files equal or larger 419 | # than the specified size in kilobytes; 420 | # -1 disables, 0 logs all temp files 421 | log_timezone = 'UTC' 422 | 423 | 424 | #------------------------------------------------------------------------------ 425 | # RUNTIME STATISTICS 426 | #------------------------------------------------------------------------------ 427 | 428 | # - Query/Index Statistics Collector - 429 | 430 | #track_activities = on 431 | #track_counts = on 432 | #track_io_timing = off 433 | #track_functions = none # none, pl, all 434 | #track_activity_query_size = 1024 # (change requires restart) 435 | #update_process_title = on 436 | #stats_temp_directory = 'pg_stat_tmp' 437 | 438 | 439 | # - Statistics Monitoring - 440 | 441 | #log_parser_stats = off 442 | #log_planner_stats = off 443 | #log_executor_stats = off 444 | #log_statement_stats = off 445 | 446 | 447 | #------------------------------------------------------------------------------ 448 | # AUTOVACUUM PARAMETERS 449 | #------------------------------------------------------------------------------ 450 | 451 | #autovacuum = on # Enable autovacuum subprocess? 'on' 452 | # requires track_counts to also be on. 453 | #log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and 454 | # their durations, > 0 logs only 455 | # actions running at least this number 456 | # of milliseconds. 
457 | #autovacuum_max_workers = 3 # max number of autovacuum subprocesses 458 | # (change requires restart) 459 | #autovacuum_naptime = 1min # time between autovacuum runs 460 | #autovacuum_vacuum_threshold = 50 # min number of row updates before 461 | # vacuum 462 | #autovacuum_analyze_threshold = 50 # min number of row updates before 463 | # analyze 464 | #autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum 465 | #autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze 466 | #autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum 467 | # (change requires restart) 468 | #autovacuum_multixact_freeze_max_age = 400000000 # maximum Multixact age 469 | # before forced vacuum 470 | # (change requires restart) 471 | #autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for 472 | # autovacuum, in milliseconds; 473 | # -1 means use vacuum_cost_delay 474 | #autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for 475 | # autovacuum, -1 means use 476 | # vacuum_cost_limit 477 | 478 | 479 | #------------------------------------------------------------------------------ 480 | # CLIENT CONNECTION DEFAULTS 481 | #------------------------------------------------------------------------------ 482 | 483 | # - Statement Behavior - 484 | 485 | #search_path = '"$user",public' # schema names 486 | #default_tablespace = '' # a tablespace name, '' uses the default 487 | #temp_tablespaces = '' # a list of tablespace names, '' uses 488 | # only default tablespace 489 | #check_function_bodies = on 490 | #default_transaction_isolation = 'read committed' 491 | #default_transaction_read_only = off 492 | #default_transaction_deferrable = off 493 | #session_replication_role = 'origin' 494 | #statement_timeout = 0 # in milliseconds, 0 is disabled 495 | #lock_timeout = 0 # in milliseconds, 0 is disabled 496 | #vacuum_freeze_min_age = 50000000 497 | #vacuum_freeze_table_age = 150000000 498 | 
#vacuum_multixact_freeze_min_age = 5000000 499 | #vacuum_multixact_freeze_table_age = 150000000 500 | #bytea_output = 'hex' # hex, escape 501 | #xmlbinary = 'base64' 502 | #xmloption = 'content' 503 | 504 | # - Locale and Formatting - 505 | 506 | datestyle = 'iso, mdy' 507 | #intervalstyle = 'postgres' 508 | timezone = 'UTC' 509 | #timezone_abbreviations = 'Default' # Select the set of available time zone 510 | # abbreviations. Currently, there are 511 | # Default 512 | # Australia 513 | # India 514 | # You can create your own file in 515 | # share/timezonesets/. 516 | #extra_float_digits = 0 # min -15, max 3 517 | #client_encoding = sql_ascii # actually, defaults to database 518 | # encoding 519 | 520 | # These settings are initialized by initdb, but they can be changed. 521 | lc_messages = 'C' # locale for system error message 522 | # strings 523 | lc_monetary = 'C' # locale for monetary formatting 524 | lc_numeric = 'C' # locale for number formatting 525 | lc_time = 'C' # locale for time formatting 526 | 527 | # default configuration for text search 528 | default_text_search_config = 'pg_catalog.english' 529 | 530 | # - Other Defaults - 531 | 532 | #dynamic_library_path = '$libdir' 533 | #local_preload_libraries = '' 534 | 535 | 536 | #------------------------------------------------------------------------------ 537 | # LOCK MANAGEMENT 538 | #------------------------------------------------------------------------------ 539 | 540 | #deadlock_timeout = 1s 541 | #max_locks_per_transaction = 64 # min 10 542 | # (change requires restart) 543 | # Note: Each lock table slot uses ~270 bytes of shared memory, and there are 544 | # max_locks_per_transaction * (max_connections + max_prepared_transactions) 545 | # lock table slots. 
546 | #max_pred_locks_per_transaction = 64 # min 10 547 | # (change requires restart) 548 | 549 | 550 | #------------------------------------------------------------------------------ 551 | # VERSION/PLATFORM COMPATIBILITY 552 | #------------------------------------------------------------------------------ 553 | 554 | # - Previous PostgreSQL Versions - 555 | 556 | #array_nulls = on 557 | #backslash_quote = safe_encoding # on, off, or safe_encoding 558 | #default_with_oids = off 559 | #escape_string_warning = on 560 | #lo_compat_privileges = off 561 | #quote_all_identifiers = off 562 | #sql_inheritance = on 563 | #standard_conforming_strings = on 564 | #synchronize_seqscans = on 565 | 566 | # - Other Platforms and Clients - 567 | 568 | #transform_null_equals = off 569 | 570 | 571 | #------------------------------------------------------------------------------ 572 | # ERROR HANDLING 573 | #------------------------------------------------------------------------------ 574 | 575 | #exit_on_error = off # terminate session on any error? 576 | #restart_after_crash = on # reinitialize after backend crash? 577 | 578 | 579 | #------------------------------------------------------------------------------ 580 | # CONFIG FILE INCLUDES 581 | #------------------------------------------------------------------------------ 582 | 583 | # These options allow settings to be loaded from files other than the 584 | # default postgresql.conf. 
#include_dir = 'conf.d'			# include files ending in '.conf' from
					# directory 'conf.d'
#include_if_exists = 'exists.conf'	# include file only if it exists
#include = 'special.conf'		# include file


#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------

# Add settings for extensions here
--------------------------------------------------------------------------------
/stack/addons/redis/bin/configure:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Installs redis-server (from the rwky/redis PPA) when it is missing, then
# registers and starts a runit service for it under ${DEVSTEP_CONF}/service.

set -e          # fail fast
set -o pipefail # dont ignore exit codes when piping output
# set -x # enable debugging

# Only install when redis-server is not already on the PATH.
# (The original wrapped this in `$(...)`, which works only by accident of
# command-substitution exit-status semantics; call `which` directly instead.)
if ! which redis-server &> /dev/null; then
  echo "-----> [redis] Installing server..."
  (
    sudo add-apt-repository -y ppa:rwky/redis &&
    sudo apt-get update -q &&
    sudo apt-get install redis-server -y --force-yes -q
  ) &>> /tmp/configure-redis.log && \
    rm /tmp/configure-redis.log
  # BUG FIX: the cleanup above used to remove `/tmp-configure-redis.log`
  # (missing slash) while the log was written to `/tmp/configure-redis.log`,
  # so a *successful* install aborted the script via `set -e` when `rm`
  # failed on the nonexistent path, and the log file was left behind.
fi

addon_basedir="$( cd -P "$( dirname "$0" )" && pwd )"

echo "-----> [redis] Enabling autostart"
mkdir -p ${DEVSTEP_CONF}/service/redis
cp $addon_basedir/start-server ${DEVSTEP_CONF}/service/redis/run
chmod +x ${DEVSTEP_CONF}/service/redis/run

echo "-----> [redis] Starting server"
# Best-effort: runsvdir may not have picked the service up yet
sv start ${DEVSTEP_CONF}/service/redis &>/dev/null || true

echo "-----> [redis] Finished configuration!"
28 | -------------------------------------------------------------------------------- /stack/addons/redis/bin/detect: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Usage: bin/detect 3 | 4 | if [ -f "${1}/Gemfile.lock" ] && $(grep -q '^\s*redis\s\+' "${1}/Gemfile.lock"); then 5 | echo 'redis' 6 | exit 0 7 | fi 8 | 9 | echo 'no redis' 10 | exit 1 11 | -------------------------------------------------------------------------------- /stack/addons/redis/bin/start-server: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | mkdir -p ${HOME}/log 4 | 5 | exec /usr/bin/redis-server &> ${HOME}/log/redis.log 6 | -------------------------------------------------------------------------------- /stack/bashrc: -------------------------------------------------------------------------------- 1 | source /opt/devstep/load-env.sh 2 | -------------------------------------------------------------------------------- /stack/bin/build-project: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # usage: build-project -b -b 3 | set -eo pipefail 4 | 5 | if [ "${DEVSTEP_LOG}" = 'debug' ]; then 6 | set -x 7 | fi 8 | 9 | usage() { 10 | cat < -b -b 13 | EOF 14 | return 0 15 | } 16 | 17 | options=$(getopt -o h,b: -- "$@") 18 | 19 | if [ $? -ne 0 ]; then 20 | usage 21 | exit 1 22 | fi 23 | 24 | eval set -- "$options" 25 | 26 | # This is pretty hacky but werks 27 | FORCED_BUILDPACKS="" 28 | declare -a buildpacks 29 | while :; do 30 | case "$1" in 31 | -h|--help) usage && exit 1;; 32 | -b) buildpacks+=("${2}"); FORCED_BUILDPACKS='1'; shift 2;; 33 | *) shift 1 && break;; 34 | esac 35 | done 36 | 37 | export FORCED_BUILDPACKS 38 | 39 | project_root=${1:-`pwd`} 40 | cache_root=${2:-"${HOME}/cache"} 41 | buildpack_root=/opt/devstep/buildpacks 42 | 43 | if [ "${project_root}" = '/' ]; then 44 | echo "Can't build root dir!" 
45 | exit 1 46 | fi 47 | 48 | mkdir -p $project_root 49 | mkdir -p $cache_root 50 | mkdir -p $buildpack_root 51 | mkdir -p ${HOME}/.profile.d 52 | 53 | function output_redirect() { 54 | if [[ "$slug_file" == "-" ]]; then 55 | cat - 1>&2 56 | else 57 | cat - 58 | fi 59 | } 60 | 61 | function echo_title() { 62 | echo $'\e[1G----->' $* | output_redirect 63 | } 64 | 65 | function echo_normal() { 66 | echo $'\e[1G ' $* | output_redirect 67 | } 68 | 69 | function ensure_indent() { 70 | while read line; do 71 | if [[ "$line" == --* ]]; then 72 | echo $'\e[1G'$line | output_redirect 73 | else 74 | echo $'\e[1G ' "$line" | output_redirect 75 | fi 76 | done 77 | } 78 | 79 | # In heroku, there are two separate directories, and some 80 | # buildpacks expect that. 81 | # TODO: Figure out if this is needed 82 | # cp -r $project_dir/. $build_root 83 | 84 | ## Buildpack fixes 85 | 86 | export REQUEST_ID=$(openssl rand -base64 32 2>/dev/null) 87 | export APP_DIR="$project_root" 88 | # export HOME="$project_root" 89 | 90 | ## Fix directory permissions 91 | 92 | (cd $project_root && /opt/devstep/bin/fix-permissions) 93 | 94 | ## Buildpack detection 95 | 96 | declare -a selected_buildpacks 97 | if [[ -z "${buildpacks[@]}" ]]; then 98 | buildpacks=($buildpack_root/*) 99 | for buildpack in "${buildpacks[@]}"; do 100 | if $($buildpack/bin/detect "${project_root}" &>/dev/null); then 101 | selected_buildpacks+=($buildpack) 102 | fi 103 | done 104 | else 105 | for buildpack in "${buildpacks[@]}"; do 106 | selected_buildpacks+=(${buildpack_root}/${buildpack}) 107 | done 108 | fi 109 | 110 | ## Compile! 111 | 112 | if [[ -n "${selected_buildpacks[@]}" ]]; then 113 | # TODO: This output is not needed if a single buildpack was detected 114 | echo_title "Building project at '${project_root}' with the following buildpacks:" 115 | for bp in "${selected_buildpacks[@]}"; do 116 | echo "- $bp" | ensure_indent 117 | done 118 | else 119 | echo_title "Unable to identify a buildpack for the workspace!" 
120 | exit 0 121 | fi 122 | 123 | for bp in "${selected_buildpacks[@]}"; do 124 | echo_title "Building with $bp" 125 | $bp/bin/compile "$project_root" "$cache_root" | ensure_indent 126 | done 127 | 128 | echo_title "Build finished successfully!" 129 | 130 | ## Let the user know a reload is needed 131 | if [ "${FORCED_BUILDPACKS}" = '1' ]; then 132 | echo "********************************************************************************" 133 | echo "* HEADS UP: Your environment is likely to have changed, please run \`reload-env'" 134 | fi 135 | 136 | ## Save on disk space if wanted 137 | if [ "${CLEANUP}" = '1' ]; then 138 | echo '-----> Cleaning up...' 139 | 140 | echo " Running 'sudo rm -rf $cache_root/*'" 141 | sudo rm -rf $cache_root/* 142 | 143 | echo " Running 'sudo rm -rf tmp/*'" 144 | sudo rm -rf /tmp/* 145 | fi 146 | -------------------------------------------------------------------------------- /stack/bin/configure-addons: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | addons_root="/opt/devstep/addons" 5 | 6 | available_addons() { 7 | for addon in "${addons_root}"/*; do 8 | echo " - $(basename $addon)" 9 | done 10 | } 11 | 12 | usage() { 13 | cat < 16 | configure-addons 17 | 18 | Available addons: 19 | $(available_addons) 20 | EOF 21 | return 0 22 | } 23 | 24 | options=$(getopt -o h -l help,auto -- "$@") 25 | 26 | if [ $? 
-ne 0 ]; then 27 | usage 28 | exit 1 29 | fi 30 | 31 | eval set -- "$options" 32 | 33 | AUTO='0' 34 | while :; do 35 | case "$1" in 36 | -h|--help) usage && exit 1;; 37 | --auto) AUTO='1' && shift 1;; 38 | *) shift 1 && break;; 39 | esac 40 | done 41 | 42 | function output_redirect() { 43 | if [[ "$slug_file" == "-" ]]; then 44 | cat - 1>&2 45 | else 46 | cat - 47 | fi 48 | } 49 | 50 | function echo_title() { 51 | echo $'\e[1G----->' $* | output_redirect 52 | } 53 | 54 | function ensure_indent() { 55 | while read line; do 56 | if [[ "$line" == --* ]]; then 57 | echo $'\e[1G'$line | output_redirect 58 | else 59 | echo $'\e[1G ' "$line" | output_redirect 60 | fi 61 | done 62 | } 63 | 64 | declare -a SELECTED_ADDONS 65 | if [ "${AUTO}" = '1' ]; then 66 | [ "$#" != '1' ] && 67 | usage && 68 | exit 1 69 | 70 | project_root="${1}" 71 | ADDONS=("${addons_root}"/*) 72 | for addon in "${ADDONS[@]}"; do 73 | if $($addon/bin/detect "${project_root}" &>/dev/null); then 74 | SELECTED_ADDONS+=("$(basename $addon)") 75 | fi 76 | done 77 | else 78 | SELECTED_ADDONS=$* 79 | fi 80 | 81 | if [[ -z "${SELECTED_ADDONS[@]}" ]]; then 82 | echo_title "Unable to identify an addon for the workspace!" 83 | exit 0 84 | fi 85 | 86 | # Validate provided addons 87 | for addon in ${SELECTED_ADDONS[@]}; do 88 | if ! [ -d ${addons_root}/${addon} ]; then 89 | echo "Invalid addon '${addon}'" 90 | usage 91 | exit 1 92 | fi 93 | done 94 | 95 | # Configure addons 96 | for addon in ${SELECTED_ADDONS[@]}; do 97 | echo_title "Configuring ${addon}..." 98 | ${addons_root}/${addon}/bin/configure | ensure_indent 99 | done 100 | 101 | ## Save on disk space if wanted 102 | if [ "${CLEANUP}" = '1' ]; then 103 | echo '-----> Cleaning up...' 
104 | 105 | echo " Running 'rm -rf $cache_root/*'" 106 | rm -rf $cache_root/* 107 | 108 | echo " Running 'sudo rm -rf tmp/*'" 109 | sudo rm -rf /tmp/* 110 | fi 111 | -------------------------------------------------------------------------------- /stack/bin/create-cache-symlinks: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | cache_root="${HOME}/cache" 5 | 6 | empty_directory() { 7 | if [ $(ls -A "${1}" 2>/dev/null | wc -l) = '0' ]; then 8 | return 0 9 | else 10 | return 1 11 | fi 12 | } 13 | 14 | # Centralize cache directories on a single place to simplify caching on the host 15 | if ! [ -L /var/cache/apt/archives ]; then 16 | mkdir -p $cache_root/apt 17 | # Warm up cache with packages available on the container if cache is empty 18 | if (empty_directory "$cache_root/apt") && ! (empty_directory "/var/cache/apt/archives"); then 19 | sudo bash -c "shopt -s dotglob && mv /var/cache/apt/archives/* $cache_root/apt" 20 | fi 21 | sudo rm -rf /var/cache/apt/archives 22 | sudo ln -s $cache_root/apt /var/cache/apt/archives 23 | fi 24 | 25 | if ! [ -L /var/lib/apt/lists ]; then 26 | mkdir -p $cache_root/apt-lists 27 | # Warm up cache with packages available on the container if cache is empty 28 | if (empty_directory "$cache_root/apt-lists") && ! (empty_directory "/var/lib/apt/lists"); then 29 | sudo bash -c "shopt -s dotglob && mv /var/lib/apt/lists/* $cache_root/apt-lists" 30 | fi 31 | mkdir -p $cache_root/apt-lists/partial 32 | sudo rm -rf /var/lib/apt/lists 33 | sudo ln -s $cache_root/apt-lists /var/lib/apt/lists 34 | fi 35 | 36 | if ! [ -d "$(readlink /var/cache/apt/archives)/partial" ]; then 37 | # echo 'Creating archives dir' 38 | sudo mkdir -p "$(readlink /var/cache/apt/archives)/partial" 39 | fi 40 | if ! 
[ -d "$(readlink /var/lib/apt/lists)/partial" ]; then 41 | # echo 'Creating lists dir' 42 | sudo mkdir -p "$(readlink /var/lib/apt/lists)/partial" 43 | fi 44 | -------------------------------------------------------------------------------- /stack/bin/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # This is the entrypoint for Devstep images that loads the environment 4 | # before running the commands and does some small cleanup on the container 5 | 6 | # This is needed since some custom logic can be injected into the container's 7 | # init process by "plugins" (like the squid3-ssl contrib) 8 | find /etc/devstep/init.d -size 0 -print0 | xargs -0 rm &>/dev/null 9 | 10 | source /opt/devstep/load-env.sh 11 | eval "/opt/devstep/bin/init $@" 12 | -------------------------------------------------------------------------------- /stack/bin/exec-entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/bash -l 2 | 3 | # This is the entrypoint for Devstep exec commands that loads the environment 4 | # before running the commands 5 | 6 | source /opt/devstep/load-env.sh 7 | eval "$@" 8 | -------------------------------------------------------------------------------- /stack/bin/fix-permissions: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | info=( $(stat -Lc"%U %g %u" `pwd`) ) 4 | user=${info[0]} 5 | gid=${info[1]} 6 | uid=${info[2]} 7 | 8 | if [ $user = 'developer' ]; then 9 | true # Nothing to do here 10 | 11 | # If project is owned by root, it means it was built with a Dockerfile, so we 12 | # chown it to the developer user 13 | elif [ "${uid}" = '0' ]; then 14 | if [ `pwd` != '/' ]; then 15 | echo "-----> Setting '`pwd`' owner to 'developer'" 16 | sudo chown -R developer: `pwd` 17 | fi 18 | 19 | else 20 | # If the directory has some other uid, there's nothing we can do right now 21 | echo "-----> The 
workspace directory is owned by a different user!" 22 | exit 1 23 | fi 24 | 25 | 26 | info=( $(stat -Lc"%U %g %u" ${HOME}/cache) ) 27 | user=${info[0]} 28 | gid=${info[1]} 29 | uid=${info[2]} 30 | 31 | if [ $user != 'developer' ]; then 32 | echo "-----> Changing cache dir owner to 'developer'" 33 | sudo chown developer: ${HOME}/cache 34 | fi 35 | -------------------------------------------------------------------------------- /stack/bin/forward-linked-ports: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Stolen from http://docs.docker.io/en/latest/use/ambassador_pattern_linking/#the-svendowideit-ambassador-dockerfile 4 | 5 | env | grep _TCP= | sed 's/.*_PORT_\([0-9]*\)_TCP=tcp:\/\/\(.*\):\(.*\)/socat TCP4-LISTEN:\1,fork,reuseaddr TCP4:\2:\3 \&/' | sh 6 | -------------------------------------------------------------------------------- /stack/bin/hack: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eo pipefail 3 | 4 | /opt/devstep/bin/build-project $PWD "${1}" 5 | 6 | exec /bin/bash 7 | -------------------------------------------------------------------------------- /stack/bin/init: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python2 -u 2 | 3 | # This is a trimmed down version of https://github.com/phusion/baseimage-docker/blob/master/image/bin/my_init 4 | 5 | import os, os.path, sys, stat, signal, errno, argparse, time, json, re, posixfile 6 | 7 | KILL_PROCESS_TIMEOUT = 5 8 | KILL_ALL_PROCESSES_TIMEOUT = 5 9 | 10 | LOG_LEVEL_ERROR = 1 11 | LOG_LEVEL_WARN = 1 12 | LOG_LEVEL_INFO = 2 13 | LOG_LEVEL_DEBUG = 3 14 | 15 | log_level = None 16 | 17 | class AlarmException(Exception): 18 | pass 19 | 20 | def error(message): 21 | if log_level >= LOG_LEVEL_ERROR: 22 | sys.stderr.write("*** %s\n" % message) 23 | 24 | def warn(message): 25 | if log_level >= LOG_LEVEL_WARN: 26 | print("*** %s" % message) 27 | 28 | 
def waitpid_reap_other_children(pid):
    """Wait for child `pid` to exit, reaping any other children that
    terminate in the meantime (their statuses are discarded).

    Returns the raw waitpid() status for `pid`, or None when there is no
    such child (already reaped, or it never existed).
    """
    try:
        # Non-blocking first check: the child may already be a zombie.
        this_pid, status = os.waitpid(pid, os.WNOHANG)
    except OSError as e:
        if e.errno == errno.ECHILD or e.errno == errno.ESRCH:
            return None
        raise
    if this_pid == pid:
        # Already exited and reaped by the WNOHANG call above. The
        # original code fell through into the blocking loop here and
        # would hang (or die on an uncaught ECHILD) forever.
        return status
    while True:
        try:
            this_pid, status = os.waitpid(-1, 0)
        except OSError as e:
            if e.errno == errno.ECHILD:
                # No children left at all; `pid` can never be reaped now.
                return None
            raise
        if this_pid == pid:
            return status
def run_command_killable(*argv):
    """Run `argv` as a child process and wait for it to finish.

    Other children exiting in the meantime are reaped as a side effect.
    If the wait is interrupted (e.g. KeyboardInterrupt), the child is
    shut down first and the exception re-raised. A non-zero or unknown
    exit status terminates the init process via sys.exit(1).
    """
    command = argv[0]
    child_pid = os.spawnvp(os.P_NOWAIT, command, argv)
    try:
        exit_status = waitpid_reap_other_children(child_pid)
    except BaseException:
        warn("An error occurred. Aborting.")
        stop_child_process(command, child_pid)
        raise
    if exit_status == 0:
        return
    if exit_status is None:
        error("%s exited with unknown status\n" % command)
    else:
        error("%s failed with status %d\n" % (command, os.WEXITSTATUS(exit_status)))
    sys.exit(1)
def get_runit_services(dir = "/etc/devstep/service"):
    """Return the names of the entries in `dir` that are directories,
    i.e. the runit service definitions. Raises OSError when `dir` itself
    does not exist.
    """
    services = []
    for entry in os.listdir(dir):
        if os.path.isdir(os.path.join(dir, entry)):
            services.append(entry)
    return services
% (args.main_command[0], exit_status)) 222 | except KeyboardInterrupt: 223 | stop_child_process(args.main_command[0], pid) 224 | raise 225 | except BaseException as s: 226 | warn("An error occurred. Aborting.") 227 | stop_child_process(args.main_command[0], pid) 228 | raise 229 | sys.exit(exit_status) 230 | finally: 231 | if not args.skip_runit: 232 | shutdown_runit_services() 233 | if not runit_exited: 234 | stop_child_process("runit daemon", runit_pid) 235 | wait_for_runit_services() 236 | 237 | # Parse options. 238 | parser = argparse.ArgumentParser(description = 'Initialize the system.') 239 | parser.add_argument('main_command', metavar = 'MAIN_COMMAND', type = str, nargs = '*', 240 | help = 'The main command to run. (default: runit)') 241 | parser.add_argument('--skip-startup-files', dest = 'skip_startup_files', 242 | action = 'store_const', const = True, default = False, 243 | help = 'Skip running /etc/devstep/init.d/* and /etc/rc.local') 244 | parser.add_argument('--skip-runit', dest = 'skip_runit', 245 | action = 'store_const', const = True, default = False, 246 | help = 'Do not run runit services') 247 | parser.add_argument('--no-kill-all-on-exit', dest = 'kill_all_on_exit', 248 | action = 'store_const', const = False, default = True, 249 | help = 'Don\'t kill all processes on the system upon exiting') 250 | parser.add_argument('--quiet', dest = 'log_level', 251 | action = 'store_const', const = LOG_LEVEL_WARN, default = LOG_LEVEL_INFO, 252 | help = 'Only print warnings and errors') 253 | args = parser.parse_args() 254 | log_level = args.log_level 255 | 256 | if os.getenv('DEVSTEP_LOG', None) != None: 257 | level = os.environ['DEVSTEP_LOG'].lower() 258 | if level == 'debug': 259 | log_level = LOG_LEVEL_DEBUG 260 | elif level == 'info': 261 | log_level = LOG_LEVEL_INFO 262 | elif level == 'error': 263 | log_level = LOG_LEVEL_ERROR 264 | elif level == 'warning': 265 | log_level = LOG_LEVEL_WARN 266 | 267 | if args.skip_runit and len(args.main_command) == 0: 
268 | error("When --skip-runit is given, you must also pass a main command.") 269 | sys.exit(1) 270 | 271 | # Run main function. 272 | signal.signal(signal.SIGTERM, lambda signum, frame: ignore_signals_and_raise_keyboard_interrupt('SIGTERM')) 273 | signal.signal(signal.SIGINT, lambda signum, frame: ignore_signals_and_raise_keyboard_interrupt('SIGINT')) 274 | signal.signal(signal.SIGALRM, lambda signum, frame: raise_alarm_exception()) 275 | try: 276 | main(args) 277 | except KeyboardInterrupt: 278 | warn("Init system aborted.") 279 | exit(2) 280 | finally: 281 | if args.kill_all_on_exit: 282 | kill_all_processes(KILL_ALL_PROCESSES_TIMEOUT) 283 | -------------------------------------------------------------------------------- /stack/buildpacks/bats/bin/compile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # usage: bin/compile 3 | 4 | set -eo pipefail 5 | 6 | cache=$2/bats 7 | mkdir -p $cache 8 | 9 | if $(which bats &>/dev/null); then 10 | echo '-----> Bats already installed, skipping' 11 | else 12 | echo '-----> Downloading bats from GitHub...' 13 | if ! 
#!/usr/bin/env bash
# usage: bin/detect <build-dir>
#
# Succeed (printing the buildpack name) when the project contains at
# least one *.bats test file anywhere under $1; otherwise print "no"
# and fail so the buildpack is skipped.

set -eo pipefail

# `sed 1q` stops find after the first match instead of walking the
# whole tree.
if [ -n "$(find "$1" -type f -name '*.bats' | sed 1q)" ]; then
  echo Bats
else
  echo no
  exit 1
fi
# Print the download URL for a Go release tarball ($2) of version $1.
# Releases up to and including go1.2.1 were hosted on Google Code;
# everything newer is served from storage.googleapis.com.
urlfor() {
    ver=$1
    file=$2
    case $ver in
        go1.0*|go1.1beta*|go1.1rc*|go1.1|go1.1.*|go1.2beta*|go1.2rc*|go1.2|go1.2.1)
            # Legacy Google Code location for old releases.
            echo http://go.googlecode.com/files/$file
            ;;
        *)
            echo https://storage.googleapis.com/golang/$file
            ;;
    esac
}
91 | # exit 1 92 | # fi 93 | 94 | # if test -d $cache/$ver/go 95 | if test -d $HOME/go 96 | then 97 | # echo "-----> Using $ver" 98 | echo "-----> Using $(${HOME}/go/bin/go version)" 99 | else 100 | # rm -rf $cache/* # be sure not to build up cruft 101 | mkdir -p $cache/$ver 102 | cd $cache/$ver 103 | # echo -n "-----> Installing $ver..." 104 | # curl -sO $url 105 | # tar zxf $file 106 | # rm -f $file 107 | if test -f $cache/$ver/$file; then 108 | echo "-----> Using $ver" 109 | else 110 | echo "-----> Downloading ${ver}..." 111 | curl -sLO $url 112 | echo "done" 113 | fi 114 | tar zxf $file -C ${HOME} 115 | echo " done" 116 | cd - >/dev/null 117 | fi 118 | 119 | # mkdir -p $build/bin 120 | # GOBIN=$build/bin export GOBIN 121 | # GOROOT=$cache/$ver/go export GOROOT 122 | GOROOT=${HOME}/go export GOROOT 123 | # GOPATH=$build/.heroku/go export GOPATH 124 | GOPATH=${HOME}/gocode export GOPATH 125 | # PATH=$GOROOT/bin:$PATH 126 | PATH=$GOBIN:$GOROOT/bin:$PATH export PATH 127 | 128 | GOBIN=$GOPATH/bin export GOBIN 129 | 130 | # if ! (test -d $build/Godeps || (which hg >/dev/null && which bzr >/dev/null)) 131 | # then 132 | # echo 133 | # echo " Tired of waiting for bzr and hg?" 134 | # echo " Try github.com/kr/godep for faster deploys." 135 | # echo 136 | # 137 | # echo -n " Installing Virtualenv..." 138 | # virtualenv --python $python --distribute --never-download --prompt='(venv) ' $venv > /dev/null 2>&1 139 | # . $venv/bin/activate > /dev/null 2>&1 140 | # echo " done" 141 | # 142 | # echo -n " Installing Mercurial..." 143 | # pip install mercurial > /dev/null 2>&1 144 | # echo " done" 145 | # 146 | # echo -n " Installing Bazaar..." 147 | # pip install bzr > /dev/null 2>&1 148 | # echo " done" 149 | # fi 150 | heroku_buildpack_bin='https://github.com/kr/heroku-buildpack-go/raw/master/linux-amd64/bin' 151 | target="${HOME}/bin" 152 | if [ -f $build/Godeps ] || [ -d $build/Godeps ]; then 153 | if ! 
[ -f ${target}/godep ]; then 154 | echo "-----> Installing godep to ${target}..." 155 | if ! [ -f ${cache}/godep ]; then 156 | curl -L -s ${heroku_buildpack_bin}/godep > ${cache}/godep 157 | fi 158 | cp ${cache}/godep ${target}/godep 159 | chmod +x ${target}/godep 160 | echo ' done' 161 | else 162 | echo "-----> Using ${target}/godep" 163 | fi 164 | fi 165 | PATH=$target:$PATH 166 | 167 | p=$GOPATH/src/$name 168 | # mkdir -p $p 169 | # cp -R $build/* $p 170 | 171 | # Default to $SOURCE_VERSION environment variable 172 | GO_LINKER_VALUE=${SOURCE_VERSION} 173 | 174 | # allow apps to specify cgo flags and set up /app symlink so things like CGO_CFLAGS=-I/app/... work 175 | # env_dir="$3" 176 | # if [ -d "$env_dir" ] 177 | # then 178 | # ln -sfn $build /app/code 179 | # for key in CGO_CFLAGS CGO_CPPFLAGS CGO_CXXFLAGS CGO_LDFLAGS GO_LINKER_SYMBOL GO_LINKER_VALUE 180 | # do 181 | # if [ -f "$env_dir/$key" ] 182 | # then 183 | # export "$key=$(cat "$env_dir/$key")" 184 | # fi 185 | # done 186 | # fi 187 | 188 | if [ -d $GOPATH ]; then 189 | sudo chown developer:developer $GOPATH 190 | if [ -d $GOPATH/src ]; then 191 | sudo chown developer:developer $GOPATH/src 192 | for dir in $GOPATH/src/*; do 193 | sudo chown developer:developer $dir 194 | done 195 | for dir in $GOPATH/src/**/*; do 196 | sudo chown developer:developer $dir 197 | done 198 | fi 199 | else 200 | mkdir -p $GOBIN 201 | mkdir -p $GOPATH 202 | fi 203 | if ! [ -f ${HOME}/.profile.d/go.sh ]; then 204 | echo "-----> Configuring ${ver}..." 
205 | echo "export GOPATH=$GOPATH" > ${HOME}/.profile.d/go.sh 206 | echo "export GOROOT=$GOROOT" >> ${HOME}/.profile.d/go.sh 207 | echo "export GOBIN=$GOBIN" >> ${HOME}/.profile.d/go.sh 208 | echo 'export PATH=$PATH:$GOROOT/bin' >> ${HOME}/.profile.d/go.sh 209 | echo 'export PATH=$PATH:$GOBIN' >> ${HOME}/.profile.d/go.sh 210 | echo 'done' 211 | fi 212 | if [ -z "${name}" ] && [ -n "${GO_PROJECT_NAME}" ]; then 213 | name=$GO_PROJECT_NAME 214 | fi 215 | if [ -z "${name}" ]; then 216 | echo -e "-----> Unable to identify project name\nPlease add the full import path to \`.godir\` or set the GO_PROJECT_NAME environmental variable" 217 | p=$build 218 | else 219 | p=$GOPATH/src/$name 220 | if ! [ -d $GOPATH/src/$name ]; then 221 | mkdir -p $(dirname $p) 222 | if [ -L $p ] && ! [ -e $p ]; then 223 | unlink $p 224 | fi 225 | if ! [ -L $p ]; then 226 | ln -s $build $p 227 | fi 228 | fi 229 | fi 230 | 231 | 232 | # If $GO_LINKER_SYMBOL and GO_LINKER_VALUE are set, tell the linker to DTRT 233 | # FLAGS=(-tags heroku) 234 | # if [ -n "$GO_LINKER_SYMBOL" -a -n "$GO_LINKER_VALUE" ] 235 | # then 236 | # FLAGS=(${FLAGS[@]} -ldflags "-X $GO_LINKER_SYMBOL $GO_LINKER_VALUE") 237 | # fi 238 | 239 | unset GIT_DIR # unset git dir or it will mess with goinstall 240 | cd $p 241 | if test -e $build/Godeps 242 | then 243 | FLAGS=(-v) 244 | echo "-----> Running: godep go install ${FLAGS[@]} ./..." 245 | godep go install "${FLAGS[@]}" ./... 246 | else 247 | FLAGS=(-d -v) 248 | echo "-----> Running: go get ${FLAGS[@]} ./..." 249 | go get "${FLAGS[@]}" ./... 
250 | fi 251 | 252 | # rm -rf $build/.heroku 253 | 254 | # mkdir -p $build/.profile.d 255 | # echo 'PATH=$PATH:$HOME/bin' > $build/.profile.d/go.sh 256 | -------------------------------------------------------------------------------- /stack/buildpacks/golang/bin/detect: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # usage: bin/detect 3 | 4 | # From https://github.com/kr/heroku-buildpack-go/blob/master/bin/detect 5 | 6 | set -eo pipefail 7 | 8 | if test -n "$(find "$1" -type f -name '*.go' | sed 1q)"; then 9 | echo Go 10 | else 11 | echo no 12 | exit 1 13 | fi 14 | -------------------------------------------------------------------------------- /stack/buildpacks/inline/bin/compile: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: bin/compile 3 | 4 | set -e 5 | 6 | if [ -f $1/devstep.yml ]; then 7 | provision_steps=$(ruby -ryaml -e "puts (YAML.load(File.read('$1/devstep.yml'))['provision'] || []).size") 8 | if [[ $provision_steps -gt 0 ]]; then 9 | bp_bin="$( cd -P "$( dirname "$0" )" && pwd )" 10 | ${bp_bin}/provision "${1}/devstep.yml" 11 | elif [ -x $1/bin/compile ]; then 12 | exec "$1"/bin/compile "$@" 13 | fi 14 | elif [ -x $1/bin/compile ]; then 15 | exec "$1"/bin/compile "$@" 16 | fi 17 | -------------------------------------------------------------------------------- /stack/buildpacks/inline/bin/detect: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Usage: bin/detect 3 | 4 | if [ -f $1/devstep.yml ]; then 5 | provision_steps=$(ruby -ryaml -e "puts (YAML.load(File.read('$1/devstep.yml'))['provision'] || []).size") 6 | echo $provision_steps 7 | if [[ $provision_steps -gt 0 ]]; then 8 | echo 'devstep' && exit 0 9 | fi 10 | fi 11 | 12 | if [ -x $1/bin/compile ]; then 13 | echo 'inline' && exit 0 14 | fi 15 | 16 | exit 1 17 | 
#!/usr/bin/env ruby
# usage: bin/provision <path-to-devstep.yml>
#
# Run each command listed under the `provision` key of the given YAML
# file, stopping at -- and propagating the exit status of -- the first
# command that fails.

require 'yaml'

commands = YAML.load(File.read(ARGV[0]))['provision'] || []

commands.each do |cmd|
  next if system(*cmd)
  # BUG FIX: the original used `exit $?.to_i`, but Process::Status#to_i
  # is the raw wait(2) status (exit code shifted left 8 bits), which the
  # OS truncates modulo 256 -- a command exiting 1 produced status 256
  # and this script reported success. Use the decoded #exitstatus, and
  # fall back to 1 when the command was killed by a signal or could not
  # be spawned at all (system returned nil).
  exit($? ? ($?.exitstatus || 1) : 1)
end
&& pwd )" 21 | bp_basedir=$(readlink -f $bp_basedir) 22 | BP_DIR="${bp_basedir}/heroku-buildpack-nodejs" 23 | BP_DIR=$(readlink -f $BP_DIR) 24 | 25 | mkdir -p $CACHE_DIR 26 | 27 | # mkdir -p "$BUILD_DIR/.heroku/node/" 28 | cd $BUILD_DIR 29 | # export PATH="$BUILD_DIR/.heroku/node/bin":$PATH 30 | 31 | ### Load dependencies 32 | 33 | source $BP_DIR/lib/output.sh 34 | source $BP_DIR/lib/json.sh 35 | source $BP_DIR/lib/failure.sh 36 | # source $BP_DIR/lib/environment.sh 37 | source $bp_basedir/lib/environment.sh 38 | # source $BP_DIR/lib/binaries.sh 39 | source $bp_basedir/lib/binaries.sh 40 | # source $BP_DIR/lib/cache.sh 41 | # source $BP_DIR/lib/dependencies.sh 42 | source $bp_basedir/lib/dependencies.sh 43 | 44 | ### Handle errors 45 | 46 | handle_failure() { 47 | header "Build failed" 48 | failure_message | indent 49 | } 50 | trap 'handle_failure' ERR 51 | 52 | ### Check initial state 53 | 54 | [ -e "$BUILD_DIR/node_modules" ] && PREBUILD=true || PREBUILD=false 55 | 56 | ### Failures that should be caught immediately 57 | 58 | if [[ -f $BUILD_DIR/package.json ]]; then 59 | fail_invalid_package_json "$BUILD_DIR" 60 | fi 61 | # warn_prebuilt_modules "$BUILD_DIR" 62 | warn_missing_package_json "$BUILD_DIR" 63 | 64 | ### Compile 65 | 66 | create_env() { 67 | # write_profile "$BP_DIR" "$BUILD_DIR" 68 | # write_profile "$BP_DIR" "$HOME" 69 | write_export "$BP_DIR" "$BUILD_DIR" 70 | # export_env_dir "$ENV_DIR" 71 | create_default_env 72 | } 73 | 74 | header "Creating runtime environment" 75 | create_env # can't indent the whole thing because piping causes subshells; no exporting possible 76 | list_node_config | indent 77 | 78 | install_bins() { 79 | local node_engine=$(read_json "$BUILD_DIR/package.json" ".engines.node") 80 | local iojs_engine=$(read_json "$BUILD_DIR/package.json" ".engines.iojs") 81 | local npm_engine=$(read_json "$BUILD_DIR/package.json" ".engines.npm") 82 | 83 | if [ -n "$iojs_engine" ]; then 84 | echo "engines.iojs (package.json): $iojs_engine 
(iojs)" 85 | else 86 | echo "engines.node (package.json): ${node_engine:-unspecified}" 87 | fi 88 | echo "engines.npm (package.json): ${npm_engine:-unspecified (use default)}" 89 | echo "" 90 | 91 | if [ -n "$iojs_engine" ]; then 92 | warn_node_engine "$iojs_engine" 93 | # install_iojs "$iojs_engine" "$BUILD_DIR/.heroku/node" 94 | install_iojs "$iojs_engine" "$HOME/node" "$CACHE_DIR" 95 | else 96 | warn_node_engine "$node_engine" 97 | # install_nodejs "$node_engine" "$BUILD_DIR/.heroku/node" 98 | install_nodejs "$node_engine" "$HOME/node" "$CACHE_DIR" 99 | fi 100 | # install_npm "$npm_engine" "$BUILD_DIR/.heroku/node" 101 | for f in $HOME/.profile.d/*.sh; do 102 | source $f 103 | done 104 | install_npm "$npm_engine" "$HOME/node" "$CACHE_DIR" 105 | warn_old_npm 106 | } 107 | 108 | if [[ -d "$HOME/node" ]]; then 109 | header "Node already installed, skipping installation" 110 | else 111 | header "Installing binaries" 112 | install_bins | indent 113 | fi 114 | 115 | # restore_cache() { 116 | # local cache_status=$(get_cache_status) 117 | # 118 | # if [ "$cache_status" == "disabled" ]; then 119 | # echo "Skipping (cache disabled)" 120 | # elif [ "$cache_status" == "invalidated" ]; then 121 | # echo "Skipping (cache invalidated)" 122 | # else 123 | # local cache_directories=$(get_cache_directories) 124 | # if [ "$cache_directories" == "" ]; then 125 | # echo "Loading 1 from cacheDirectories (default):" 126 | # restore_cache_directories "$BUILD_DIR" "$CACHE_DIR" "node_modules" 127 | # else 128 | # echo "Loading $(echo $cache_directories | wc -w | xargs) from cacheDirectories (package.json):" 129 | # restore_cache_directories "$BUILD_DIR" "$CACHE_DIR" $cache_directories 130 | # fi 131 | # fi 132 | # } 133 | # 134 | # header "Restoring cache" 135 | # restore_cache | indent 136 | 137 | for f in $HOME/.profile.d/*.sh; do 138 | source $f 139 | done 140 | # Cache npm packages on host machine 141 | npm config set cache $CACHE_DIR/npm 142 | 143 | build_dependencies() { 144 | if 
$PREBUILD; then 145 | echo "Prebuild detected (node_modules already exists)" 146 | rebuild_node_modules "$BUILD_DIR" 147 | else 148 | install_node_modules "$BUILD_DIR" 149 | fi 150 | } 151 | 152 | if [[ ! -L $BUILD_DIR/node_modules ]] && [[ -d $BUILD_DIR/node_modules ]]; then 153 | echo "${BUILD_DIR}/node_modules cant be a directory" 154 | exit 1 155 | fi 156 | 157 | if [[ -L $BUILD_DIR/node_modules ]] && [[ ! -e $BUILD_DIR/node_modules ]]; then 158 | unlink $BUILD_DIR/node_modules 159 | fi 160 | 161 | if [[ ! -L $BUILD_DIR/node_modules ]]; then 162 | mkdir -p $HOME/node_modules/npm 163 | ln -s $HOME/node_modules $BUILD_DIR/node_modules 164 | cat <<-STR > $DEVSTEP_CONF/init.d/15-node_modules-symlink.sh 165 | #!/bin/bash 166 | if [[ ! -L $BUILD_DIR/node_modules ]] && [[ -d $BUILD_DIR/node_modules ]]; then 167 | echo "WARNING: Your ${BUILD_DIR}/node_modules is a directory" 168 | fi 169 | if [[ ! -L $BUILD_DIR/node_modules ]]; then 170 | mkdir -p $HOME/node_modules/npm 171 | ln -s $HOME/node_modules $BUILD_DIR/node_modules 172 | fi 173 | STR 174 | chmod +x $DEVSTEP_CONF/init.d/15-node_modules-symlink.sh 175 | fi 176 | 177 | header "Building dependencies" 178 | build_dependencies | indent 179 | 180 | # cache_build() { 181 | # local cache_directories=$(get_cache_directories) 182 | # echo "Clearing previous node cache" 183 | # clear_cache 184 | # if [ "$cache_directories" == "" ]; then 185 | # echo "Saving 1 cacheDirectories (default):" 186 | # save_cache_directories "$BUILD_DIR" "$CACHE_DIR" "node_modules" 187 | # else 188 | # echo "Saving $(echo $cache_directories | wc -w | xargs) cacheDirectories (package.json):" 189 | # save_cache_directories "$BUILD_DIR" "$CACHE_DIR" $cache_directories 190 | # fi 191 | # } 192 | 193 | # header "Caching build" 194 | # cache_build | indent 195 | 196 | summarize_build() { 197 | cd $BUILD_DIR 198 | (npm ls --depth=0 | tail -n +2 || true) 2>/dev/null 199 | } 200 | 201 | header "Build succeeded!" 
# Decide whether a requested version still needs resolving via the
# semver.io service: anything other than an exact X.Y.Z triplet (ranges,
# partial versions, the empty string) needs resolution.
# Returns 0 (true) when resolution is needed, 1 otherwise.
needs_resolution() {
  local requested="$1"
  [[ "$requested" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] && return 1
  return 0
}
17 | # local version=$(curl --silent --get --data-urlencode "range=${version}" https://semver.herokuapp.com/node/resolve) 18 | local version=$(curl --silent --get --data-urlencode "range=${version}" https://semver.herokuapp.com/node/resolve) 19 | fi 20 | 21 | local tarball="node-v$version-$os-$cpu.tar.gz" 22 | # local download_url="http://s3pository.heroku.com/node/v$version/node-v$version-$os-$cpu.tar.gz" 23 | local download_url="http://s3pository.heroku.com/node/v$version/$tarball" 24 | # curl "$download_url" -s -o - | tar xzf - -C /tmp 25 | 26 | local tarball_path="$cache_dir/$tarball" 27 | if [ -f $tarball_path ]; then 28 | status "Using cached node $version tarball" 29 | else 30 | echo "Downloading and installing node $version..." 31 | curl -L $download_url > $tarball_path 32 | fi 33 | mkdir -p $dir 34 | tar xzf $tarball_path -C $dir --strip-components=1 35 | 36 | # mv /tmp/node-v$version-$os-$cpu/* $dir 37 | chmod +x $dir/bin/* 38 | } 39 | 40 | install_iojs() { 41 | local version="$1" 42 | local dir="$2" 43 | local cache_dir="$3" 44 | 45 | if needs_resolution "$version"; then 46 | echo "Resolving iojs version ${version:-(latest stable)} via semver.io..." 47 | version=$(curl --silent --get --data-urlencode "range=${version}" https://semver.herokuapp.com/iojs/resolve) 48 | fi 49 | 50 | echo "Downloading and installing iojs $version..." 
51 | local tarball="iojs-v$version-$os-$cpu.tar.gz" 52 | # local download_url="https://iojs.org/dist/v$version/iojs-v$version-$os-$cpu.tar.gz" 53 | local download_url="https://iojs.org/dist/v$version/$tarball" 54 | # curl $download_url -s -o - | tar xzf - -C /tmp 55 | 56 | local tarball_path="$cache_dir/$tarball" 57 | if [ -f $tarball_path ]; then 58 | status "Using cached iojs $version tarball" 59 | else 60 | curl -L $download_url > $tarball_path 61 | fi 62 | mkdir -p $dir 63 | tar xzf $tarball_path -C $dir --strip-components=1 64 | 65 | # mv /tmp/iojs-v$version-$os-$cpu/* $dir 66 | chmod +x $dir/bin/* 67 | } 68 | 69 | install_npm() { 70 | local version="$1" 71 | local cache_dir="$3" 72 | 73 | if [ "$version" == "" ]; then 74 | echo "Using default npm version: `npm --version`" 75 | else 76 | if needs_resolution "$version"; then 77 | echo "Resolving npm version ${version} via semver.io..." 78 | version=$(curl --silent --get --data-urlencode "range=${version}" https://semver.herokuapp.com/npm/resolve) 79 | fi 80 | if [[ `npm --version` == "$version" ]]; then 81 | echo "npm `npm --version` already installed with node" 82 | else 83 | echo "Downloading and installing npm $version (replacing version `npm --version`)..." 
# Install (and prune) the project's node modules in place.
# Prints a message and does nothing when the project has no
# package.json. Note: deliberately leaves the shell's working directory
# changed to the project dir on the install path, matching the original
# behavior the compile script relies on.
install_node_modules() {
  local project_dir=${1:-}

  if ! [ -e $project_dir/package.json ]; then
    echo "Skipping (no package.json)"
    return
  fi

  cd $project_dir
  echo "Pruning any extraneous modules"
  npm prune --unsafe-perm --userconfig $HOME/.npmrc 2>&1
  if [ -e $project_dir/npm-shrinkwrap.json ]; then
    echo "Installing node modules (package.json + shrinkwrap)"
  else
    echo "Installing node modules (package.json)"
  fi
  npm install --unsafe-perm --userconfig $HOME/.npmrc 2>&1
}
# Print the NPM_CONFIG_* and NODE_* environment variables currently set,
# for build-log debugging. `|| true` keeps `set -e` callers alive when no
# variable matches.
list_node_config() {
  echo ""
  printenv | grep ^NPM_CONFIG_ || true
  printenv | grep ^NODE_ || true
}

# Export a Heroku-style ENV_DIR: each file in $env_dir becomes an
# environment variable named after the file, valued with its contents.
#
# $1 - env_dir: directory with one file per variable (no-op when absent)
# $2 - whitelist_regex: only matching names are exported (default: all)
# $3 - blacklist_regex: matching names are never exported; defaults to
#      variables that would break the build environment (PATH, LD_PRELOAD…)
#
# Fixes over the previous version: the duplicated `[ -d "$env_dir" ]` check
# is gone, and iteration uses a glob instead of parsing `ls` output.
export_env_dir() {
  local env_dir=$1
  if [ -d "$env_dir" ]; then
    local whitelist_regex=${2:-''}
    local blacklist_regex=${3:-'^(PATH|GIT_DIR|CPATH|CPPATH|LD_PRELOAD|LIBRARY_PATH)$'}
    local e
    for e in "$env_dir"/*; do
      [ -e "$e" ] || continue # empty dir leaves the glob unexpanded
      e=$(basename "$e")
      echo "$e" | grep -E "$whitelist_regex" | grep -qvE "$blacklist_regex" &&
        export "$e=$(cat "$env_dir/$e")"
      : # swallow the && failure status so `set -e` callers survive
    done
  fi
}

# Copy the buildpack's profile scripts into the app's .profile.d so they
# are sourced on boot.
#
# $1 - bp_dir: buildpack root (must contain profile/)
# $2 - build_dir: app dir receiving .profile.d/
write_profile() {
  local bp_dir="$1"
  local build_dir="$2"
  mkdir -p "$build_dir/.profile.d"
  cp "$bp_dir"/profile/* "$build_dir/.profile.d/"
}

# Persist node PATH / NODE_HOME exports into $HOME/.profile.d (devstep keeps
# the runtime under $HOME/node instead of Heroku's $build_dir/.heroku/node).
#
# $1 - bp_dir: buildpack root (unused here, kept for interface parity)
# $2 - build_dir: app dir whose node_modules/.bin goes on the PATH
#
# Fix: ensure $HOME/.profile.d exists before writing into it.
write_export() {
  local bp_dir="$1"
  local build_dir="$2"
  mkdir -p "$HOME/.profile.d"
  echo "export PATH=\"\$HOME/node/bin:$build_dir/node_modules/.bin:\$PATH\"" > "$HOME/.profile.d/nodejs-paths.sh"
  echo "export NODE_HOME=\"\$HOME/node\"" >> "$HOME/.profile.d/nodejs-paths.sh"
}
# Install/enable a single PHP extension for the build.
#
# $1 - ext: extension name as required in composer.json (without "ext-")
# $2 - reason: human-readable origin of the requirement (e.g. "composer.lock")
#
# Resolution order:
#   1. the buildpack ships a conf.d ini for the extension -> download the
#      prebuilt .so from S3 unless it is already bundled with the runtime;
#   2. a .so with that name is bundled -> just write an "extension =" ini;
#   3. the extension is compiled into PHP -> nothing to do;
#   4. otherwise warn that the install may fail.
#
# NOTE(review): depends on file-level globals set by the surrounding compile
# script -- $BP_DIR, $PHP_DIR, $PHP_EXT_DIR, $S3_URL, $BUILD_DIR -- and on the
# helpers `indent`/`warning_inline` sourced from common.sh; verify those are
# in scope before calling.
install_ext () {
    local ext=$1
    local reason=${2:-}
    local ext_ini="$BP_DIR/conf/php/conf.d/ext-$ext.ini"
    local ext_so=
    local ext_dir=$(basename $(php-config --extension-dir))
    if [[ -f "$ext_ini" ]]; then
        # Ask PHP to parse the ini: the .so name may differ from the
        # extension name (e.g. opcache.so is declared as "zend-opcache").
        ext_so=$(php -r '$ini=parse_ini_file("'$ext_ini'"); echo $ext=$ini["zend_extension"]?:$ini["extension"]; exit((int)empty($ext));') # read .so name from .ini because e.g. opcache.so is named "zend-opcache"
        if [[ ! -f "$PHP_EXT_DIR/$ext_so" ]]; then
            # Not bundled with the runtime tarball: fetch the prebuilt
            # extension from the Heroku S3 bucket into devstep's $PHP_DIR.
            # curl --silent --location "${S3_URL}/extensions/${ext_dir}/${ext}.tar.gz" | tar xz -C $BUILD_DIR/.heroku/php
            curl --silent --location "${S3_URL}/extensions/${ext_dir}/${ext}.tar.gz" | tar xz -C $PHP_DIR
            echo "- ${ext} (${reason}; downloaded)" | indent
        else
            echo "- ${ext} (${reason}; bundled)" | indent
        fi
        # NOTE(review): this copies into $BUILD_DIR/.heroku/php while the
        # branches above were ported to $PHP_DIR -- relies on the compile
        # script's /app/.heroku/php -> $PHP_DIR symlink; confirm.
        cp "${ext_ini}" "${BUILD_DIR}/.heroku/php/etc/php/conf.d"
    elif [[ -f "${PHP_EXT_DIR}/${ext}.so" ]]; then
        # Bundled .so without a shipped ini: enable it with a one-liner ini.
        # echo "extension = ${ext}.so" > "${BUILD_DIR}/.heroku/php/etc/php/conf.d/ext-${ext}.ini"
        echo "extension = ${ext}.so" > "${PHP_DIR}/etc/php/conf.d/ext-${ext}.ini"
        echo "- ${ext} (${reason}; bundled)" | indent
    elif echo -n ${ext} | php -r 'exit((int)!extension_loaded(file_get_contents("php://stdin")));'; then
        # Already compiled into the interpreter -- nothing to install.
        : # echo "- ${ext} (${reason}; enabled by default)" | indent
    else
        warning_inline "Unknown extension ${ext} (${reason}), install may fail!"
    fi
}
for Composer installs from source) during the build in some circumstances, and it is set in SSH Git deploys to Heroku 58 | unset GIT_DIR 59 | 60 | # export_env_dir "$ENV_DIR" '^[A-Z_][A-Z0-9_]*$' '^(HOME|PATH|GIT_DIR|CPATH|CPPATH|LD_PRELOAD|LIBRARY_PATH|LD_LIBRARY_PATH|STACK|S3_URL|RTVS_URL|REQUEST_ID)$' 61 | 62 | # BUILDPACK_URL=${BUILDPACK_URL:-} # Anvil has none 63 | # BUILDPACK_BRANCH=$(expr "$BUILDPACK_URL" : '^.*/heroku-buildpack-php#\(..*\)$' || expr "$BUILDPACK_URL" : '^.*/heroku-buildpack-php\.git#\(..*\)$' || true) 64 | # BUILDPACK_BRANCH=${BUILDPACK_BRANCH:-master} 65 | # BUILDPACK_OWNER=$(expr "$BUILDPACK_URL" : '^.*/\(..*\)/heroku-buildpack-php' || true) 66 | 67 | # if [[ "$BUILDPACK_BRANCH" != v* && "$BUILDPACK_BRANCH" != "master" ]]; then 68 | # S3_URL="develop" 69 | # fi 70 | S3_URL="master" 71 | S3_URL="https://lang-php.s3.amazonaws.com/dist-${STACK}-${S3_URL:-"master"}" 72 | RTVS_URL="https://php-runtime-version-selector.herokuapp.com" 73 | 74 | cd $BUILD_DIR 75 | 76 | if [[ -s "composer.json" ]]; then 77 | cat composer.json | python -mjson.tool &> /dev/null || error "Could not parse composer.json; make sure it's valid!" 78 | else 79 | if [[ ! -f "composer.json" ]]; then 80 | warning 'No composer.json found. 81 | Using index.php to declare PHP applications is considered legacy 82 | functionality and may lead to unexpected behavior.' 83 | else 84 | notice 'Your composer.json is completely empty. 85 | Please change its contents to at least "{}" so it is valid JSON.' 86 | fi 87 | echo "{}" > composer.json 88 | fi 89 | 90 | # mkdir -p .heroku/php 91 | PHP_DIR="${HOME}/php" 92 | mkdir -p "${PHP_DIR}" 93 | 94 | # export COMPOSER_HOME=$CACHE_DIR/.composer 95 | export COMPOSER_HOME=$HOME/.composer 96 | mkdir -p $COMPOSER_HOME 97 | mkdir -p $CACHE_DIR/composer 98 | # Check if composer cache link is broken and fix it 99 | if [ -L $COMPOSER_HOME/cache ] && ! [ -e $COMPOSER_HOME/cache ]; then 100 | unlink $COMPOSER_HOME/cache 101 | fi 102 | if ! 
[ -L $COMPOSER_HOME/cache ]; then 103 | ln -s $CACHE_DIR/composer $COMPOSER_HOME/cache 104 | fi 105 | 106 | if [[ ! -L $BUILD_DIR/vendor ]] && [[ -d $BUILD_DIR/vendor ]]; then 107 | echo "${BUILD_DIR}/vendor cant be a directory" 108 | exit 1 109 | fi 110 | 111 | if [[ -L $BUILD_DIR/vendor ]] && [[ ! -e $BUILD_DIR/vendor ]]; then 112 | unlink $BUILD_DIR/vendor 113 | fi 114 | 115 | if [[ ! -L $BUILD_DIR/vendor ]]; then 116 | mkdir -p $HOME/composer-vendor 117 | ln -s $HOME/composer-vendor $BUILD_DIR/vendor 118 | cat <<-STR > $DEVSTEP_CONF/init.d/15-composer-vendor-symlink.sh 119 | #!/bin/bash 120 | if [[ ! -L $BUILD_DIR/vendor ]] && [[ -d $BUILD_DIR/node_modules ]]; then 121 | echo "WARNING: Your ${BUILD_DIR}/vendor is a directory" 122 | fi 123 | if [[ ! -L $BUILD_DIR/vendor ]]; then 124 | mkdir -p $HOME/composer-vendor 125 | ln -s $HOME/composer-vendor $BUILD_DIR/vendor 126 | fi 127 | STR 128 | chmod +x $DEVSTEP_CONF/init.d/15-composer-vendor-symlink.sh 129 | fi 130 | 131 | PHP_VERSION= 132 | HHVM_VERSION= 133 | if [[ "$STACK" == "cedar" ]]; then 134 | runtimes=$(cd $BP_DIR/support/build; ls {php-*,hhvm-3.2.0}) # cedar only has HHVM 3.2 135 | else 136 | runtimes=$(cd $BP_DIR/support/build; ls {php,hhvm}-*) 137 | fi 138 | engine="php" 139 | engine_r="php -r" 140 | if [[ -f "composer.json" ]]; then 141 | # if [[ ! -f "composer.lock" ]]; then 142 | # cat composer.json | python -c 'import sys, json; print sys.exit(any(key.count("/") for key in json.load(sys.stdin).get("require", {})))' 2> /dev/null || error "Your composer.json has dependencies, but no composer.lock 143 | # was found, check it into your repository alongside composer.json!" 
144 | # fi 145 | 146 | solver_payload=$(echo -n "$runtimes" | python -c ' 147 | import sys, json 148 | ret = {} 149 | ret["packages"] = sys.stdin.read().splitlines() 150 | c_json = dict(json.load(open("composer.json"))) # wrap in dict() to prevent simple errors with {} vs [] 151 | ret["json"] = {k: c_json.get(k, None) for k in ["minimum-stability", "prefer-stable"]} 152 | ret["json"]["platform"] = {k: dict(c_json.get("require", {})).get(k, None) for k in ["php", "hhvm"]} # wrap in dict() to prevent simple errors with {} vs [] 153 | try: 154 | c_lock = json.load(open("composer.lock")) 155 | ret["lock"] = {k: c_lock.get(k, None) for k in ["minimum-stability", "prefer-stable", "stability-flags"]} 156 | ret["lock"]["platform"] = {k: dict(c_lock.get("platform", {})).get(k, None) for k in ["php", "hhvm"]} # wrap in dict() to prevent simple errors with {} vs [] 157 | except IOError: 158 | ret["lock"] = None 159 | print json.dumps(ret) 160 | ' 2> /dev/null) || error "Couldn't parse composer.lock; it must be valid JSON. 161 | Run 'composer update' to have it re-generated by Composer." 162 | solver_result=$(echo -n "$solver_payload" | curl --silent --location --fail -H'Content-Type: application/json' -H"X-Build-Request-Id: ${REQUEST_ID:-}" -d @- "${RTVS_URL}/resolve?default_runtime=php") || error "Failed to resolve runtime requirements. 163 | If the issue persists, please contact support." 164 | while IFS="|" read runtime_name runtime_source runtime_constraint runtime_result; do 165 | if [[ $runtime_result ]]; then 166 | if [[ $runtime_source == "default" ]]; then 167 | status "No runtime required in composer.json, defaulting to ${runtime_name^^} $runtime_result." 168 | else 169 | status "Resolved $runtime_source requirement for ${runtime_name^^} to version ${runtime_result}." 
170 | if [[ "$runtime_constraint" == "$runtime_result" || "$runtime_constraint" == "=$runtime_result" ]]; then 171 | recommended_selector="~$runtime_result" 172 | if [[ $runtime_name == "hhvm" ]]; then 173 | recommended_selector="${recommended_selector%.*}" # strip "3.2.0" to just "3.2"; HHVM has different versioning 174 | fi 175 | warning "Your composer.json requirement for runtime '$runtime_name' 176 | specifies an exact-match constraint of '$runtime_constraint'. 177 | This means you will not automatically get more recent compatible 178 | versions of ${runtime_name^^} on push once they are available. As any such 179 | new version may contain security updates, we strongly suggest 180 | you use a more lenient selector like '$recommended_selector' instead. 181 | See https://devcenter.heroku.com/articles/php-support for info." 182 | fi 183 | fi 184 | if [[ $runtime_name == "php" ]]; then 185 | PHP_VERSION="$runtime_result" 186 | elif [[ $runtime_name == "hhvm" ]]; then 187 | HHVM_VERSION="$runtime_result" 188 | engine="hhvm" 189 | engine_r="hhvm --php -r" 190 | if [[ "$STACK" == "cedar" ]]; then 191 | warning_inline "Support for HHVM on the cedar stack is deprecated and 192 | will be removed soon. Please upgrade to cedar-14 to continue to 193 | use HHVM and get access to new releases. For more information, 194 | see https://devcenter.heroku.com/articles/cedar-14-migration" 195 | fi 196 | # if [[ -f "composer.lock" ]] && python -c 'import sys, json; print sys.exit(any(key.count("hhvm") for key in json.load(open("composer.lock")).get("platform", {})))' 2> /dev/null; then 197 | # warning_inline "Support for requiring HHVM in 'composer.json' but not in 198 | # 'composer.lock' is deprecated and will be removed soon. Please 199 | # update 'composer.lock' to contain the requirement. 
If you do not 200 | # have HHVM installed, you can re-generate the lock file using the 201 | # 'composer update --ignore-platform-reqs' command; the same switch 202 | # may be used with 'composer install' under the same circumstances." 203 | # fi 204 | fi 205 | else 206 | error "Could not resolve $runtime_source requirement for ${runtime_name^^} $runtime_constraint, 207 | please adjust the selector. The following runtimes are available: 208 | $(echo $runtimes | fold -s -w 65)" 209 | fi 210 | done <<<"$solver_result" 211 | fi 212 | 213 | status "Installing system packages..." 214 | 215 | # we need to run things in here, set it up! 216 | # ln -s $BUILD_DIR/.heroku /app/.heroku 217 | # export PATH=/app/.heroku/php/bin:$PATH 218 | export PATH=$PHP_DIR/bin:$PATH 219 | 220 | if [[ "${PHP_VERSION}" ]]; then 221 | # PHP_DIST_URL="$S3_URL/php-$PHP_VERSION.tar.gz" 222 | TARBALL="php-${PHP_VERSION}.tar.gz" 223 | PHP_DIST_URL="$S3_URL/$TARBALL" 224 | echo "- PHP $PHP_VERSION" | indent 225 | # curl --silent --location "$PHP_DIST_URL" | tar xz -C $BUILD_DIR/.heroku/php 226 | if ! 
[ -f $CACHE_DIR/$TARBALL ]; then 227 | curl --silent --location "$PHP_DIST_URL" > $CACHE_DIR/$TARBALL 228 | fi 229 | tar xzf "${CACHE_DIR}/${TARBALL}" -C $PHP_DIR 230 | PHP_EXT_DIR=$(php-config --extension-dir) 231 | 232 | # update config files 233 | # mkdir -p $BUILD_DIR/.heroku/php/etc/php 234 | # cp $BP_DIR/conf/php/php.ini $BUILD_DIR/.heroku/php/etc/php 235 | # cp $BP_DIR/conf/php/php-fpm.conf $BUILD_DIR/.heroku/php/etc/php 236 | # mkdir -p $BUILD_DIR/.heroku/php/etc/php/conf.d 237 | # store PHP version in the slug for possible later evaluation 238 | # mkdir -p $BUILD_DIR/.heroku/php/.versions/php 239 | # echo $PHP_VERSION > $BUILD_DIR/.heroku/php/.versions/php/php 240 | 241 | # update config files 242 | mkdir -p $PHP_DIR/etc/php 243 | cp $BP_DIR/conf/php/php.ini $PHP_DIR/etc/php 244 | cp $BP_DIR/conf/php/php-fpm.conf $PHP_DIR/etc/php 245 | mkdir -p $PHP_DIR/etc/php/conf.d 246 | # remember the version for future upgrade handling 247 | echo $PHP_VERSION > $HOME/php_version 248 | fi 249 | if [[ "${HHVM_VERSION}" ]]; then 250 | # HHVM_DIST_URL="$S3_URL/hhvm-$HHVM_VERSION.tar.gz" 251 | TARBALL="hhvm-${HHVM_VERSION}.tar.gz" 252 | HHVM_DIST_URL="$S3_URL/$TARBALL" 253 | echo "- HHVM $HHVM_VERSION" | indent 254 | # curl --silent --location "$HHVM_DIST_URL" | tar xz -C $BUILD_DIR/.heroku/php 255 | if ! [ -f $CACHE_DIR/$TARBALL ]; then 256 | curl --silent --location "$HHVM_DIST_URL" > $CACHE_DIR/$TARBALL 257 | fi 258 | tar xzf $CACHE_DIR/$TARBALL -C $PHP_DIR 259 | 260 | # store HHVM version in the slug for possible later evaluation 261 | # mkdir -p $BUILD_DIR/.heroku/php/.versions/hhvm 262 | # echo $HHVM_VERSION > $BUILD_DIR/.heroku/php/.versions/hhvm/hhvm 263 | echo $HHVM_VERSION > $HOME/hhvm_version 264 | 265 | # make HHVM accessible 266 | # export PATH=$PATH:/app/.heroku/php/usr/bin 267 | export PATH=$PATH:$PHP_DIR/usr/bin 268 | 269 | # so it'll start. 
remember to use the full path to the binary, or we'll get an infinite loop 270 | # hhvm() { LD_LIBRARY_PATH=/app/.heroku/php/usr/lib/x86_64-linux-gnu:/app/.heroku/php/usr/lib/hhvm:/app/.heroku/php/usr/lib `which hhvm` "$@"; } 271 | hhvm() { LD_LIBRARY_PATH=$PHP_DIR/usr/lib/x86_64-linux-gnu:$PHP_DIR/usr/lib/hhvm:$PHP_DIR/usr/lib `which hhvm` "$@"; } 272 | export -f hhvm 273 | fi 274 | 275 | if ! [ -d /app/.heroku ]; then 276 | sudo mkdir -p /app/.heroku 277 | sudo chown -R developer:developer /app/ 278 | fi 279 | 280 | if ! [ -L /app/.heroku/php ]; then 281 | ln -s $PHP_DIR /app/.heroku/php 282 | fi 283 | 284 | APACHE_VERSION="2.4.10" 285 | # APACHE_DIST_URL="$S3_URL/apache-$APACHE_VERSION.tar.gz" 286 | TARBALL="apache-${APACHE_VERSION}.tar.gz" 287 | APACHE_DIST_URL="$S3_URL/$TARBALL" 288 | echo "- Apache $APACHE_VERSION" | indent 289 | # curl --silent --location "$APACHE_DIST_URL" | tar xz -C $BUILD_DIR/.heroku/php 290 | if ! [ -f $CACHE_DIR/$TARBALL ]; then 291 | curl --silent --location "$APACHE_DIST_URL" > $CACHE_DIR/$TARBALL 292 | fi 293 | tar xzf $CACHE_DIR/$TARBALL -C $PHP_DIR 294 | # Apache; copy in our config 295 | cp $BP_DIR/conf/apache2/httpd.conf.default $PHP_DIR/etc/apache2/httpd.conf 296 | # store Apache version in the slug for possible later evaluation 297 | # mkdir -p $BUILD_DIR/.heroku/php/.versions/apache2 298 | # echo $APACHE_VERSION > $BUILD_DIR/.heroku/php/.versions/apache2/apache2 299 | echo $APACHE_VERSION > $HOME/apache2_version 300 | 301 | NGINX_VERSION="1.6.0" 302 | # NGINX_DIST_URL="$S3_URL/nginx-$NGINX_VERSION.tar.gz" 303 | TARBALL="nginx-${NGINX_VERSION}.tar.gz" 304 | NGINX_DIST_URL="$S3_URL/$TARBALL" 305 | echo "- Nginx $NGINX_VERSION" | indent 306 | # curl --silent --location "$NGINX_DIST_URL" | tar xz -C $BUILD_DIR/.heroku/php 307 | if ! 
[ -f $CACHE_DIR/$TARBALL ]; then 308 | curl --silent --location "$NGINX_DIST_URL" > $CACHE_DIR/$TARBALL 309 | fi 310 | tar xzf $CACHE_DIR/$TARBALL -C $PHP_DIR 311 | # nginx; copy in our config 312 | # cp $BP_DIR/conf/nginx/nginx.conf.default $BUILD_DIR/.heroku/php/etc/nginx/nginx.conf 313 | cp $BP_DIR/conf/nginx/nginx.conf.default $PHP_DIR/etc/nginx/nginx.conf 314 | # store Nginx version in the slug for possible later evaluation 315 | # mkdir -p $BUILD_DIR/.heroku/php/.versions/nginx 316 | # echo $NGINX_VERSION > $BUILD_DIR/.heroku/php/.versions/nginx/nginx 317 | echo $NGINX_VERSION > $HOME/nginx_version 318 | 319 | # handle extensions for PHP 320 | if [[ "${PHP_VERSION}" ]]; then 321 | status "Installing PHP extensions..." 322 | 323 | php() { 324 | # the newrelic extension logs to stderr which pollutes our build output on each invocation of PHP, the daemon it launches will prevent the build from finishing, and each call to PHP would be logged to NR as non-web traffic, so we disable it for the duration of this build 325 | `which php` -dnewrelic.enabled=0 -dnewrelic.loglevel=error -dnewrelic.daemon.dont_launch=3 -dnewrelic.daemon.loglevel=error "$@" 326 | } 327 | 328 | exts=() 329 | if [[ -f "composer.lock" ]]; then 330 | exts=($(cat composer.lock | python -c 'import sys, json, itertools; l=json.load(sys.stdin); print "\n".join(list(set(itertools.chain([plat[4:].lower() for plat in l.get("platform", []) if plat.startswith("ext-")], *[ [ pkg[4:].lower() for pkg in p.get("require", []) if pkg.startswith("ext-") ] for p in l.get("packages", []) ]))))' 2> /dev/null || true)) # convert to array 331 | ext_source="composer.lock" 332 | else 333 | exts=($(cat composer.json | python -c 'from __future__ import print_function; import sys, json; { print(key[4:].lower()) for key in json.load(sys.stdin)["require"] if key.startswith("ext-")}' 2> /dev/null || true)) # convert to array 334 | ext_source="composer.json" 335 | fi 336 | for ext in "${!exts[@]}"; do # loop over keys in 
case the array is empty or else it'll error out 337 | install_ext "${exts[$ext]}" $ext_source 338 | done 339 | 340 | # special treatment for New Relic; we enable it if we detect a license key for it 341 | install_newrelic_ext 342 | 343 | # special treatment for Blackfire; we enable it if we detect a server id and a server token for it 344 | install_blackfire_ext 345 | 346 | # if [[ ${#exts[@]} -eq 0 || ! ${exts[*]} =~ "zend-opcache" ]]; then 347 | # install_ext "zend-opcache" "automatic" 348 | # exts+=("zend-opcache") 349 | # fi 350 | fi 351 | 352 | status "Installing dependencies..." 353 | 354 | # check if we should use a composer.phar version bundled with the project 355 | if [[ -f "composer.phar" ]]; then 356 | [[ -x "composer.phar" ]] || error "File '/composer.phar' isn't executable; please 'chmod +x'!" 357 | $engine_r 'new Phar("composer.phar");' &> /dev/null || error "File '/composer.phar' is not a valid PHAR archive!" 358 | composer() { 359 | $engine composer.phar "$@" 360 | } 361 | export -f composer 362 | composer --version 2>&1 | grep "^Composer version" > /dev/null || error "File '/composer.phar' is not a Composer executable!" 363 | notice_inline "Using '/composer.phar' to install dependencies." 
364 | else 365 | # curl --silent --location "$S3_URL/composer.tar.gz" | tar xz -C $BUILD_DIR/.heroku/php 366 | curl --silent --location "$S3_URL/composer.tar.gz" | tar xz -C $PHP_DIR 367 | composer() { 368 | $engine `which composer` "$@" 369 | } 370 | export -f composer 371 | fi 372 | # echo composer version for info purposes 373 | # tail to get rid of outdated version warnings (Composer sends those to STDOUT instead of STDERR) 374 | composer --version 2> /dev/null | tail -n 1 | indent 375 | 376 | # throw a notice if people have added their vendor dir to Git; that's bad practice and makes everything slow and cluttered 377 | # if [[ -e "$(composer config vendor-dir 2> /dev/null | tail -n 1)" ]]; then # tail, as composer echos outdated version warnings to STDOUT 378 | # warning "Your Composer vendor dir is part of your Git repository. 379 | # That directory should not be under version control; only your 380 | # 'composer.json' and 'composer.lock' files should be added, which 381 | # will let Composer handle installation of dependencies on deploy. 382 | # To suppress this notice, first remove the folder from your index 383 | # by running 'git rm --cached $(composer config vendor-dir 2> /dev/null | tail -n 1)/'. 384 | # Next, edit your project's '.gitignore' file and add the folder 385 | # '/$(composer config vendor-dir 2> /dev/null | tail -n 1)/' to the list. 
386 | # For more info, refer to the Composer FAQ: http://bit.ly/1rlCSZU" 387 | # fi 388 | 389 | # handle custom oauth keys 390 | COMPOSER_GITHUB_OAUTH_TOKEN=${COMPOSER_GITHUB_OAUTH_TOKEN:-} 391 | if [[ -n "$COMPOSER_GITHUB_OAUTH_TOKEN" ]]; then 392 | if curl --fail --silent -H "Authorization: token $COMPOSER_GITHUB_OAUTH_TOKEN" https://api.github.com/rate_limit > /dev/null; then 393 | composer config -g github-oauth.github.com "$COMPOSER_GITHUB_OAUTH_TOKEN" &> /dev/null # redirect outdated version warnings (Composer sends those to STDOUT instead of STDERR) 394 | notice_inline 'Using $COMPOSER_GITHUB_OAUTH_TOKEN for GitHub OAuth.' 395 | else 396 | error 'Invalid $COMPOSER_GITHUB_OAUTH_TOKEN for GitHub OAuth!' 397 | fi 398 | else 399 | # don't forget to remove any stored key if it's gone from the env 400 | composer config -g --unset github-oauth.github.com &> /dev/null # redirect outdated version warnings (Composer sends those to STDOUT instead of STDERR) 401 | if curl --silent https://api.github.com/rate_limit | python -c 'import sys, json; sys.exit((json.load(sys.stdin)["resources"]["core"]["remaining"] > 0))'; then # yes, check > 0, not < 1 - exit status of 0 will trigger the if 402 | notice "You've reached the GitHub API's request rate limit. 403 | Composer will try and fall back to slower downloads from source. 
404 | It's strongly recommended you use a custom OAuth token; see 405 | http://devcenter.heroku.com/articles/php-support#custom-github-oauth-tokens" 406 | fi 407 | fi 408 | # no need for the token to stay around in the env 409 | unset COMPOSER_GITHUB_OAUTH_TOKEN 410 | 411 | # install dependencies unless composer.json is completely empty (in which case it'd talk to packagist.org which may be slow and is unnecessary) 412 | # cat composer.json | python -c 'import sys,json; sys.exit(not json.load(sys.stdin));' && composer install --no-dev --prefer-dist --optimize-autoloader --no-interaction 2>&1 | indent 413 | cat composer.json | python -c 'import sys,json; sys.exit(not json.load(sys.stdin));' && composer install --prefer-dist --optimize-autoloader --no-interaction 2>&1 | indent 414 | 415 | # composer show --installed heroku/heroku-buildpack-php &> /dev/null && error "Your composer.json requires 'heroku/heroku-buildpack-php'. 416 | # This package may only be used as a dependency in 'require-dev'!" 417 | 418 | if cat composer.json | python -c 'import sys,json; sys.exit("compile" not in json.load(sys.stdin).get("scripts", {}));'; then 419 | status "Running 'composer compile'..." 420 | # composer run-script --no-dev --no-interaction compile 2>&1 | indent 421 | composer run-script --no-interaction compile 2>&1 | indent 422 | fi 423 | 424 | status "Preparing runtime environment..." 
425 | 426 | # install this buildpack like a composer package 427 | # it will contain the apache/nginx/php configs and the boot script 428 | # TODO: warn if require-dev has the package using a different branch 429 | shopt -u dotglob # we don't want .git, .gitignore et al 430 | composer_vendordir=$(composer config vendor-dir 2> /dev/null | tail -n 1) # tail, as composer echos outdated version warnings to STDOUT 431 | composer_bindir=$(composer config bin-dir 2> /dev/null | tail -n 1) # tail, as composer echos outdated version warnings to STDOUT 432 | # figure out the package dir name to write to and copy to it 433 | hbpdir="$composer_vendordir/$(cat $BP_DIR/composer.json | python -c 'import sys, json; print json.load(sys.stdin)["name"]')" 434 | mkdir -p "$BUILD_DIR/$hbpdir" 435 | cp -r "$BP_DIR"/* "$BUILD_DIR/$hbpdir/" 436 | # make bin dir, just in case 437 | mkdir -p "$BUILD_DIR/$composer_bindir" 438 | # figure out shortest relative path from vendor/heroku/heroku-buildpack-php to vendor/bin (or whatever the bin dir is) 439 | relbin=$(python -c "import os.path; print os.path.relpath('$hbpdir', '$composer_bindir')") 440 | # collect bin names from composer.json 441 | relbins=$(cat $BP_DIR/composer.json | python -c 'from __future__ import print_function; import sys, json; { print(sys.argv[1]+"/"+bin) for bin in json.load(sys.stdin)["bin"] }' $relbin) 442 | # link to bins 443 | cd $BUILD_DIR/$composer_bindir 444 | # ln -s $relbins . 445 | for bin in $relbins; do 446 | if ! [ -L ./$(basename $bin) ]; then 447 | ln -s $bin . 
448 | fi 449 | done 450 | cd $BUILD_DIR 451 | 452 | # Update the PATH 453 | # mkdir -p $BUILD_DIR/.profile.d 454 | mkdir -p $HOME/.profile.d 455 | # cat > $BUILD_DIR/.profile.d/php.sh <<"EOF" 456 | # export PATH="$HOME/.heroku/php/bin:$HOME/.heroku/php/sbin:$PATH" 457 | cat > $HOME/.profile.d/php.sh < $BUILD_DIR/.profile.d/hhvm.sh <<"EOF" 463 | # export PATH="$PATH:$HOME/.heroku/php/usr/bin" 464 | # hhvm() { LD_LIBRARY_PATH=$HOME/.heroku/php/usr/lib/x86_64-linux-gnu:$HOME/.heroku/php/usr/lib/hhvm:$HOME/.heroku/php/usr/lib `which hhvm` "$@"; } 465 | cat > $HOME/.profile.d/hhvm.sh <<"EOF" 466 | export PHP_DIR="$HOME/php" 467 | export PATH="$PATH:$PHP_DIR/usr/bin" 468 | hhvm() { LD_LIBRARY_PATH=$PHP_DIR/usr/lib/x86_64-linux-gnu:$PHP_DIR/usr/lib/hhvm:$PHP_DIR/usr/lib `which hhvm` "$@"; } 469 | export -f hhvm 470 | EOF 471 | fi 472 | # Alias composer if needed 473 | if [[ -f "composer.phar" ]]; then 474 | # cat > $BUILD_DIR/.profile.d/composer.sh < $HOME/.profile.d/composer.sh < $BUILD_DIR/.profile.d/composer.sh < $HOME/.profile.d/composer.sh < /dev/null | tail -n 1) # tail, as composer echos outdated version warnings to STDOUT 498 | # echo "web: $bindir/heroku-$engine-apache2" > Procfile 499 | # notice_inline "No Procfile, using 'web: $bindir/heroku-$engine-apache2'." 
500 | # fi 501 | 502 | bindir=$(composer config bin-dir 2> /dev/null | tail -n 1) # tail, as composer echos outdated version warnings to STDOUT 503 | echo "To start the server: 504 | * With Apache: 'PORT=\"\" $bindir/heroku-$engine-apache2 ' 505 | * With nginx: 'PORT=\"\" $bindir/heroku-$engine-nginx '" 506 | -------------------------------------------------------------------------------- /stack/buildpacks/php/bin/detect: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Same as https://github.com/heroku/heroku-buildpack-php/blob/master/bin/detect 4 | 5 | if [[ -f "$1/composer.json" || -f "$1/index.php" ]]; then 6 | echo "PHP" && exit 0 7 | else 8 | exit 1 9 | fi 10 | -------------------------------------------------------------------------------- /stack/buildpacks/php/bin/install-dependencies: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo -n ' Installing dependencies for PHP buildpack... ' 6 | 7 | url='https://github.com/heroku/heroku-buildpack-php/archive/v67.tar.gz' 8 | mkdir -p /opt/devstep/buildpacks/php/heroku-buildpack-php 9 | curl -sL $url | tar xz --strip-components=1 -C /opt/devstep/buildpacks/php/heroku-buildpack-php 10 | 11 | # Replace heroku buildpack configs with our stuff 12 | for conf in /opt/devstep/buildpacks/php/heroku-buildpack-php/conf/**/*; do 13 | if [ -f $conf ] && $(grep -q "/app/.heroku" $conf); then 14 | sed -i 's|/app/.heroku|/home/devstep|' $conf 15 | fi 16 | done 17 | 18 | echo 'DONE' 19 | -------------------------------------------------------------------------------- /stack/buildpacks/python/bin/compile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # https://github.com/heroku/heroku-buildpack-python/blob/3558766ae8d9407a8f08f996b5ac43add3c2efd9/bin/compile 4 | 5 | # Usage: 6 | # 7 | # $ bin/compile 8 | 9 | # Fail fast and fail hard. 
# ============================================================================
# stack/buildpacks/python/bin/compile  (visible portion, from original line
# 10 — the shebang/header lines precede this chunk).
#
# Devstep wrapper around the official Heroku Python buildpack: sets up the
# cache and environment, then sources the vendored Heroku steps plus
# Devstep's own overrides.  Usage: compile <build-dir> <cache-dir> <env-dir>
# Commented-out upstream Heroku (bpwatch/anvil/slug) code paths were pruned.
# ============================================================================
set -eo pipefail

# Honor the standard buildpack debugging knob.
[ "$BUILDPACK_XTRACE" ] && set -o xtrace

# Devstep buildpack dir (resolve symlinks so sourced paths are stable).
bp_basedir="$( cd -P "$( dirname "$0" )" && cd .. && pwd )"
bp_basedir=$(readlink -f "$bp_basedir")

# The vendored Heroku buildpack is fetched by bin/install-dependencies.
basedir="${bp_basedir}/heroku-buildpack-python"
if ! [ -d "$basedir" ]; then
  echo 'Unable to find Heroku buildpack for python!'
  exit 1
fi

# Paths.
BIN_DIR=$basedir/bin
ROOT_DIR=$basedir
BUILD_DIR=$1
CACHE_DIR=$2/python
ENV_DIR=$3

# Keep pip's HTTP cache on the persistent cache dir via a symlink.
# BUGFIX: the original ran `mkdir -p $CACHE_DIR/.cache/pip` yet linked
# $HOME/.cache/pip -> $CACHE_DIR/pip, so the link target never existed,
# the dangling-link check below removed the link on the next run, and the
# pip cache never actually persisted.  Create the directory the link
# really points at (mirrors the ruby buildpack's gem-cache handling).
mkdir -p "$CACHE_DIR/pip"
if [[ -L $HOME/.cache/pip ]] && ! [[ -e $HOME/.cache/pip ]]; then
  unlink "$HOME/.cache/pip"
fi
if ! [[ -L $HOME/.cache/pip ]]; then
  mkdir -p "$HOME/.cache"
  rm -rf "$HOME/.cache/pip"
  ln -s "$CACHE_DIR/pip" "$HOME/.cache/pip"
fi

# Static configuration.  Devstep installs under $HOME instead of Heroku's
# /app/.heroku layout.
PROFILE_PATH="$HOME/.profile.d/python.sh"
WEBCONCURRENCY_PROFILE_PATH="$HOME/.profile.d/python.webconcurrency.sh"

DEFAULT_PYTHON_VERSION="python-2.7.10"
DEFAULT_PYTHON_STACK="cedar-14"
PYTHON_EXE="$HOME/python/bin/python"
PIP_VERSION="7.0.3"
SETUPTOOLS_VERSION="17.0"

BUILDPACK_VERSION=v28

# Setup pip-pop (provides pip-diff, used by steps/pip-uninstall).
export PATH=$PATH:$ROOT_DIR/vendor/pip-pop

[ ! "$STACK" ] && STACK=$DEFAULT_PYTHON_STACK

# Sanitizing environment variables.
unset GIT_DIR PYTHONHOME PYTHONPATH LD_LIBRARY_PATH LIBRARY_PATH

# We'll need to send these statics to other scripts we `source`.
export BUILD_DIR CACHE_DIR BIN_DIR PROFILE_PATH

# Syntax sugar (puts-step, puts-warn, cleanup, indent, set-env, ...).
source $BIN_DIR/utils

# Build-time toolchain environment, all rooted at $HOME/python.
export PATH=$HOME/python/bin:$PATH
export PYTHONUNBUFFERED=1
export LANG=en_US.UTF-8
export C_INCLUDE_PATH="${HOME}/python/include"
export CPLUS_INCLUDE_PATH="${HOME}/python/include"
export LIBRARY_PATH="${HOME}/python/lib"
export LD_LIBRARY_PATH="${HOME}/python/lib"
export PKG_CONFIG_PATH="${HOME}/python/lib/pkg-config"

# Switch to the repo's context.
cd "$BUILD_DIR"

# Experimental pre_compile hook.
source $BIN_DIR/steps/hooks/pre_compile

# If no requirements given, assume `setup.py develop`.
if [ ! -f requirements.txt ]; then
  echo "-e ." > requirements.txt
fi

# If no runtime given, assume default version.
if [ ! -f runtime.txt ]; then
  puts-step "No runtime.txt provided; assuming $DEFAULT_PYTHON_VERSION."
  PYTHON_VERSION=$DEFAULT_PYTHON_VERSION
else
  PYTHON_VERSION=$(cat runtime.txt)
fi

export PYTHON_VERSION

# ### The Cache
mkdir -p "$CACHE_DIR"

mkdir -p "$(dirname $PROFILE_PATH)"

# Install Python using our own script (Devstep override of steps/python).
source $bp_basedir/bin/steps/python

# Sanity check for setuptools/distribute.
source $BIN_DIR/steps/setuptools

# Uninstall removed dependencies with Pip (Devstep override).
source $bp_basedir/bin/steps/pip-uninstall

# Pylibmc support (Devstep override).
source $bp_basedir/bin/steps/pylibmc

# Libffi support (Devstep override).
source $bp_basedir/bin/steps/cryptography

# GDAL support (Devstep override).
source $bp_basedir/bin/steps/gdal

# Install dependencies with Pip (Devstep override).
source $bp_basedir/bin/steps/pip-install

# ### Finalize
# Persist the runtime environment for future shells via ~/.profile.d.
set-env PATH '$HOME/python/bin:$PATH'
set-env PYTHONUNBUFFERED true
set-env PYTHONHOME $HOME/python
set-env LIBRARY_PATH $HOME/python/lib
set-env LD_LIBRARY_PATH '$HOME/python/lib:$LD_LIBRARY_PATH'
set-default-env LANG en_US.UTF-8
set-default-env PYTHONHASHSEED random
set-default-env PYTHONPATH $BUILD_DIR

# Install sane-default script for WEB_CONCURRENCY environment variable.
cp $ROOT_DIR/vendor/python.webconcurrency.sh $WEBCONCURRENCY_PROFILE_PATH

# ============================================================================
# stack/buildpacks/python/bin/detect -- prints the buildpack name and exits 0
# when the project looks like Python, exits 1 otherwise.
# ============================================================================
#!/usr/bin/env bash
# bin/detect

BUILD_DIR=$1

# Exit early if app is clearly not Python.
if [ ! -f "$BUILD_DIR/requirements.txt" ] && [ ! -f "$BUILD_DIR/setup.py" ]; then
  exit 1
fi

echo Python
# ============================================================================
# stack/buildpacks/python/bin/install-dependencies -- fetches the pinned
# upstream Heroku Python buildpack that bin/compile wraps.
# ============================================================================
#!/bin/bash

set -e

echo -n '    Installing dependencies for Python buildpack...'

# Pinned to a specific upstream commit so builds are reproducible.
url='https://github.com/heroku/heroku-buildpack-python/archive/3558766ae8d9407a8f08f996b5ac43add3c2efd9.tar.gz'
mkdir -p /opt/devstep/buildpacks/python/heroku-buildpack-python
curl -sL $url | tar xz --strip-components=1 -C /opt/devstep/buildpacks/python/heroku-buildpack-python

echo 'DONE'

# ============================================================================
# stack/buildpacks/python/bin/steps/cryptography -- sourced by bin/compile.
# Bootstraps Heroku's pre-compiled libffi bundle when requirements pull in
# cffi/cryptography-related packages, and exports LIBFFI for pip builds.
# ============================================================================
#!/usr/bin/env bash

# The location of the pre-compiled libffi bundle for the current stack.
# SECURITY/CONSISTENCY FIX: fetch over https like the sibling gdal step;
# the original pulled native binaries over plain http.
VENDORED_LIBFFI="https://lang-python.s3.amazonaws.com/$STACK/libraries/vendor/libffi.tar.gz"

# PKG_CONFIG_PATH was exported by bin/compile, so this updates the
# exported value seen by pip's child processes.
PKG_CONFIG_PATH="$HOME/vendor/lib/pkgconfig:$PKG_CONFIG_PATH"

# Syntax sugar.
source $BIN_DIR/utils

# If any crypto-related package is in requirements, use vendored libffi.
if (pip-grep -s requirements.txt bcrypt cffi cryptography pyOpenSSL PyOpenSSL requests[security] &> /dev/null) then

  if [ -d "$HOME/vendor/lib/libffi-3.1.1" ]; then
    # Already bootstrapped on a previous run.
    export LIBFFI=$HOME/vendor
  else
    echo "-----> Noticed cffi. Bootstrapping libffi."
    mkdir -p $HOME/vendor
    # Download and extract libffi into target vendor directory.
    curl $VENDORED_LIBFFI -s | tar zxv -C $HOME/vendor &> /dev/null

    export LIBFFI=$HOME/vendor
  fi
fi

# ============================================================================
# stack/buildpacks/python/bin/steps/gdal -- sourced by bin/compile.
# Bootstraps Heroku's pre-compiled GDAL bundle when requirements list GDAL,
# and exports GDAL so pip can build the bindings.
# ============================================================================
#!/usr/bin/env bash

# The location of the pre-compiled GDAL bundle for the current stack.
VENDORED_GDAL="https://lang-python.s3.amazonaws.com/$STACK/libraries/vendor/gdal.tar.gz"

# See note in steps/cryptography: updates the exported PKG_CONFIG_PATH.
PKG_CONFIG_PATH="$HOME/vendor/lib/pkgconfig:$PKG_CONFIG_PATH"

# Syntax sugar.
source $BIN_DIR/utils

# If GDAL exists within requirements, use vendored gdal.
if (pip-grep -s requirements.txt GDAL &> /dev/null) then

  if [ -f "${HOME}/vendor/bin/gdalserver" ]; then
    # Already bootstrapped on a previous run.
    export GDAL=$HOME/vendor
  else
    echo "-----> Noticed GDAL. Bootstrapping gdal."
    mkdir -p $HOME/vendor
    # Download and extract gdal into target vendor directory.
    curl $VENDORED_GDAL -s | tar zxv -C $HOME/vendor &> /dev/null

    export GDAL=$HOME/vendor
  fi
fi
# ============================================================================
# stack/buildpacks/python/bin/steps/pip-install -- sourced by bin/compile.
# Installs everything in requirements.txt, then records both the declared
# and the actually-installed package lists (consumed by steps/pip-uninstall
# on the next build to detect stale dependencies).
# ============================================================================
puts-step "Installing dependencies with pip"

pip_bin=$HOME/python/bin/pip

"$pip_bin" install -r requirements.txt --exists-action=w --allow-all-external | cleanup | indent

# Snapshot for the next build's stale-dependency diff.
cp requirements.txt $HOME/python/requirements-declared.txt
"$pip_bin" freeze > $HOME/python/requirements-installed.txt

echo

# ============================================================================
# stack/buildpacks/python/bin/steps/pip-uninstall -- sourced by bin/compile.
# Diffs the previous build's declared requirements against the current ones
# and uninstalls whatever is no longer listed.  Best effort: failures here
# must not abort the build, hence the set +e / set -e bracket.
# ============================================================================
set +e

declared=$HOME/python/requirements-declared.txt
stale=$HOME/python/requirements-stale.txt

if [[ -f $declared ]]; then
  # pip-diff (from vendor/pip-pop) is run against a copy in the cwd.
  cp $declared requirements-declared.txt
  pip-diff --stale requirements-declared.txt requirements.txt --exclude setuptools pip > $stale
  rm -fr requirements-declared.txt

  # Only invoke pip when the stale list is non-empty.
  if [[ -s $stale ]]; then
    puts-step "Uninstalling stale dependencies"
    $HOME/python/bin/pip uninstall -r $stale -y --exists-action=w | cleanup | indent
  fi
fi

set -e

# ============================================================================
# stack/buildpacks/python/bin/steps/pylibmc -- sourced by bin/compile.
# When the app depends on pylibmc, bootstraps Heroku's pre-compiled
# libmemcached bundle and exports LIBMEMCACHED so pip can build the
# extension against it.
# ============================================================================
#!/usr/bin/env bash

# The location of the pre-compiled libmemcached binary.
VENDORED_MEMCACHED="http://lang-python.s3.amazonaws.com/$STACK/libraries/vendor/libmemcache.tar.gz"

# Syntax sugar.
source $BIN_DIR/utils

# If pylibmc exists within requirements, use vendored libmemcached.
if pip-grep -s requirements.txt pylibmc &> /dev/null; then
  if [ ! -d "$HOME/vendor/lib/sasl2" ]; then
    echo "-----> Noticed pylibmc. Bootstrapping libmemcached."
    mkdir -p $HOME/vendor
    # Download and extract libmemcached into target vendor directory.
    curl $VENDORED_MEMCACHED -s | tar zxv -C $HOME/vendor &> /dev/null
  fi
  # Exported in both the cached and freshly-bootstrapped cases.
  export LIBMEMCACHED=$HOME/vendor
fi
# ============================================================================
# stack/buildpacks/python/bin/steps/python -- sourced by bin/compile.
# Installs (or reuses) the requested Python runtime under $HOME/python and
# ensures the pinned setuptools/pip versions are installed into it.
# Inputs (set by bin/compile): PYTHON_VERSION, STACK, CACHE_DIR, ROOT_DIR,
# PIP_VERSION, SETUPTOOLS_VERSION.
# ============================================================================
set +e

# Install Python.
if [ -f $HOME/python-version ]; then
  # ROBUSTNESS FIX: quote both operands -- the original unquoted
  # `[ ! $(cat ...) = $PYTHON_VERSION ]` fails with "unary operator
  # expected" when the version file is empty or contains whitespace.
  if [ ! "$(cat $HOME/python-version)" = "$PYTHON_VERSION" ]; then
    puts-step "Found runtime $(cat $HOME/python-version), removing"
    rm -fr $HOME/python
  else
    # Same runtime as last build; nothing to install.
    SKIP_INSTALL=1
  fi
fi

if [ ! "$SKIP_INSTALL" ]; then
  puts-step "Installing runtime ($PYTHON_VERSION)"

  # Prepare destination directory.
  mkdir -p $HOME/python
  # Download once; later builds reuse the cached tarball.
  if ! [ -f $CACHE_DIR/${PYTHON_VERSION}.tar.gz ]; then
    curl -s -L http://lang-python.s3.amazonaws.com/$STACK/runtimes/${PYTHON_VERSION}.tar.gz > ${CACHE_DIR}/${PYTHON_VERSION}.tar.gz
  fi
  tar zxf ${CACHE_DIR}/${PYTHON_VERSION}.tar.gz -C $HOME/python &> /dev/null
  if [[ $? != 0 ]] ; then
    # An unknown runtime yields a non-tarball (e.g. an S3 error page);
    # drop it so the next run retries the download.
    rm ${CACHE_DIR}/${PYTHON_VERSION}.tar.gz
    puts-warn "Requested runtime ($PYTHON_VERSION) was not found."
    puts-warn "Aborting. More info: https://devcenter.heroku.com/articles/python-support"
    exit 1
  fi

  # Record for future reference.
  echo $PYTHON_VERSION > $HOME/python-version
  FRESH_PYTHON=true

  hash -r
fi

# (Re)install setuptools/pip when the runtime is fresh or pip is outdated.
if [ "$FRESH_PYTHON" ] || [[ ! $(pip --version) == *$PIP_VERSION* ]]; then
  WORKING_DIR=$(pwd)

  TMPTARDIR=$(mktemp -d)
  # This file is sourced, so clean the temp dir up when it returns.
  trap "rm -rf $TMPTARDIR" RETURN

  # Install the pinned Setuptools from the vendored tarball.
  tar zxf $ROOT_DIR/vendor/setuptools-$SETUPTOOLS_VERSION.tar.gz -C $TMPTARDIR
  cd $TMPTARDIR/setuptools-$SETUPTOOLS_VERSION/
  python setup.py install &> /dev/null
  cd $WORKING_DIR

  # Install the pinned Pip from the vendored tarball.
  tar zxf $ROOT_DIR/vendor/pip-$PIP_VERSION.tar.gz -C $TMPTARDIR
  cd $TMPTARDIR/pip-$PIP_VERSION/
  python setup.py install &> /dev/null
  cd $WORKING_DIR
fi

set -e
hash -r
"s/.*:engine\s*=>\s*['\"]\([^'\"]*\).*:engine_version\s*=>\s*['\"]\([^'\"]*\).*/\1-\2/" \ 16 | -e "s/.*:engine_version\s*=>\s*['\"]\([^'\"]*\).*:engine\s*=>\s*['\"]\([^'\"]*\).*/\2-\1/" \ 17 | -e "s/^\s*ruby\s*['\"]\([^'\"]*\).*/\1/" | head -1 18 | fi 19 | } 20 | 21 | version_from_ruby_version() { 22 | if [ -f ${RUBY_VERSION_PATH} ]; then 23 | cat $RUBY_VERSION_PATH 24 | fi 25 | } 26 | 27 | download_ruby() { 28 | cache=$1 29 | stack=$2 30 | version="ruby-${3}" 31 | 32 | mkdir -p $cache/rubies 33 | 34 | tarball_path=${cache}/rubies/${stack}-${version}.tgz 35 | if ! [ -f $tarball_path ]; then 36 | echo " Downloading and installing ${version} (${stack})..." 1>&2 37 | ruby_url="https://s3-external-1.amazonaws.com/heroku-buildpack-ruby/${stack}/${version}.tgz" 38 | curl -L -s $ruby_url > $tarball_path 39 | else 40 | echo " Installing previously downloaded ${version} (${stack})..." 1>&2 41 | fi 42 | echo $tarball_path 43 | } 44 | 45 | install_ruby() { 46 | tarball_path=$(download_ruby $1 'cedar-14' $DEVSTEP_RUBY_VERSION) 47 | 48 | mkdir -p $HOME/ruby 49 | # TODO: Verify if the tarball is a proper tarball, if not, try using the cedar ruby instead of cedar14, but raise a warning 50 | # document the behavior on docs 51 | tar xfz $tarball_path -C $HOME/ruby &>/dev/null || { 52 | rm -f $tarball_path 53 | echo "WARNING: ${DEVSTEP_RUBY_VERSION} is not available for Heroku's Cedar 14 stack, attempting to use the Cedar stack Ruby." 
1>&2 54 | 55 | # TODO: Some debug logging 56 | #echo "Unable to install Ruby $DEVSTEP_RUBY_VERSION from Heroku's Cedar stack" 57 | #echo "Trying to use cedar stack" 58 | 59 | tarball_path=$(download_ruby $1 'cedar' $DEVSTEP_RUBY_VERSION) 60 | tar xfz $tarball_path -C $HOME/ruby &>/dev/null || { 61 | rm -f $tarball_path 62 | # TODO: Some debug logging 63 | #echo "Unable to download $DEVSTEP_RUBY_VERSION" 64 | #echo "Failing" 65 | return 1 66 | } 67 | } 68 | 69 | mkdir -p ${HOME}/.profile.d 70 | echo 'export PATH="$HOME/ruby/bin:$PATH"' > $HOME/.profile.d/ruby.sh 71 | 72 | return 0 73 | } 74 | 75 | requires_phantomjs() { 76 | return $(grep -q 'poltergeist' $1/Gemfile*) 77 | } 78 | 79 | install_phantomjs() { 80 | /opt/devstep/buildpacks/phantomjs/bin/compile $1 $2 81 | } 82 | 83 | # TODO: Look into travis.yml as well 84 | DEVSTEP_RUBY_VERSION=${DEVSTEP_RUBY_VERSION:-$(version_from_ruby_version)} 85 | DEVSTEP_RUBY_VERSION=${DEVSTEP_RUBY_VERSION:-$(version_from_gemfile)} 86 | if [ -z "$DEVSTEP_RUBY_VERSION" ]; then 87 | DEVSTEP_RUBY_VERSION='2.2.2' 88 | echo "Unable to identify the project ruby version, setting to ${DEVSTEP_RUBY_VERSION}" 89 | fi 90 | 91 | if [ -d ${HOME}/ruby ]; then 92 | echo 'Ruby already installed, skipping' 93 | else 94 | # Step away from project sources to avoid loading .ruby-version and .ruby-gemset files 95 | pushd / &>/dev/null 96 | install_ruby $2 || { 97 | echo 'Error installing ruby!' 
98 | tail /tmp/build.log 99 | exit 1 100 | } 101 | popd &>/dev/null 102 | # Create this dir from here as we might want to bind mount ~/.gem/credentials 103 | # from the host 104 | sudo mkdir -p $HOME/.gem/specs 105 | sudo chown -R developer: $HOME/.gem 106 | cat <<-GEMRC > $HOME/.gemrc 107 | --- 108 | gem: --no-ri --no-rdoc 109 | GEMRC 110 | 111 | # Use system libraries when installing nokogiri 112 | mkdir -p "${HOME}/.bundle" 113 | cat <<-STR > $HOME/.bundle/config 114 | --- 115 | BUNDLE_BUILD__NOKOGIRI: "--use-system-libraries" 116 | STR 117 | fi 118 | 119 | source ${HOME}/.profile.d/ruby.sh 120 | 121 | DEVSTEP_BUNDLER_VERSION=${DEVSTEP_BUNDLER_VERSION:-'1.10.5'} 122 | 123 | if ! $(which bundle &> /dev/null); then 124 | tarball_path=$2/rubies/bundler-${DEVSTEP_BUNDLER_VERSION}.gem 125 | if ! [ -f $tarball_path ]; then 126 | echo "Downloading and installing bundler ${DEVSTEP_BUNDLER_VERSION}..." 127 | bundler_url="https://rubygems.org/downloads/bundler-${DEVSTEP_BUNDLER_VERSION}.gem" 128 | curl -L -s $bundler_url > $tarball_path 129 | else 130 | echo "Installing previously downloaded bundler ${DEVSTEP_BUNDLER_VERSION}..." 131 | fi 132 | gem install $tarball_path 133 | fi 134 | 135 | # If there's no Gemfile in place, our work here is done 136 | if ! [ -f $GEMFILE_PATH ]; then 137 | exit 0 138 | fi 139 | 140 | mkdir -p "$2/gem-cache" 141 | gem_cache="$(gem env gemdir)/cache" 142 | # Check if gem cache link is broken 143 | if [ -L $gem_cache ] && ! [ -e $gem_cache ]; then 144 | unlink $gem_cache 145 | fi 146 | if ! [ -L $gem_cache ]; then 147 | rm -rf $gem_cache 148 | ln -s "$2/gem-cache" $gem_cache 149 | fi 150 | 151 | echo "Installing gems using bundler '$(bundle -v | cut -d' ' -f 3)'..." 
152 | (cd $1 && bundle install --jobs=4) 153 | 154 | if requires_phantomjs $1; then 155 | if [ -d ${HOME}/phantomjs ]; then 156 | echo '-----> PhantomJS already installed, skipping' 157 | else 158 | install_phantomjs $1 $2 159 | fi 160 | fi 161 | -------------------------------------------------------------------------------- /stack/buildpacks/ruby/bin/detect: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'pathname' 4 | 5 | if Pathname.new(ARGV.first).join("Gemfile").exist? 6 | puts "Ruby" 7 | exit 0 8 | else 9 | puts "no" 10 | exit 1 11 | end 12 | -------------------------------------------------------------------------------- /stack/load-env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # some ls aliases 4 | alias ls='ls --color=auto' 5 | alias ll='ls -alFh' 6 | alias la='ls -A' 7 | alias l='ls -CF' 8 | 9 | # shortcut for reloading envs after a build 10 | alias reload-env="source /opt/devstep/load-env.sh" 11 | 12 | # shorten PS1 (useful for golang projects) 13 | export PROMPT_DIRTRIM="2" 14 | 15 | export PATH="${HOME}/bin:${DEVSTEP_BIN}:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" 16 | 17 | # TODO: Review these paths 18 | profile_dirs="/etc ${HOME}" 19 | for dir in $profile_dirs; do 20 | if [ -d "${dir}/.profile.d" ]; then 21 | for i in "${dir}/.profile.d/"*.sh; do 22 | if [ -r $i ]; then 23 | . $i 24 | fi 25 | done 26 | unset i 27 | fi 28 | if [ -d "${dir}/profile.d" ]; then 29 | for i in "${dir}/profile.d/"*.sh; do 30 | if [ -r $i ]; then 31 | . $i 32 | fi 33 | done 34 | unset i 35 | fi 36 | done 37 | --------------------------------------------------------------------------------