├── .circleci
└── config.yml
├── .gitignore
├── README.md
├── common
└── provision.sh
├── current_architecture.png
├── dev-requirements.txt
├── get.py
├── image-builder
├── docker
│ ├── README.md
│ ├── packerfile.json
│ └── provision.sh
├── packman
│ ├── README.md
│ ├── packerfile.json
│ └── provision.sh
└── quickstart-vagrantbox
│ ├── README.md
│ ├── cloudify-hpcloud
│ └── Vagrantfile
│ ├── keys
│ └── insecure_private_key
│ ├── nightly-builder.py
│ ├── nightly.png
│ ├── packer_inputs.json
│ ├── packerfile.json
│ ├── provision
│ ├── Vagrantfile
│ ├── cleanup.sh
│ ├── common.sh
│ ├── influxdb_monkeypatch.sh
│ ├── install_ga.sh
│ └── prepare_nightly.sh
│ ├── settings.py
│ ├── templates
│ ├── box_Vagrantfile.template
│ ├── extlinux.conf.template
│ └── publish_Vagrantfile.template
│ └── userdata
│ └── add_vagrant_user.sh
├── offline-configuration
├── .pydistutils
├── bandersnatch.conf
├── nginx.conf
├── pip.conf
└── provision.sh
├── package-configuration
├── .gitignore
├── debian-agent
│ ├── debian-agent-disable-requiretty.sh
│ ├── debian-celeryd-cloudify.conf.template
│ └── debian-celeryd-cloudify.init.template
├── linux-cli
│ ├── get-cloudify.py
│ ├── test_cli_install.py
│ └── test_get_cloudify.py
├── manager
│ ├── conf
│ │ └── guni.conf.template
│ └── init
│ │ ├── amqpflux.conf.template
│ │ └── manager.conf.template
├── rabbitmq
│ └── init
│ │ └── rabbitmq-server.conf.template
├── riemann
│ └── init
│ │ └── riemann.conf.template
└── ubuntu-commercial-agent
│ ├── Ubuntu-agent-disable-requiretty.sh
│ ├── Ubuntu-celeryd-cloudify.conf.template
│ └── Ubuntu-celeryd-cloudify.init.template
├── package-scripts
└── .gitignore
├── package-templates
├── .gitignore
├── agent-centos-bootstrap.template
├── agent-debian-bootstrap.template
├── agent-ubuntu-bootstrap.template
├── agent-windows-bootstrap.template
├── cli-linux.template
├── manager-bootstrap.template
└── virtualenv-bootstrap.template
├── tox.ini
├── user_definitions-DEPRECATED.py
└── vagrant
├── .gitignore
├── agents
├── Vagrantfile
├── packager.yaml
├── provision.bat
├── provision.sh
├── runit.sh
└── windows
│ ├── packaging
│ ├── create_install_wizard.iss
│ └── source
│ │ ├── icons
│ │ └── Cloudify.ico
│ │ ├── license.txt
│ │ ├── pip
│ │ ├── get-pip.py
│ │ ├── pip-7.0.1-py2.py3-none-any.whl
│ │ └── setuptools-17.0-py2.py3-none-any.whl
│ │ ├── python
│ │ └── python.msi
│ │ └── virtualenv
│ │ └── virtualenv-13.0.1-py2.py3-none-any.whl
│ └── provision.sh
├── cli
├── Vagrantfile
├── provision.sh
└── windows
│ ├── packaging
│ ├── create_install_wizard.iss
│ ├── source
│ │ ├── icons
│ │ │ └── Cloudify.ico
│ │ ├── license.txt
│ │ ├── pip
│ │ │ ├── get-pip.py
│ │ │ ├── pip-6.1.1-py2.py3-none-any.whl
│ │ │ └── setuptools-15.2-py2.py3-none-any.whl
│ │ ├── python
│ │ │ └── python.msi
│ │ └── virtualenv
│ │ │ └── virtualenv-12.1.1-py2.py3-none-any.whl
│ └── update_wheel.py
│ └── provision.sh
└── docker_images
├── Vagrantfile
└── provision.sh
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | checkout:
4 | post:
5 | - >
6 | if [ -n "$CI_PULL_REQUEST" ]; then
7 | PR_ID=${CI_PULL_REQUEST##*/}
8 | git fetch origin +refs/pull/$PR_ID/merge:
9 | git checkout -qf FETCH_HEAD
10 | fi
11 |
12 | defaults:
13 | - &tox_defaults
14 | docker:
15 | - image: circleci/python:2.7
16 |
17 | steps:
18 | - checkout
19 | - run:
20 | name: Install tox
21 | command: sudo pip install tox
22 | - run:
23 |           name: Run tox of specific environment
24 | command: python -m tox -e $DO_ENV
25 |
26 | jobs:
27 | flake8:
28 | <<: *tox_defaults
29 | environment:
30 | DO_ENV: flake8
31 |
32 | test:
33 | <<: *tox_defaults
34 | environment:
35 | DO_ENV: py27
36 |
37 | workflows:
38 | version: 2
39 |
40 | build_and_test:
41 | jobs:
42 | - flake8
43 | - test
44 |
--------------------------------------------------------------------------------
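Note: the config above simply shells out to tox with `DO_ENV` selecting the environment. A minimal sketch for reproducing both CI jobs locally, assuming a Python 2.7 interpreter and the repository's `tox.ini` are available:

```shell
pip install tox

# lint job (DO_ENV=flake8 in the config above)
python -m tox -e flake8

# unit-test job (DO_ENV=py27)
python -m tox -e py27
```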
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 | *.pyc
3 | *.iml
4 |
5 | # C extensions
6 | *.so
7 |
8 | # Packages
9 | *.egg
10 | *.egg-info
11 | *.deb
12 | *.iml
13 | .idea
14 | dist
15 | build
16 | eggs
17 | parts
18 | bin
19 | var
20 | sdist
21 | develop-eggs
22 | .installed.cfg
23 | lib
24 | lib64
25 | __pycache__
26 |
27 | # Installer logs
28 | pip-log.txt
29 |
30 | # Unit test / coverage reports
31 | .coverage
32 | .tox
33 | nosetests.xml
34 |
35 | # Translations
36 | *.mo
37 |
38 | # Mr Developer
39 | .mr.developer.cfg
40 | .project
41 | .pydevproject
42 |
43 | .vagrant/
44 | *COMMIT_MSG
45 |
46 | packager.log
47 |
48 | # QuickBuild
49 | .qbcache/
50 |
51 | # OS X
52 | .DS_Store
53 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Cloudify-Packager
2 | =================
3 |
4 | * Master [![Build Status](https://circleci.com/gh/cloudify-cosmo/cloudify-packager/tree/master.svg?style=svg)](https://circleci.com/gh/cloudify-cosmo/cloudify-packager/tree/master)
5 |
6 | Cloudify's packager provides the tools and configuration objects we use to build Cloudify's management environments, agents, and demo images.
7 |
8 | ### [Docker Images](http://www.docker.com)
9 |
10 | Please see [Bootstrapping using Docker](http://getcloudify.org/guide/3.1/installation-bootstrapping.html#bootstrapping-using-docker) for information on our transition from packages to container-based installations.
11 |
12 | To generate our [Dockerfile](https://github.com/cloudify-cosmo/cloudify-packager/raw/master/docker/Dockerfile.template) templates, we're using [Jocker](https://github.com/nir0s/jocker).
13 |
14 | ### Generate a custom Cloudify manager image
15 |
16 | * Clone the cloudify-packager repository from github:
17 | `git clone https://github.com/cloudify-cosmo/cloudify-packager.git`
18 |
19 | * Make your changes in [vars.py](https://github.com/cloudify-cosmo/cloudify-packager/blob/master/docker/vars.py)
20 |
21 | - For example:
22 |
23 | - Use a specific branch of Cloudify related [modules](https://github.com/cloudify-cosmo/cloudify-packager/blob/master/docker/vars.py#L123).
24 | For example, replace the `master` branch with `my-branch` in `cloudify_rest_client` module:
25 | `"cloudify_rest_client": "git+git://github.com/cloudify-cosmo/cloudify-rest-client.git@my-branch"`
26 | - Add system packages to be installed on the image.
27 | For example, add the package "my-package" to the manager's requirements list (the [reqs](https://github.com/cloudify-cosmo/cloudify-packager/blob/master/docker/vars.py#L119) list):
28 |
29 | ```
30 | "manager": {
31 | "service_name": "manager",
32 | "reqs": [
33 | "git",
34 | "python2.7",
35 | "my-package"
36 | ],
37 | ...
38 | }
39 | ```
40 |
41 | * Run the [build.sh](https://github.com/cloudify-cosmo/cloudify-packager/blob/master/docker/build.sh)
42 | script from the [docker folder](https://github.com/cloudify-cosmo/cloudify-packager/tree/master/docker):
43 | ```
44 | cd cloudify-packager/docker/
45 | . build.sh
46 | ```
47 | - Create a tar file from the generated image:
48 | ```
49 | sudo docker run -t --name=cloudifycommercial -d cloudify-commercial:latest /bin/bash
50 | sudo docker export cloudifycommercial > /tmp/cloudify-docker_commercial.tar
51 | ```
52 |
53 | - Create a url from which you can download the tar file.
54 |
55 | * Set the `docker_url` property in your manager blueprint (see the `cloudify_packages` property in the [CloudifyManager Type](http://getcloudify.org/guide/3.2/reference-types.html#cloudifymanager-type)) with your custom image URL, e.g.:
56 | ```
57 | cloudify_packages:
58 | ...
59 | docker:
60 | docker_url: {url to download the custom Cloudify manager image tar file}
61 | ```
62 |
63 | * Run cfy [bootstrap](http://getcloudify.org/guide/3.1/installation-bootstrapping.html) using your manager blueprint.
64 |
65 |
66 | ### [packman](http://packman.readthedocs.org) configuration
67 |
68 | Package-based provisioning will be deprecated in Cloudify 3.2!
69 |
70 | Packman is used to generate Cloudify's packages.
71 | This repository contains packman's configuration for creating the packages.
72 |
73 | #### package-configuration
74 |
75 | The package-configuration folder contains the init scripts and configuration files for Cloudify's management environment components.
76 |
77 | #### package-templates
78 |
79 | The package-templates folder contains the bootstrap scripts that are used to install Cloudify's management environment.
80 |
81 | #### packages.py
82 |
83 | The packages.py file is the base packman configuration file containing the configuration of the entire stack (including agents).
84 |
85 | ### [Vagrant](http://www.vagrantup.com)
86 |
87 | Cloudify's packages are created using Vagrant VMs (currently on AWS).
88 |
89 | The vagrant folder contains the Vagrant configuration for the different components that are generated using packman:
90 |
91 | - A Vagrant VM is initialized.
92 | - Packman is installed on the machine alongside its requirements.
93 | - If a virtualenv is required, it is created and the relevant modules are installed in it.
94 | - Packman is used to create the environment into which the components are retrieved.
95 | - Packman is used to create the package.
96 |
97 | NOTE: The Windows agent Vagrantfile uses a premade image that already contains the basic requirements for creating the Windows agent.
98 |
99 | #### image-builder
100 |
101 | Creates a Vagrant box (using Virtualbox, AWS, or HPCloud) with the Cloudify Manager installed on it.
102 |
--------------------------------------------------------------------------------
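Note: the Vagrant-based packaging flow described in the README's Vagrant section is driven by the Vagrantfiles under `vagrant/`. A minimal sketch of kicking one of them off, assuming the required AWS credentials are exported; the exact provider and variables depend on the specific Vagrantfile:

```shell
export AWS_ACCESS_KEY_ID=<key id>
export AWS_ACCESS_KEY=<secret key>   # note: this repository keeps the secret key in AWS_ACCESS_KEY

cd vagrant/cli
vagrant up   # provisions the VM and runs provision.sh, which drives packman
```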
/common/provision.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function print_params() {
4 |
5 | echo "## print common parameters"
6 |
7 | arr[0]="VERSION=$VERSION"
8 | arr[1]="PRERELEASE=$PRERELEASE"
9 | arr[2]="CORE_BRANCH=$CORE_BRANCH"
10 |     arr[3]="CORE_TAG_NAME=$CORE_TAG_NAME"
11 | echo ${arr[@]}
12 | }
13 |
14 | function install_common_prereqs () {
15 |
16 | echo "## install common prerequisites"
17 | if which yum >> /dev/null; then
18 | sudo yum -y install openssl curl
19 | SUDO="sudo"
20 | # Setting this for Centos only, as it seems to break otherwise on 6.5
21 | CURL_OPTIONS="-1"
22 | elif which apt-get >> /dev/null; then
23 | sudo apt-get update &&
24 | sudo apt-get -y install openssl libssl-dev
25 | SUDO="sudo"
26 | if [ "`lsb_release -r -s`" == "16.04" ];then
27 | sudo apt-get -y install python
28 | fi
29 | elif [[ "$OSTYPE" == "darwin"* ]]; then
30 | echo "Installing on OSX"
31 | else
32 | echo 'Probably windows machine'
33 | fi
34 |
35 | curl $CURL_OPTIONS "https://bootstrap.pypa.io/2.6/get-pip.py" -o "get-pip.py" &&
36 | $SUDO python get-pip.py pip==9.0.1 &&
37 | $SUDO pip install wheel==0.29.0 &&
38 | $SUDO pip install setuptools==36.8.0 &&
39 | $SUDO pip install awscli &&
40 | echo "## end of installing common prerequisites"
41 |
42 | }
43 |
44 | function create_md5() {
45 |
46 | local file_ext=$1
47 | echo "## create md5"
48 | if [[ "$OSTYPE" == "darwin"* ]]; then
49 | md5cmd="md5 -r"
50 | else
51 | md5cmd="md5sum -t"
52 | fi
53 | md5sum=$($md5cmd *.$file_ext) &&
54 | echo $md5sum | $SUDO tee ${md5sum##* }.md5
55 | }
56 |
57 | function upload_to_s3() {
58 |
59 | local file_ext=$1
60 | file=$(basename $(find . -type f -name "*.$file_ext"))
61 |
62 | echo "## uploading https://$AWS_S3_BUCKET.s3.amazonaws.com/$AWS_S3_PATH/$file"
63 | export AWS_SECRET_ACCESS_KEY=${AWS_ACCESS_KEY} &&
64 | export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} &&
65 | awscli="aws"
66 | if [[ "$OSTYPE" == "cygwin" ]]; then
67 | awscli="python `cygpath -w $(which aws)`"
68 | fi
69 | echo "$awscli s3 cp --acl public-read $file s3://$AWS_S3_BUCKET/$AWS_S3_PATH/"
70 | $awscli s3 cp --acl public-read $file s3://$AWS_S3_BUCKET/$AWS_S3_PATH/ &&
71 | echo "## successfully uploaded $file"
72 |
73 | }
74 |
75 | print_params
76 |
--------------------------------------------------------------------------------
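Note: these functions are intended to be sourced by the per-package provisioning scripts rather than run directly. A hypothetical caller might look like the sketch below; the variable values are illustrative only, and `upload_to_s3` additionally expects `AWS_ACCESS_KEY_ID`/`AWS_ACCESS_KEY` in the environment:

```shell
#!/bin/bash
# Illustrative caller of common/provision.sh (not part of the repository).
export VERSION=3.4.0 PRERELEASE=ga CORE_TAG_NAME=3.4    # example values only

source common/provision.sh   # prints the common parameters via print_params

install_common_prereqs
# ... build the package into the current directory, e.g. producing a *.rpm ...
create_md5 rpm

export AWS_S3_BUCKET=my-bucket AWS_S3_PATH=example/path   # example destination
upload_to_s3 rpm
upload_to_s3 md5
```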
/current_architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/current_architecture.png
--------------------------------------------------------------------------------
/dev-requirements.txt:
--------------------------------------------------------------------------------
1 | coverage==3.7.1
2 | nose
3 | nose-cov
4 | testfixtures
5 | testtools
6 | mock
7 | virtualenv
--------------------------------------------------------------------------------
/get.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | ########
3 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
4 | #
5 | # Licensed under the Apache License, Version 2.0 (the "License");
6 | # you may not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing, software
12 | # distributed under the License is distributed on an "AS IS" BASIS,
13 | # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 | # * See the License for the specific language governing permissions and
15 | # * limitations under the License.
16 |
17 | from packman import logger
18 | from packman.packman import get_package_config as get_conf
19 | from packman import utils
20 | from packman import python
21 | from packman import retrieve
22 |
23 | lgr = logger.init()
24 |
25 |
26 | def _prepare(package):
27 |
28 | common = utils.Handler()
29 | common.rmdir(package['sources_path'])
30 | common.mkdir('{0}/archives'.format(package['sources_path']))
31 | common.mkdir(package['package_path'])
32 |
33 |
34 | def create_agent(package, download=False):
35 | dl_handler = retrieve.Handler()
36 | common = utils.Handler()
37 | py_handler = python.Handler()
38 | _prepare(package)
39 | py_handler.make_venv(package['sources_path'])
40 | if download:
41 | tar_file = '{0}/{1}.tar.gz'.format(
42 | package['sources_path'], package['name'])
43 | for url in package['source_urls']:
44 | dl_handler.download(url, file=tar_file)
45 | common.untar(package['sources_path'], tar_file)
46 | for module in package['modules']:
47 | py_handler.pip(module, package['sources_path'])
48 |
49 |
50 | def get_ubuntu_precise_agent(download=False):
51 | package = get_conf('Ubuntu-precise-agent')
52 | create_agent(package, download)
53 |
54 |
55 | def get_ubuntu_trusty_agent(download=False):
56 | package = get_conf('Ubuntu-trusty-agent')
57 | create_agent(package, download)
58 |
59 |
60 | def get_centos_final_agent(download=False):
61 | package = get_conf('centos-Final-agent')
62 | create_agent(package, download)
63 |
64 |
65 | def get_debian_jessie_agent(download=False):
66 | package = get_conf('debian-jessie-agent')
67 | create_agent(package, download)
68 |
69 |
70 | def get_celery(download=False):
71 | package = get_conf('celery')
72 |
73 | dl_handler = retrieve.Handler()
74 | common = utils.Handler()
75 | py_handler = python.Handler()
76 | _prepare(package)
77 | py_handler.make_venv(package['sources_path'])
78 | tar_file = '{0}/{1}.tar.gz'.format(
79 | package['sources_path'], package['name'])
80 | for url in package['source_urls']:
81 | dl_handler.download(url, file=tar_file)
82 | common.untar(package['sources_path'], tar_file)
83 | if download:
84 | for module in package['modules']:
85 | py_handler.pip(module, package['sources_path'])
86 |
87 |
88 | def get_manager(download=False):
89 | package = get_conf('manager')
90 |
91 | dl_handler = retrieve.Handler()
92 | common = utils.Handler()
93 | py_handler = python.Handler()
94 | _prepare(package)
95 | py_handler.make_venv(package['sources_path'])
96 | tar_file = '{0}/{1}.tar.gz'.format(
97 | package['sources_path'], package['name'])
98 | for url in package['source_urls']:
99 | dl_handler.download(url, file=tar_file)
100 | common.untar(package['sources_path'], tar_file)
101 |
102 | common.mkdir(package['file_server_dir'])
103 | common.cp(package['resources_path'], package['file_server_dir'])
104 | if download:
105 | for module in package['modules']:
106 | py_handler.pip(module, package['sources_path'])
107 |
108 |
109 | def main():
110 |
111 | lgr.debug('VALIDATED!')
112 |
113 |
114 | if __name__ == '__main__':
115 | main()
116 |
--------------------------------------------------------------------------------
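Note: the functions above are thin wrappers around packman's handlers and are normally driven by packman using the package definitions in this repository. Purely as an illustration, and assuming packman 0.5.x and the referenced package configuration are installed, one of them could be invoked directly:

```shell
# Hypothetical direct invocation of one of the agent builders defined in get.py.
python -c "import get; get.get_ubuntu_trusty_agent(download=True)"
```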
/image-builder/docker/README.md:
--------------------------------------------------------------------------------
1 | # Docker Image Builder
2 |
3 | This allows generating a Docker image using Packer to be used in our build processes.
4 | It is meant to be generated manually per request.
5 |
6 | Currently, the image will be provisioned with the following:
7 |
8 | * Docker (version not hardcoded)
9 | * docker-compose (Docker's API will be exposed for docker-compose to work)
10 | * boto
--------------------------------------------------------------------------------
/image-builder/docker/packerfile.json:
--------------------------------------------------------------------------------
1 | {
2 | "variables": {
3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
4 | "aws_secret_key": "{{env `AWS_ACCESS_KEY`}}",
5 | "aws_ubuntu_trusty_source_ami": "ami-f0b11187",
6 | "instance_type": "m3.medium",
7 | "region": "eu-west-1"
8 | },
9 | "builders": [
10 | {
11 | "name": "ubuntu_trusty_docker",
12 | "type": "amazon-ebs",
13 | "access_key": "{{user `aws_access_key`}}",
14 | "secret_key": "{{user `aws_secret_key`}}",
15 | "region": "{{user `region`}}",
16 | "source_ami": "{{user `aws_ubuntu_trusty_source_ami`}}",
17 | "instance_type": "{{user `instance_type`}}",
18 | "ssh_username": "ubuntu",
19 | "ami_name": "ubuntu-trusty-docker {{timestamp}}",
20 | "run_tags": {
21 | "Name": "ubuntu trusty docker image generator"
22 | }
23 | }
24 | ],
25 | "provisioners": [
26 | {
27 | "type": "shell",
28 | "script": "provision.sh"
29 | }
30 | ]
31 | }
32 |
--------------------------------------------------------------------------------
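Note: the two credential variables are read from the environment, following the repository's convention of keeping the secret key in `AWS_ACCESS_KEY`. A minimal build invocation:

```shell
export AWS_ACCESS_KEY_ID=<key id>
export AWS_ACCESS_KEY=<secret key>

cd image-builder/docker
packer validate packerfile.json
packer build packerfile.json
```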
/image-builder/docker/provision.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | function install_docker
4 | {
5 | echo Installing Docker
6 | curl -sSL https://get.docker.com/ubuntu/ | sudo sh
7 | }
8 |
9 | function install_docker_compose
10 | {
11 | echo Installing docker-compose
12 | # docker-compose requires requests in version 2.2.1. will probably change.
13 | sudo pip install requests==2.2.1 --upgrade
14 | sudo pip install docker-compose==1.1.0
15 |
16 | echo Exposing docker api
17 | sudo /bin/sh -c 'echo DOCKER_OPTS=\"-H tcp://127.0.0.1:4243 -H unix:///var/run/docker.sock\" >> /etc/default/docker'
18 | sudo restart docker
19 | export DOCKER_HOST=tcp://localhost:4243
20 | }
21 |
22 | function install_pip
23 | {
24 | echo Installing pip
25 | curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python
26 | }
27 |
28 | function install_boto
29 | {
30 | echo Installing boto
31 | sudo pip install boto==2.36.0
32 |
33 | }
34 |
35 | install_docker
36 | install_pip
37 | install_boto
38 | install_docker_compose
39 |
--------------------------------------------------------------------------------
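Note: after provisioning, the Docker daemon listens on TCP port 4243 in addition to the unix socket, which is what docker-compose relies on. A quick, illustrative sanity check from inside the image:

```shell
# the remote API should answer on the TCP endpoint configured above
curl -s http://127.0.0.1:4243/version

# the docker CLI and docker-compose can be pointed at it explicitly
export DOCKER_HOST=tcp://127.0.0.1:4243
docker info
```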
/image-builder/packman/README.md:
--------------------------------------------------------------------------------
1 | # Packman Image Builder
2 |
3 | This allows generating a packman image using Packer to be used in our build processes.
4 | It is meant to be generated manually per request.
5 |
6 | We use packman to generate our Agent and CLI packages.
7 |
8 | The provisioning script supports Debian, Ubuntu and CentOS (and potentially, RHEL) based images.
9 | Currently, the image will be provisioned with the following:
10 |
11 | * Build prerequisites such as gcc, g++, python-dev, etc..
12 | * git
13 | * fpm (and consequently, Ruby 1.9.3), for packman to generate packages
14 | * Virtualenv and boto
15 | * packman (hardcoded version. should be upgraded if needed)
16 |
17 | The supplied packerfile currently generates images for:
18 |
19 | * debian jessie
20 | * Ubuntu precise
21 | * Ubuntu trusty
22 | * Centos 6.4
23 |
24 | Note that all images are EBS-backed - a prerequisite for baking them with Packer.
--------------------------------------------------------------------------------
/image-builder/packman/packerfile.json:
--------------------------------------------------------------------------------
1 | {
2 | "variables": {
3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
4 | "aws_secret_key": "{{env `AWS_ACCESS_KEY`}}",
5 | "aws_debian_jessie_source_ami": "ami-f9a9238e",
6 | "aws_ubuntu_trusty_source_ami": "ami-f0b11187",
7 | "aws_ubuntu_precise_source_ami": "ami-00d12677",
8 | "aws_ubuntu_precise_official_ami": "ami-73cb5a04",
9 | "aws_centos_64_source_ami": "ami-3b39ee4c",
10 | "instance_type": "m3.large",
11 | "region": "eu-west-1"
12 | },
13 | "builders": [
14 | {
15 | "name": "ubuntu_precise_packman",
16 | "type": "amazon-ebs",
17 | "access_key": "{{user `aws_access_key`}}",
18 | "secret_key": "{{user `aws_secret_key`}}",
19 | "region": "{{user `region`}}",
20 | "source_ami": "{{user `aws_ubuntu_precise_source_ami`}}",
21 | "instance_type": "{{user `instance_type`}}",
22 | "ssh_username": "ubuntu",
23 | "ami_name": "ubuntu-precise-packman {{timestamp}}",
24 | "run_tags": {
25 | "Name": "ubuntu precise packman image generator"
26 | }
27 | },
28 | {
29 | "name": "ubuntu_trusty_packman",
30 | "type": "amazon-ebs",
31 | "access_key": "{{user `aws_access_key`}}",
32 | "secret_key": "{{user `aws_secret_key`}}",
33 | "region": "{{user `region`}}",
34 | "source_ami": "{{user `aws_ubuntu_trusty_source_ami`}}",
35 | "instance_type": "{{user `instance_type`}}",
36 | "ssh_username": "ubuntu",
37 | "ami_name": "ubuntu-trusty-packman {{timestamp}}",
38 | "run_tags": {
39 | "Name": "ubuntu trusty packman image generator"
40 | }
41 | },
42 | {
43 | "name": "debian_jessie_packman",
44 | "type": "amazon-ebs",
45 | "access_key": "{{user `aws_access_key`}}",
46 | "secret_key": "{{user `aws_secret_key`}}",
47 | "region": "{{user `region`}}",
48 | "source_ami": "{{user `aws_debian_jessie_source_ami`}}",
49 | "instance_type": "{{user `instance_type`}}",
50 | "ssh_username": "admin",
51 | "ami_name": "debian-jessie-packman {{timestamp}}",
52 | "run_tags": {
53 | "Name": "debian jessie packman image generator"
54 | }
55 | },
56 | {
57 | "name": "centos_64_packman",
58 | "type": "amazon-ebs",
59 | "access_key": "{{user `aws_access_key`}}",
60 | "secret_key": "{{user `aws_secret_key`}}",
61 | "region": "{{user `region`}}",
62 | "source_ami": "{{user `aws_centos_64_source_ami`}}",
63 | "instance_type": "{{user `instance_type`}}",
64 | "ssh_username": "root",
65 | "ssh_private_key_file": "/home/nir0s/.ssh/aws/vagrant_centos_build.pem",
66 | "ami_name": "centos-64-packman {{timestamp}}",
67 | "run_tags": {
68 | "Name": "centos 64 packman image generator"
69 | }
70 | }
71 | ],
72 | "provisioners": [
73 | {
74 | "type": "shell",
75 | "script": "provision.sh"
76 | }
77 | ]
78 | }
79 |
--------------------------------------------------------------------------------
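Note: each of the four builders can be baked separately with Packer's `-only` flag. For example, to rebuild just the Ubuntu Trusty packman image (with the same `AWS_ACCESS_KEY_ID`/`AWS_ACCESS_KEY` environment variables exported):

```shell
cd image-builder/packman
packer build -only=ubuntu_trusty_packman packerfile.json
```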
/image-builder/packman/provision.sh:
--------------------------------------------------------------------------------
1 | function install_prereqs
2 | {
3 | # some of these are not related to the image specifically but rather to processes that will be executed later
4 | # such as git, python-dev and make
5 | if which apt-get; then
6 | # ubuntu
7 | sudo apt-get -y update &&
8 | # the commented below might not be required. If the latest version of git is required, we'll have to enable them
9 | # precise - python-software-properties
10 | # trusty - software-properties-common
11 | # sudo apt-get install -y software-properties-common
12 | # sudo apt-get install -y python-software-properties
13 | # sudo add-apt-repository -y ppa:git-core/ppa &&
14 | sudo apt-get install -y curl python-dev git make gcc libyaml-dev zlib1g-dev g++
15 | elif which yum; then
16 |         # centos/RHEL
17 |         sudo yum -y update &&
18 |         sudo yum install -y yum-downloadonly wget mlocate yum-utils &&
19 |         sudo yum install -y python-devel libyaml-devel ruby rubygems ruby-devel make gcc gcc-c++ git
20 | # this is required to build pyzmq under centos/RHEL
21 | sudo yum install -y zeromq-devel -c http://download.opensuse.org/repositories/home:/fengshuo:/zeromq/CentOS_CentOS-6/home:fengshuo:zeromq.repo
22 | else
23 | echo 'unsupported package manager, exiting'
24 | exit 1
25 | fi
26 | }
27 |
28 | function install_ruby
29 | {
30 | wget https://ftp.ruby-lang.org/pub/ruby/ruby-1.9.3-rc1.tar.gz --no-check-certificate
31 | # alt since ruby-lang's ftp blows
32 | # wget http://mirrors.ibiblio.org/ruby/1.9/ruby-1.9.3-rc1.tar.gz --no-check-certificate
33 | tar -xzvf ruby-1.9.3-rc1.tar.gz
34 | cd ruby-1.9.3-rc1
35 | ./configure --disable-install-doc
36 | make
37 | sudo make install
38 | cd ~
39 | }
40 |
41 | function install_fpm
42 | {
43 | sudo gem install fpm --no-ri --no-rdoc
44 |   # if we want to download gems as a part of the packman run, this should be enabled
45 | # echo -e 'gem: --no-ri --no-rdoc\ninstall: --no-rdoc --no-ri\nupdate: --no-rdoc --no-ri' >> ~/.gemrc
46 | }
47 |
48 | function install_pip
49 | {
50 | curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python
51 | }
52 |
53 | install_prereqs &&
54 | if ! which ruby; then
55 | install_ruby
56 | fi
57 | install_fpm &&
58 | install_pip &&
59 | sudo pip install "packman==0.5.0" &&
60 | sudo pip install "virtualenv==12.0.7" &&
61 | sudo pip install "boto==2.36.0"
62 |
--------------------------------------------------------------------------------
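Note: a short, purely illustrative smoke test of the resulting image; all of these should succeed once the script above has run:

```shell
ruby --version                      # Ruby 1.9.3 (built from source where the distro lacks it)
fpm --version                       # fpm gem used by packman to build OS packages
pip show packman virtualenv boto    # pinned Python tooling
```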
/image-builder/quickstart-vagrantbox/README.md:
--------------------------------------------------------------------------------
1 | # Image Builder
2 | This directory contains configuration and script files intended for creating a Vagrant box with a working Cloudify Manager, for a number of Vagrant providers.
3 | Supported scenarios:
4 |
5 | 1. Create a Vagrant box locally by using Virtualbox (for the Virtualbox Vagrant provider)
6 | 1. Create a Vagrant box remotely by using AWS (for the Virtualbox Vagrant provider)
7 | 1. Create a Vagrant box remotely by using AWS (for the AWS Vagrant provider)
8 | 1. Create a Vagrant box locally by using HPCloud (for the HPCloud Vagrant provider)
9 |
10 | # Directory Structure
11 | * `userdata` - contains userdata scripts for AWS machines
12 | * `templates` - templates files used for building
13 | * `provision` - provisioning scripts used by Packer & Vagrant
14 | * common.sh - Main provisioning script (installing manager). Includes all the GIT repos (SHA)
15 | * cleanup.sh - Post install for provisioning with AWS
16 |   * prepare_nightly.sh - All the changes needed to turn the cloud image into a Virtualbox image
17 | * `keys` - insecure keys for Vagrant
18 | * `cloudify-hpcloud` - Vagrant box creator for hpcloud
19 |
20 | # How to use this
21 | ## Pre Requirements
22 |
23 | 1. Python 2.7 (for scenario 2):
24 | * [Fabric](http://www.fabfile.org/)
25 | * [Boto](http://docs.pythonboto.org/en/latest/)
26 | 1. [Packer](https://www.packer.io/)
27 | 1. [Virtualbox](https://www.virtualbox.org/) (for scenarios 1 & 4 only)
28 | 1. [Vagrant](https://www.vagrantup.com/) (for scenario 4 only):
29 | * [HPCloud Vagrant plugin](https://github.com/mohitsethi/vagrant-hp)
30 |
31 | ## Configuration files
32 | ### settings.py (scenario 2 only)
33 | This file contains the settings used by the `nightly-builder.py` script. You'll need to configure it if you want to build the nightly Virtualbox image on AWS.
34 | * `region` - The region where `nightly-builder` will launch its worker instance. This should be the same region as in Packer config.
35 | * `username` - The username to use when connecting to the worker instance. This depends on what instance you use. Usually `ubuntu` for Ubuntu AMIs.
36 | * `aws_s3_bucket` - S3 bucket name where nightlies should be uploaded to.
37 | * `aws_iam_group` - IAM group for worker instance (see below).
38 | * `factory_ami` - Base AMI for worker instance.
39 | * `instance_type` - Worker instance type (m3.medium, m3.large,...). Note that not all AMIs support all instance types.
40 | * `packer_var_file` - Packer var file path. This is the `packer_inputs.json` file which is used by Packer.
41 |
42 | ### packer_inputs.json
43 | This is the input file for Packer.
44 | * `cloudify_release` - Release version number.
45 | * `aws_source_ami` - Base AWS AMI.
46 | * `components_package_url` - Components package url
47 | * `core_package_url` - Core package url
48 | * `ui_package_url` - UI package url
49 | * `ubuntu_agent_url` - Ubuntu package url
50 | * `centos_agent_url` - Centos agent url
51 | * `windows_agent_url` - Windows agent url
52 |
53 | ### packerfile.json
54 | Packer template file. It contains a number of user variables defined at the top of the file (the `variables` section). `packer_inputs.json` is the inputs file for these variables. Note that some variables are not passed via that file:
55 | * `aws_access_key` - AWS key ID, taken from environment variable `AWS_ACCESS_KEY_ID`
56 | * `aws_secret_key` - AWS Secret key, taken from environment variable `AWS_ACCESS_KEY`
57 | * `instance_type` - Instance type for the provisioning machine
58 | * `virtualbox_source_image` - Source image (ovf) for when building local virtualbox image with Packer (without AWS)
59 | * `insecure_private_key` - Path of Vagrant's default insecure private key
60 |
61 | ## AWS requirements
62 | * Valid credentials - Your user must be able to launch/terminate instances, add/remove security groups and private keys
63 | * AMI base image - This was tested with Ubuntu base image
64 | * S3 bucket - S3 bucket where final images will be stored
65 | * IAM role - An IAM group must be created for the worker instances with sufficient rights to upload into the S3 bucket.
66 |
67 | ### IAM role
68 | Example for role policy:
69 | ```json
70 | {
71 | "Version": "2012-10-17",
72 | "Statement": [
73 | {
74 | "Effect": "Allow",
75 | "Action": [
76 | "s3:List*",
77 | "s3:Put*"
78 | ],
79 | "Resource": [
80 | "arn:aws:s3:::name-of-s3-bucket-here",
81 | "arn:aws:s3:::name-of-s3-bucket-here/*"
82 | ]
83 | }
84 | ]
85 | }
86 | ```
87 |
88 | ## Running
89 |
90 | ### Create Vagrant box locally by using Virtualbox
91 | ```shell
92 | packer build \
93 |   -only=virtualbox \
94 |   -var-file=packer_inputs.json \
95 | packerfile.json
96 | ```
97 |
98 | ### Create Vagrant box remotely by using AWS (for Virtualbox provider)
99 | ```shell
100 | python nightly-builder.py
101 | ```
102 |
103 | ### Create Vagrant box remotely by using AWS (for AWS provider)
104 | ```shell
105 | packer build \
106 |   -only=amazon \
107 |   -var-file=packer_inputs.json \
108 | packerfile.json
109 | ```
110 |
111 | ### Create Vagrant box locally by using HPCloud
112 | ```shell
113 | cd cloudify-hpcloud
114 | vagrant up --provider hp
115 | ```
116 | ## How the nightly image is built
117 | The nightly image process is more complicated than the rest, because we use AWS as the platform on which we build our images. The following diagram explains the process:
118 |
119 |
120 | ![Nightly build process](nightly.png)
121 |
122 |
--------------------------------------------------------------------------------
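Note: for scenario 2, `nightly-builder.py` reads the AWS credentials from the same environment variables Packer uses. A minimal run sketch, assuming `settings.py` and `packer_inputs.json` have already been filled in:

```shell
export AWS_ACCESS_KEY_ID=<key id>
export AWS_ACCESS_KEY=<secret key>   # secret key, per the repository's convention

cd image-builder/quickstart-vagrantbox
pip install boto "fabric<2"          # the script uses the Fabric 1.x API (fabric.api)
python nightly-builder.py
```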
/image-builder/quickstart-vagrantbox/cloudify-hpcloud/Vagrantfile:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # * See the License for the specific language governing permissions and
14 | # * limitations under the License.
15 |
16 | # -*- mode: ruby -*-
17 | # vi: set ft=ruby :
18 |
19 | INSTALL_FROM_PYPI = ENV['INSTALL_FROM_PYPI']
20 |
21 | HP_ACCESS_KEY = ENV['HP_ACCESS_KEY']
22 | HP_SECRET_KEY = ENV['HP_SECRET_KEY']
23 | HP_TENANT_ID = ENV['HP_TENANT_ID']
24 | HP_KEYPAIR_NAME = ENV['HP_KEYPAIR_NAME']
25 | HP_PRIVATE_KEY_PATH = ENV['HP_PRIVATE_KEY_PATH']
26 | HP_SERVER_IMAGE = ENV['HP_SERVER_IMAGE'] || "Ubuntu Server 12.04.5 LTS (amd64 20140927) - Partner Image"
27 | HP_SSH_USERNAME = ENV['HP_SSH_USERNAME'] || "ubuntu"
28 | HP_AVAILABILITY_ZONE = ENV['HP_AVAILABILITY_ZONE'] || "us-east"
29 |
30 | SERVER_NAME = INSTALL_FROM_PYPI == "true" ? "install_cloudify_3_stable" : "install_cloudify_3_latest"
31 |
32 | Vagrant.configure('2') do |config|
33 |   config.vm.provider :hp do |hp, override|
34 | unless Vagrant.has_plugin?("vagrant-hp")
35 | raise 'vagrant-hp plugin not installed!'
36 | end
37 | override.vm.box = "dummy_hp"
38 | override.vm.box_url = "https://github.com/mohitsethi/vagrant-hp/raw/master/dummy_hp.box"
39 |
40 | hp.access_key = "#{HP_ACCESS_KEY}"
41 | hp.secret_key = "#{HP_SECRET_KEY}"
42 | hp.flavor = "standard.medium"
43 | hp.tenant_id = "#{HP_TENANT_ID}"
44 | hp.server_name = "#{SERVER_NAME}"
45 | hp.image = "#{HP_SERVER_IMAGE}"
46 | hp.keypair_name = "#{HP_KEYPAIR_NAME}"
47 | hp.ssh_private_key_path = "#{HP_PRIVATE_KEY_PATH}"
48 | hp.ssh_username = "#{HP_SSH_USERNAME}"
49 | hp.availability_zone = "#{HP_AVAILABILITY_ZONE}"
50 | end
51 |
52 | config.vm.provision "shell" do |sh|
53 | sh.path = "../provision/common.sh"
54 | sh.args = "#{INSTALL_FROM_PYPI}"
55 | sh.privileged = false
56 | end
57 | end
58 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/keys/insecure_private_key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEogIBAAKCAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzI
3 | w+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoP
4 | kcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2
5 | hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NO
6 | Td0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcW
7 | yLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQIBIwKCAQEA4iqWPJXtzZA68mKd
8 | ELs4jJsdyky+ewdZeNds5tjcnHU5zUYE25K+ffJED9qUWICcLZDc81TGWjHyAqD1
9 | Bw7XpgUwFgeUJwUlzQurAv+/ySnxiwuaGJfhFM1CaQHzfXphgVml+fZUvnJUTvzf
10 | TK2Lg6EdbUE9TarUlBf/xPfuEhMSlIE5keb/Zz3/LUlRg8yDqz5w+QWVJ4utnKnK
11 | iqwZN0mwpwU7YSyJhlT4YV1F3n4YjLswM5wJs2oqm0jssQu/BT0tyEXNDYBLEF4A
12 | sClaWuSJ2kjq7KhrrYXzagqhnSei9ODYFShJu8UWVec3Ihb5ZXlzO6vdNQ1J9Xsf
13 | 4m+2ywKBgQD6qFxx/Rv9CNN96l/4rb14HKirC2o/orApiHmHDsURs5rUKDx0f9iP
14 | cXN7S1uePXuJRK/5hsubaOCx3Owd2u9gD6Oq0CsMkE4CUSiJcYrMANtx54cGH7Rk
15 | EjFZxK8xAv1ldELEyxrFqkbE4BKd8QOt414qjvTGyAK+OLD3M2QdCQKBgQDtx8pN
16 | CAxR7yhHbIWT1AH66+XWN8bXq7l3RO/ukeaci98JfkbkxURZhtxV/HHuvUhnPLdX
17 | 3TwygPBYZFNo4pzVEhzWoTtnEtrFueKxyc3+LjZpuo+mBlQ6ORtfgkr9gBVphXZG
18 | YEzkCD3lVdl8L4cw9BVpKrJCs1c5taGjDgdInQKBgHm/fVvv96bJxc9x1tffXAcj
19 | 3OVdUN0UgXNCSaf/3A/phbeBQe9xS+3mpc4r6qvx+iy69mNBeNZ0xOitIjpjBo2+
20 | dBEjSBwLk5q5tJqHmy/jKMJL4n9ROlx93XS+njxgibTvU6Fp9w+NOFD/HvxB3Tcz
21 | 6+jJF85D5BNAG3DBMKBjAoGBAOAxZvgsKN+JuENXsST7F89Tck2iTcQIT8g5rwWC
22 | P9Vt74yboe2kDT531w8+egz7nAmRBKNM751U/95P9t88EDacDI/Z2OwnuFQHCPDF
23 | llYOUI+SpLJ6/vURRbHSnnn8a/XG+nzedGH5JGqEJNQsz+xT2axM0/W/CRknmGaJ
24 | kda/AoGANWrLCz708y7VYgAtW2Uf1DPOIYMdvo6fxIB5i9ZfISgcJ/bbCUkFrhoH
25 | +vq/5CIWxCPp0f85R4qxxQ5ihxJ0YDQT9Jpx4TMss4PSavPaBH3RXow5Ohe+bYoQ
26 | NE5OgEXk2wVfZczCZpigBKbKZHNYcelXtTt/nP3rsCuGcM4h53s=
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/nightly-builder.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import os
3 | import re
4 | import string
5 | import random
6 | from time import sleep, strftime
7 | from string import Template
8 | from tempfile import gettempdir
9 | from StringIO import StringIO
10 | from subprocess import Popen, PIPE
11 |
12 | import boto.ec2
13 | from boto.ec2 import blockdevicemapping as bdm
14 | from fabric.api import env, run, sudo, execute, put
15 |
16 | from settings import settings
17 |
18 | RESOURCES = []
19 |
20 |
21 | def main():
22 | print('Starting nightly build: {}'.format(strftime("%Y-%m-%d %H:%M:%S")))
23 | print('Opening connection..')
24 | access_key = os.environ.get('AWS_ACCESS_KEY_ID')
25 | secret_key = os.environ.get('AWS_ACCESS_KEY')
26 | conn = boto.ec2.connect_to_region(settings['region'],
27 | aws_access_key_id=access_key,
28 | aws_secret_access_key=secret_key)
29 | RESOURCES.append(conn)
30 |
31 | print('Running Packer..')
32 | baked_ami_id = run_packer()
33 | baked_ami = conn.get_image(baked_ami_id)
34 | RESOURCES.append(baked_ami)
35 |
36 | baked_snap = baked_ami.block_device_mapping['/dev/sda1'].snapshot_id
37 |
38 | print('Launching worker machine..')
39 | mapping = bdm.BlockDeviceMapping()
40 | mapping['/dev/sda1'] = bdm.BlockDeviceType(size=10,
41 | volume_type='gp2',
42 | delete_on_termination=True)
43 | mapping['/dev/sdf'] = bdm.BlockDeviceType(snapshot_id=baked_snap,
44 | volume_type='gp2',
45 | delete_on_termination=True)
46 |
47 | kp_name = random_generator()
48 | kp = conn.create_key_pair(kp_name)
49 | kp.save(gettempdir())
50 | print('Keypair created: {}'.format(kp_name))
51 |
52 | sg_name = random_generator()
53 | sg = conn.create_security_group(sg_name, 'vagrant nightly')
54 | sg.authorize(ip_protocol='tcp',
55 | from_port=22,
56 | to_port=22,
57 | cidr_ip='0.0.0.0/0')
58 | print('Security Group created: {}'.format(sg_name))
59 |
60 | reserv = conn.run_instances(
61 | image_id=settings['factory_ami'],
62 | key_name=kp_name,
63 | instance_type=settings['instance_type'],
64 | security_groups=[sg],
65 | block_device_map=mapping,
66 | instance_profile_name=settings['aws_iam_group'])
67 |
68 | factory_instance = reserv.instances[0]
69 | RESOURCES.append(factory_instance)
70 | RESOURCES.append(kp)
71 | RESOURCES.append(sg)
72 |
73 | env.key_filename = os.path.join(gettempdir(), '{}.pem'.format(kp_name))
74 | env.timeout = 10
75 | env.connection_attempts = 12
76 |
77 | while factory_instance.state != 'running':
78 | sleep(5)
79 | factory_instance.update()
80 | print('machine state: {}'.format(factory_instance.state))
81 |
82 | print('Executing script..')
83 | execute(do_work, host='{}@{}'.format(settings['username'],
84 | factory_instance.ip_address))
85 |
86 |
87 | def random_generator(size=8, chars=string.ascii_uppercase + string.digits):
88 | return ''.join(random.choice(chars) for _ in range(size))
89 |
90 |
91 | def run_packer():
92 | packer_cmd = 'packer build ' \
93 | '-machine-readable ' \
94 | '-only=nightly_virtualbox_build ' \
95 | '-var-file={} ' \
96 | 'packerfile.json'.format(settings['packer_var_file'])
97 | p = Popen(packer_cmd.split(), stdout=PIPE, stderr=PIPE)
98 |
99 | packer_output = ''
100 | while True:
101 | line = p.stdout.readline()
102 | if line == '':
103 | break
104 | else:
105 | print(line, end="")
106 | if re.match('^.+artifact.+id', line):
107 | packer_output = line
108 | return packer_output.split(':')[-1].rstrip()
109 |
110 |
111 | def do_work():
112 | sudo('apt-get update')
113 | sudo('apt-get install -y virtualbox kpartx extlinux qemu-utils python-pip')
114 | sudo('pip install awscli')
115 |
116 | sudo('mkdir -p /mnt/image')
117 | sudo('mount /dev/xvdf1 /mnt/image')
118 |
119 | run('dd if=/dev/zero of=image.raw bs=1M count=8192')
120 | sudo('losetup --find --show image.raw')
121 | sudo('parted -s -a optimal /dev/loop0 mklabel msdos'
122 | ' -- mkpart primary ext4 1 -1')
123 | sudo('parted -s /dev/loop0 set 1 boot on')
124 | sudo('kpartx -av /dev/loop0')
125 | sudo('mkfs.ext4 /dev/mapper/loop0p1')
126 | sudo('mkdir -p /mnt/raw')
127 | sudo('mount /dev/mapper/loop0p1 /mnt/raw')
128 |
129 | sudo('cp -a /mnt/image/* /mnt/raw')
130 |
131 | sudo('extlinux --install /mnt/raw/boot')
132 | sudo('dd if=/usr/lib/syslinux/mbr.bin conv=notrunc bs=440 count=1 '
133 | 'of=/dev/loop0')
134 | sudo('echo -e "DEFAULT cloudify\n'
135 | 'LABEL cloudify\n'
136 | 'LINUX /vmlinuz\n'
137 | 'APPEND root=/dev/disk/by-uuid/'
138 | '`sudo blkid -s UUID -o value /dev/mapper/loop0p1` ro\n'
139 | 'INITRD /initrd.img" | sudo -s tee /mnt/raw/boot/extlinux.conf')
140 |
141 | sudo('umount /mnt/raw')
142 | sudo('kpartx -d /dev/loop0')
143 | sudo('losetup --detach /dev/loop0')
144 |
145 | run('qemu-img convert -f raw -O vmdk image.raw image.vmdk')
146 | run('rm image.raw')
147 |
148 | run('mkdir output')
149 | run('VBoxManage createvm --name cloudify --ostype Ubuntu_64 --register')
150 | run('VBoxManage storagectl cloudify '
151 | '--name SATA '
152 | '--add sata '
153 | '--sataportcount 1 '
154 | '--hostiocache on '
155 | '--bootable on')
156 | run('VBoxManage storageattach cloudify '
157 | '--storagectl SATA '
158 | '--port 0 '
159 | '--type hdd '
160 | '--medium image.vmdk')
161 | run('VBoxManage modifyvm cloudify '
162 | '--memory 2048 '
163 | '--cpus 2 '
164 | '--vram 12 '
165 | '--ioapic on '
166 | '--rtcuseutc on '
167 | '--pae off '
168 | '--boot1 disk '
169 | '--boot2 none '
170 | '--boot3 none '
171 | '--boot4 none ')
172 | run('VBoxManage export cloudify --output output/box.ovf')
173 |
174 | run('echo "Vagrant::Config.run do |config|" > output/Vagrantfile')
175 | run('echo " config.vm.base_mac = `VBoxManage showvminfo cloudify '
176 | '--machinereadable | grep macaddress1 | cut -d"=" -f2`"'
177 | ' >> output/Vagrantfile')
178 | run('echo -e "end\n\n" >> output/Vagrantfile')
179 | run('echo \'include_vagrantfile = File.expand_path'
180 | '("../include/_Vagrantfile", __FILE__)\' >> output/Vagrantfile')
181 | run('echo "load include_vagrantfile if File.exist?'
182 | '(include_vagrantfile)" >> output/Vagrantfile')
183 | run('echo \'{ "provider": "virtualbox" }\' > output/metadata.json')
184 | run('tar -cvf cloudify.box -C output/ .')
185 |
186 | box_name = 'cloudify_{}'.format(strftime('%y%m%d-%H%M'))
187 | box_url = 'https://s3-{0}.amazonaws.com/{1}/{2}.box'.format(
188 | settings['region'], settings['aws_s3_bucket'], box_name
189 | )
190 | run('aws s3 cp '
191 | 'cloudify.box s3://{}/{}.box'.format(settings['aws_s3_bucket'],
192 | box_name))
193 | with open('templates/publish_Vagrantfile.template') as f:
194 | template = Template(f.read())
195 | vfile = StringIO()
196 | vfile.write(template.substitute(BOX_NAME=box_name,
197 | BOX_URL=box_url))
198 | put(vfile, 'publish_Vagrantfile')
199 | run('aws s3 cp publish_Vagrantfile s3://{}/{}'.format(
200 | settings['aws_s3_bucket'], 'Vagrantfile'))
201 |
202 |
203 | def cleanup():
204 | print('cleaning up..')
205 | for item in RESOURCES:
206 | if type(item) == boto.ec2.image.Image:
207 | item.deregister()
208 | print('{} deregistered'.format(item))
209 | elif type(item) == boto.ec2.instance.Instance:
210 | item.terminate()
211 | while item.state != 'terminated':
212 | sleep(5)
213 | item.update()
214 | print('{} terminated'.format(item))
215 | elif type(item) == boto.ec2.connection.EC2Connection:
216 | item.close()
217 | print('{} closed'.format(item))
218 | elif (type(item) == boto.ec2.securitygroup.SecurityGroup or
219 | type(item) == boto.ec2.keypair.KeyPair):
220 | item.delete()
221 | print('{} deleted'.format(item))
222 | else:
223 | print('{} not cleared'.format(item))
224 |
225 |
226 | try:
227 | main()
228 | finally:
229 | cleanup()
230 | print('finished build: {}'.format(strftime("%Y-%m-%d %H:%M:%S")))
231 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/nightly.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/image-builder/quickstart-vagrantbox/nightly.png
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/packer_inputs.json:
--------------------------------------------------------------------------------
1 | {
2 | "aws_source_ami": "ami-234ecc54"
3 | }
4 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/packerfile.json:
--------------------------------------------------------------------------------
1 | {
2 | "variables": {
3 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
4 | "aws_secret_key": "{{env `AWS_ACCESS_KEY`}}",
5 | "aws_source_ami": "",
6 | "instance_type": "m3.large",
7 | "virtualbox_source_image": "",
8 | "insecure_private_key": "./keys/insecure_private_key"
9 | },
10 | "builders": [
11 | {
12 | "name": "virtualbox",
13 | "type": "virtualbox-ovf",
14 | "source_path": "{{user `virtualbox_source_image`}}",
15 | "vm_name": "cloudify",
16 | "ssh_username": "vagrant",
17 | "ssh_key_path": "{{user `insecure_private_key`}}",
18 | "ssh_wait_timeout": "2m",
19 | "shutdown_command": "sudo -S shutdown -P now",
20 | "vboxmanage": [
21 | ["modifyvm", "{{.Name}}", "--memory", "2048"],
22 | ["modifyvm", "{{.Name}}", "--cpus", "2"],
23 | ["modifyvm", "{{.Name}}", "--natdnshostresolver1", "on"]
24 | ],
25 | "headless": true
26 | },
27 | {
28 | "name": "nightly_virtualbox_build",
29 | "type": "amazon-ebs",
30 | "access_key": "{{user `aws_access_key`}}",
31 | "secret_key": "{{user `aws_secret_key`}}",
32 | "ssh_private_key_file": "{{user `insecure_private_key`}}",
33 | "region": "eu-west-1",
34 | "source_ami": "{{user `aws_source_ami`}}",
35 | "instance_type": "{{user `instance_type`}}",
36 | "ssh_username": "vagrant",
37 | "user_data_file": "userdata/add_vagrant_user.sh",
38 | "ami_name": "cloudify nightly {{timestamp}}"
39 | },
40 | {
41 | "name": "amazon",
42 | "type": "amazon-ebs",
43 | "access_key": "{{user `aws_access_key`}}",
44 | "secret_key": "{{user `aws_secret_key`}}",
45 | "region": "eu-west-1",
46 | "source_ami": "{{user `aws_source_ami`}}",
47 | "instance_type": "{{user `instance_type`}}",
48 | "ssh_username": "ubuntu",
49 | "ami_name": "cloudify {{timestamp}}"
50 | }
51 | ],
52 | "provisioners": [
53 | {
54 | "type": "shell",
55 | "script": "provision/prepare_nightly.sh",
56 | "only": ["nightly_virtualbox_build"]
57 | },
58 | {
59 | "type": "shell",
60 | "script": "provision/common.sh"
61 | },
62 | {
63 | "type": "shell",
64 | "inline": ["sudo reboot"],
65 | "only": ["nightly_virtualbox_build"]
66 | },
67 | {
68 | "type": "shell",
69 | "script": "provision/install_ga.sh",
70 | "only": ["nightly_virtualbox_build"]
71 | },
72 | {
73 | "type": "shell",
74 | "script": "provision/influxdb_monkeypatch.sh",
75 | "only": ["nightly_virtualbox_build"]
76 | },
77 | {
78 | "type": "shell",
79 | "script": "provision/cleanup.sh",
80 | "only": ["nightly_virtualbox_build"]
81 | }
82 | ],
83 | "post-processors": [
84 | {
85 | "type": "vagrant",
86 | "only": ["virtualbox"],
87 | "output": "cloudify_{{.Provider}}.box"
88 | }
89 | ]
90 | }
91 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/provision/Vagrantfile:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # * See the License for the specific language governing permissions and
14 | # * limitations under the License.
15 |
16 | # -*- mode: ruby -*-
17 | # vi: set ft=ruby :
18 |
19 | AWS_ACCESS_KEY_ID = ENV['AWS_ACCESS_KEY_ID']
20 | AWS_ACCESS_KEY = ENV['AWS_ACCESS_KEY']
21 |
22 | UBUNTU_TRUSTY_BOX_NAME = 'ubuntu/trusty64'
23 |
24 | Vagrant.configure('2') do |config|
25 | config.vm.define :ubuntu_trusty_box do |local|
26 | local.vm.provider :virtualbox do |vb|
27 | vb.customize ['modifyvm', :id, '--memory', '4096']
28 | end
29 | local.vm.box = UBUNTU_TRUSTY_BOX_NAME
30 | local.vm.hostname = 'local'
31 | local.vm.network :private_network, ip: "10.10.1.10"
32 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
33 | local.vm.provision "shell" do |s|
34 | s.path = "common.sh"
35 | s.privileged = false
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/provision/cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | sudo apt-get purge -y build-essential
4 | sudo apt-get autoremove -y
5 | sudo apt-get clean -y
6 | sudo rm -rf /var/lib/apt/lists/*
7 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/provision/common.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # accepted arguments
4 | # $1 = true iff install from PYPI
5 |
6 | function set_username
7 | {
8 | USERNAME=$(id -u -n)
9 | if [ "$USERNAME" = "" ]; then
10 | echo "using default username"
11 | USERNAME="vagrant"
12 | fi
13 | echo "username is [$USERNAME]"
14 | }
15 |
16 | function install_prereqs
17 | {
18 | echo updating apt cache
19 | sudo apt-get -y update
20 | echo installing prerequisites
21 | sudo apt-get install -y curl vim git gcc python-dev
22 | }
23 |
24 | function install_pip
25 | {
26 | curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python
27 | }
28 |
29 | function create_and_source_virtualenv
30 | {
31 | cd ~
32 | echo installing virtualenv
33 | sudo pip install virtualenv==1.11.4
34 | echo creating cloudify virtualenv
35 | virtualenv cloudify
36 | source cloudify/bin/activate
37 | }
38 |
39 | function install_cli
40 | {
41 | if [ "$INSTALL_FROM_PYPI" = "true" ]; then
42 | echo installing cli from pypi
43 | pip install cloudify
44 | else
45 | echo installing cli from github
46 | pip install git+https://github.com/cloudify-cosmo/cloudify-dsl-parser.git@$CORE_TAG_NAME
47 | pip install git+https://github.com/cloudify-cosmo/flask-securest.git@0.6
48 | pip install git+https://github.com/cloudify-cosmo/cloudify-rest-client.git@$CORE_TAG_NAME
49 | pip install git+https://github.com/cloudify-cosmo/cloudify-plugins-common.git@$CORE_TAG_NAME
50 | pip install git+https://github.com/cloudify-cosmo/cloudify-script-plugin.git@$PLUGINS_TAG_NAME
51 | pip install git+https://github.com/cloudify-cosmo/cloudify-cli.git@$CORE_TAG_NAME
52 | fi
53 | }
54 |
55 | function init_cfy_workdir
56 | {
57 | cd ~
58 | mkdir -p cloudify
59 | cd cloudify
60 | cfy init
61 | }
62 |
63 | function get_manager_blueprints
64 | {
65 | cd ~/cloudify
66 | echo "Retrieving Manager Blueprints"
67 | sudo curl -O http://cloudify-public-repositories.s3.amazonaws.com/cloudify-manager-blueprints/${CORE_TAG_NAME}/cloudify-manager-blueprints.tar.gz &&
68 | sudo tar -zxvf cloudify-manager-blueprints.tar.gz &&
69 | mv cloudify-manager-blueprints-*/ cloudify-manager-blueprints
70 | sudo rm cloudify-manager-blueprints.tar.gz
71 | }
72 |
73 | function generate_keys
74 | {
75 | # generate public/private key pair and add to authorized_keys
76 | ssh-keygen -t rsa -f ~/.ssh/id_rsa -q -N ''
77 | cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
78 |
79 | }
80 |
81 | function configure_manager_blueprint_inputs
82 | {
83 | # configure inputs
84 | cd ~/cloudify
85 | cp cloudify-manager-blueprints/simple-manager-blueprint-inputs.yaml inputs.yaml
86 | sed -i "s|public_ip: ''|public_ip: \'127.0.0.1\'|g" inputs.yaml
87 | sed -i "s|private_ip: ''|private_ip: \'127.0.0.1\'|g" inputs.yaml
88 | sed -i "s|ssh_user: ''|ssh_user: \'${USERNAME}\'|g" inputs.yaml
89 | sed -i "s|ssh_key_filename: ''|ssh_key_filename: \'~/.ssh/id_rsa\'|g" inputs.yaml
90 | # configure manager blueprint
91 | sudo sed -i "s|/cloudify-docker_3|/cloudify-docker-commercial_3|g" cloudify-manager-blueprints/simple-manager-blueprint.yaml
92 | }
93 |
94 | function bootstrap
95 | {
96 | cd ~/cloudify
97 | echo "bootstrapping..."
98 | # bootstrap the manager locally
99 | cfy bootstrap -v -p cloudify-manager-blueprints/simple-manager-blueprint.yaml -i inputs.yaml --install-plugins
100 | if [ "$?" -ne "0" ]; then
101 |     echo "Bootstrap failed, stopping provision."
102 | exit 1
103 | fi
104 | echo "bootstrap done."
105 | }
106 |
107 | function create_blueprints_and_inputs_dir
108 | {
109 | mkdir -p ~/cloudify/blueprints/inputs
110 | }
111 |
112 | function configure_nodecellar_blueprint_inputs
113 | {
114 | echo '
115 | host_ip: 10.10.1.10
116 | agent_user: vagrant
117 | agent_private_key_path: /root/.ssh/id_rsa
118 | ' >> ~/cloudify/blueprints/inputs/nodecellar-singlehost.yaml
119 | }
120 |
121 | function configure_shell_login
122 | {
123 | # source virtualenv on login
124 | echo "source /home/${USERNAME}/cloudify/bin/activate" >> /home/${USERNAME}/.bashrc
125 |
126 | # set shell login base dir
127 | echo "cd ~/cloudify" >> /home/${USERNAME}/.bashrc
128 | }
129 |
130 | INSTALL_FROM_PYPI=$1
131 | echo "Install from PyPI: ${INSTALL_FROM_PYPI}"
132 | CORE_TAG_NAME="master"
133 | PLUGINS_TAG_NAME="master"
134 |
135 | set_username
136 | install_prereqs
137 | install_pip
138 | create_and_source_virtualenv
139 | install_cli
140 | activate_cfy_bash_completion
141 | init_cfy_workdir
142 | get_manager_blueprints
143 | generate_keys
144 | configure_manager_blueprint_inputs
145 | bootstrap
146 | create_blueprints_and_inputs_dir
147 | configure_nodecellar_blueprint_inputs
148 | configure_shell_login
149 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/provision/influxdb_monkeypatch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 |
3 | sudo apt-get update
4 | sudo apt-get install -y jq
5 |
6 | DEST=`sudo docker inspect cfy | jq -r '.[0].Volumes["/opt/influxdb/shared/data"]'`
7 | sudo find $DEST -type f -delete
8 | sudo docker exec cfy /usr/bin/pkill influxdb
9 | sleep 10
10 | curl --fail "http://localhost:8086/db?u=root&p=root" -d "{\"name\": \"cloudify\"}"
11 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/provision/install_ga.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # update apt cache
4 | sudo apt-get update
5 |
6 | # install guest additions
7 | sudo apt-get install -y dkms module-assistant
8 | sudo m-a -i prepare
9 | wget http://download.virtualbox.org/virtualbox/4.3.20/VBoxGuestAdditions_4.3.20.iso
10 | sudo mkdir -p /mnt/iso
11 | sudo mount -o loop VBoxGuestAdditions_4.3.20.iso /mnt/iso/
12 | sudo /mnt/iso/VBoxLinuxAdditions.run
13 | sudo umount /mnt/iso
14 | sudo rmdir /mnt/iso
15 | rm VBoxGuestAdditions_4.3.20.iso
16 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/provision/prepare_nightly.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # update apt cache
4 | sudo apt-get update
5 |
6 | # change partition uuid
7 | sudo apt-get install -y uuid
8 | sudo tune2fs /dev/xvda1 -U `uuid`
9 |
10 | # disable cloud-init datasource retries
11 | echo 'datasource_list: [ None ]' | sudo -s tee /etc/cloud/cloud.cfg.d/90_dpkg.cfg
12 | sudo dpkg-reconfigure -f noninteractive cloud-init
13 |
14 | # disable ttyS0
15 | echo manual | sudo tee /etc/init/ttyS0.override
16 |
17 | # change hostname
18 | echo cloudify | sudo -S tee /etc/hostname
19 | echo 127.0.0.1 cloudify | sudo -S tee -a /etc/hosts
20 |
21 | # change dns resolver to 8.8.8.8
22 | # this is done so docker won't start with AWS dns resolver
23 | echo nameserver 8.8.8.8 | sudo -S tee /etc/resolv.conf
24 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/settings.py:
--------------------------------------------------------------------------------
1 | settings = {
2 | "region": "eu-west-1",
3 | "username": "ubuntu",
4 | "aws_s3_bucket": "cloudify-nightly-vagrant",
5 | "aws_iam_group": "nightly-vagrant-build",
6 | "factory_ami": "ami-6ca1011b",
7 | "instance_type": "m3.large",
8 | "packer_var_file": "packer_inputs.json"
9 | }
10 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/templates/box_Vagrantfile.template:
--------------------------------------------------------------------------------
1 | Vagrant::Config.run do |config|
2 | config.vm.base_mac = $MACHINE_MAC `VBoxManage showvminfo cloudify --machinereadable | grep macaddress1 | cut -d"=" -f2`"'
3 | end
4 |
5 |
6 | include_vagrantfile = File.expand_path("../include/_Vagrantfile", __FILE__)
7 | load include_vagrantfile if File.exist?(include_vagrantfile)
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/templates/extlinux.conf.template:
--------------------------------------------------------------------------------
1 | DEFAULT cloudify
2 | LABEL cloudify
3 | LINUX /vmlinuz
4 | APPEND root=/dev/disk/by-uuid/$UUID `sudo blkid -s UUID -o value /dev/mapper/loop0p1`
5 | INITRD /initrd.img
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/templates/publish_Vagrantfile.template:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # -*- mode: ruby -*-
17 | # vi: set ft=ruby :
18 |
19 | Vagrant.configure("2") do |config|
20 | config.vm.box = "$BOX_NAME"
21 | config.vm.box_url = "$BOX_URL"
22 | config.vm.network :private_network, ip: "10.10.1.10"
23 | end
24 |
--------------------------------------------------------------------------------
/image-builder/quickstart-vagrantbox/userdata/add_vagrant_user.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
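# Create the standard 'vagrant' user with passwordless sudo and install Vagrant's
# publicly known insecure public key so `vagrant ssh` can log into the resulting box.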
3 | useradd -m -s /bin/bash -U vagrant
4 | adduser vagrant admin
5 | echo 'vagrant ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/99-vagrant
6 | chmod 0440 /etc/sudoers.d/99-vagrant
7 | mkdir -p /home/vagrant/.ssh/
8 | wget https://raw.githubusercontent.com/mitchellh/vagrant/master/keys/vagrant.pub -O /home/vagrant/.ssh/authorized_keys
9 | chown -R vagrant:vagrant /home/vagrant/
10 | chmod 0700 /home/vagrant/.ssh/
11 | chmod 0600 /home/vagrant/.ssh/authorized_keys
--------------------------------------------------------------------------------
/offline-configuration/.pydistutils:
--------------------------------------------------------------------------------
1 | [easy_install]
2 | index-url = http:///simple/
--------------------------------------------------------------------------------
/offline-configuration/bandersnatch.conf:
--------------------------------------------------------------------------------
1 | # an nginx server block that serves general files from
2 | # /srv/cloudify and the PyPI mirror from /srv/pypi/web
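# provision.sh copies this file to /etc/nginx/sites-available/ and symlinks it into sites-enabled/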
3 | server {
4 | listen 80;
5 | location /simple/ {
6 | root /srv/pypi/web;
7 | }
8 | location /packages/ {
9 | root /srv/pypi/web;
10 | }
11 | location /cloudify/ {
12 | root /srv/;
13 | }
14 | autoindex on;
15 | charset utf-8;
16 | }
17 |
--------------------------------------------------------------------------------
/offline-configuration/nginx.conf:
--------------------------------------------------------------------------------
1 | user www-data;
2 | worker_processes 4;
3 | pid /var/run/nginx.pid;
4 |
5 | events {
6 | worker_connections 768;
7 | # multi_accept on;
8 | }
9 |
10 | http {
11 |
12 | ##
13 | # Basic Settings
14 | ##
15 |
16 | sendfile on;
17 | tcp_nopush on;
18 | tcp_nodelay on;
19 | keepalive_timeout 65;
20 | types_hash_max_size 2048;
21 | # server_tokens off;
22 |
23 | # server_names_hash_bucket_size 64;
24 | # server_name_in_redirect off;
25 |
26 | include /etc/nginx/mime.types;
27 | default_type application/octet-stream;
28 |
29 | ##
30 | # Logging Settings
31 | ##
32 |
33 | access_log /var/log/nginx/access.log;
34 | error_log /var/log/nginx/error.log;
35 |
36 | ##
37 | # Gzip Settings
38 | ##
39 |
40 | gzip on;
41 | gzip_disable "msie6";
42 |
43 | ##
44 | # Virtual Host Configs
45 | ##
46 |
47 | include /etc/nginx/conf.d/*.conf;
48 | include /etc/nginx/sites-enabled/bandersnatch.conf;
49 |
50 | }
--------------------------------------------------------------------------------
/offline-configuration/pip.conf:
--------------------------------------------------------------------------------
1 | [global]
2 | index-url = http:///simple/
--------------------------------------------------------------------------------
/offline-configuration/provision.sh:
--------------------------------------------------------------------------------
1 | # This script configures and starts a PyPI mirror and HTTP server for offline Cloudify use
2 | # Note: edit the HOME_FOLDER and FILES_DIR variables below before running this script
3 |
4 | HOME_FOLDER="/home/ubuntu"
5 |
6 | # Expected to contain all the needed files - nginx.conf, bandersnatch.conf etc
7 | FILES_DIR="/home/ubuntu"
8 |
9 | echo "Using $HOME_FOLDER as a home folder"
10 | echo "Using $FILES_DIR as a files folder"
11 | echo ' INSTALLING '
12 | echo '------------'
13 | cd "$HOME_FOLDER"
14 | sudo apt-get update -y
15 | echo "### Installing python-pip python-dev build-essential nginx"
16 | sudo apt-get install python-pip python-dev build-essential nginx -y
17 | sudo pip install --upgrade pip
18 | sudo pip install virtualenv
19 | sudo pip install --upgrade virtualenv
20 | virtualenv env
21 | source "$HOME_FOLDER/env/bin/activate"
22 | echo "### Installing bandersnatch"
23 | pip install -r https://bitbucket.org/pypa/bandersnatch/raw/stable/requirements.txt
24 | echo "### Confuring nginx"
25 | sudo mv /etc/nginx/nginx.conf /etc/nginx/nginx.conf.original # backup the original
26 | sudo cp "$FILES_DIR/nginx.conf" /etc/nginx/nginx.conf
27 | sudo cp "$FILES_DIR/bandersnatch.conf" /etc/nginx/sites-available/bandersnatch.conf
28 | sudo ln -s /etc/nginx/sites-available/bandersnatch.conf /etc/nginx/sites-enabled/bandersnatch.conf
29 | sudo mkdir /srv/cloudify
30 | echo "### Starting nginx"
31 | sudo nginx
32 |
33 | echo "Creating configuration file for bandersnatch"
34 | sudo bandersnatch mirror
35 |
36 | echo "Using nohup to download PyPi mirror"
37 | nohup sudo bandersnatch mirror &
38 | PID=$!
39 | echo "nohup pid is $PID"
40 | echo "execute ps -m $PID to check if the download has finished"
--------------------------------------------------------------------------------
/package-configuration/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.deb
3 | debs/
--------------------------------------------------------------------------------
/package-configuration/debian-agent/debian-agent-disable-requiretty.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # now modify sudoers configuration to allow execution without tty
3 | grep -i ubuntu /proc/version > /dev/null
4 | if [ "$?" -eq "0" ]; then
5 | # ubuntu
6 | echo Running on Ubuntu
7 | if sudo grep -q -E '[^!]requiretty' /etc/sudoers; then
8 | echo creating sudoers user file
9 | echo "Defaults:`whoami` !requiretty" | sudo tee /etc/sudoers.d/`whoami` >/dev/null
10 | sudo chmod 0440 /etc/sudoers.d/`whoami`
11 | else
12 | echo No requiretty directive found, nothing to do
13 | fi
14 | else
15 | # other - modify sudoers file
16 | if [ ! -f "/etc/sudoers" ]; then
17 | error_exit 116 "Could not find sudoers file at expected location (/etc/sudoers)"
18 | fi
19 | echo Setting privileged mode
20 | sudo sed -i 's/^Defaults.*requiretty/#&/g' /etc/sudoers || error_exit_on_level $? 117 "Failed to edit sudoers file to disable requiretty directive" 1
21 | fi
22 |
--------------------------------------------------------------------------------
/package-configuration/debian-agent/debian-celeryd-cloudify.conf.template:
--------------------------------------------------------------------------------
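# Environment for the Cloudify agent's Celery worker; presumably rendered and installed
# as the /etc/default file sourced by the matching celeryd init template.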
1 | . {{ includes_file_path }}
2 | CELERY_BASE_DIR="{{ celery_base_dir }}"
3 |
4 | # replaces management__worker
5 | WORKER_MODIFIER="{{ worker_modifier }}"
6 |
7 | export BROKER_IP="{{ broker_ip }}"
8 | export MANAGEMENT_IP="{{ management_ip }}"
9 | export BROKER_URL="amqp://guest:guest@${BROKER_IP}:5672//"
10 | export MANAGER_REST_PORT="8101"
11 | export CELERY_WORK_DIR="${CELERY_BASE_DIR}/cloudify.${WORKER_MODIFIER}/work"
12 | export IS_MANAGEMENT_NODE="False"
13 | export AGENT_IP="{{ agent_ip }}"
14 | export VIRTUALENV="${CELERY_BASE_DIR}/cloudify.${WORKER_MODIFIER}/env"
15 | export MANAGER_FILE_SERVER_URL="http://${MANAGEMENT_IP}:53229"
16 | export MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL="${MANAGER_FILE_SERVER_URL}/blueprints"
17 | export PATH="${VIRTUALENV}/bin:${PATH}"
18 | # enable running celery as root
19 | export C_FORCE_ROOT="true"
20 |
21 | CELERYD_MULTI="${VIRTUALENV}/bin/celeryd-multi"
22 | CELERYD_USER="{{ celery_user }}"
23 | CELERYD_GROUP="{{ celery_group }}"
24 | CELERY_TASK_SERIALIZER="json"
25 | CELERY_RESULT_SERIALIZER="json"
26 | CELERY_RESULT_BACKEND="$BROKER_URL"
27 | DEFAULT_PID_FILE="${CELERY_WORK_DIR}/celery.pid"
28 | DEFAULT_LOG_FILE="${CELERY_WORK_DIR}/celery%I.log"
29 | CELERYD_OPTS="-Ofair --events --loglevel=debug --app=cloudify --include=${INCLUDES} -Q ${WORKER_MODIFIER} --broker=${BROKER_URL} --hostname=${WORKER_MODIFIER} --autoscale={{ worker_autoscale }} --maxtasksperchild=10 --without-gossip --without-mingle"
30 |
--------------------------------------------------------------------------------
/package-configuration/debian-agent/debian-celeryd-cloudify.init.template:
--------------------------------------------------------------------------------
1 | #!/bin/sh -e
2 | # ============================================
3 | # celeryd - Starts the Celery worker daemon.
4 | # ============================================
5 | #
6 | # :Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status}
7 | # :Configuration file: /etc/default/celeryd
8 | #
9 | # See http://docs.celeryproject.org/en/latest/tutorials/daemonizing.html#generic-init-scripts
10 |
11 |
12 | ### BEGIN INIT INFO
13 | # Provides: celeryd
14 | # Required-Start: $network $local_fs $remote_fs
15 | # Required-Stop: $network $local_fs $remote_fs
16 | # Default-Start: 2 3 4 5
17 | # Default-Stop: 0 1 6
18 | # Short-Description: celery task worker daemon
19 | ### END INIT INFO
20 |
21 | WORKER_MODIFIER="{{ worker_modifier }}"
22 | CELERY_BASE_DIR="{{ celery_base_dir }}"
23 | ME=$(basename $0)
24 | export CELERY_WORK_DIR="${CELERY_BASE_DIR}/cloudify.${WORKER_MODIFIER}/work"
25 | CELERY_DEFAULTS="/etc/default/${ME}"
26 |
27 | # some commands work asynchronously, so we'll wait this many seconds
28 | SLEEP_SECONDS=5
29 |
30 | DEFAULT_PID_FILE="/var/run/celery/%n.pid"
31 | DEFAULT_LOG_FILE="/var/log/celery/%n%I.log"
32 | DEFAULT_LOG_LEVEL="INFO"
33 | DEFAULT_NODES="celery"
34 | DEFAULT_CELERYD="-m celery.bin.celeryd_detach"
35 |
36 | test -f "$CELERY_DEFAULTS" && . "$CELERY_DEFAULTS"
37 |
38 | # Set CELERY_CREATE_DIRS to always create log/pid dirs.
39 | CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0}
40 | CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS
41 | CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS
42 | if [ -z "$CELERYD_PID_FILE" ]; then
43 | CELERYD_PID_FILE="$DEFAULT_PID_FILE"
44 | CELERY_CREATE_RUNDIR=1
45 | fi
46 | if [ -z "$CELERYD_LOG_FILE" ]; then
47 | CELERYD_LOG_FILE="$DEFAULT_LOG_FILE"
48 | CELERY_CREATE_LOGDIR=1
49 | fi
50 |
51 | CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
52 | CELERYD_MULTI=${CELERYD_MULTI:-"celeryd-multi"}
53 | CELERYD=${CELERYD:-$DEFAULT_CELERYD}
54 | CELERYD_NODES=${CELERYD_NODES:-$DEFAULT_NODES}
55 |
56 | export CELERY_LOADER
57 |
58 | if [ -n "$2" ]; then
59 | CELERYD_OPTS="$CELERYD_OPTS $2"
60 | fi
61 |
62 | CELERYD_LOG_DIR=`dirname $CELERYD_LOG_FILE`
63 | CELERYD_PID_DIR=`dirname $CELERYD_PID_FILE`
64 |
65 | # Extra start-stop-daemon options, like user/group.
66 | if [ -n "$CELERYD_USER" ]; then
67 | DAEMON_OPTS="$DAEMON_OPTS --uid=$CELERYD_USER"
68 | fi
69 | if [ -n "$CELERYD_GROUP" ]; then
70 | DAEMON_OPTS="$DAEMON_OPTS --gid=$CELERYD_GROUP"
71 | fi
72 |
73 | if [ -n "$CELERYD_CHDIR" ]; then
74 | DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYD_CHDIR"
75 | fi
76 |
77 |
78 | check_dev_null() {
79 | if [ ! -c /dev/null ]; then
80 | echo "/dev/null is not a character device!"
81 | exit 75 # EX_TEMPFAIL
82 | fi
83 | }
84 |
85 |
86 | maybe_die() {
87 | if [ $? -ne 0 ]; then
88 | echo "Exiting: $* (errno $?)"
89 | exit 77 # EX_NOPERM
90 | fi
91 | }
92 |
93 | create_default_dir() {
94 | if [ ! -d "$1" ]; then
95 | echo "- Creating default directory: '$1'"
96 | mkdir -p "$1"
97 | maybe_die "Couldn't create directory $1"
98 | echo "- Changing permissions of '$1' to 02755"
99 | chmod 02755 "$1"
100 | maybe_die "Couldn't change permissions for $1"
101 | if [ -n "$CELERYD_USER" ]; then
102 | echo "- Changing owner of '$1' to '$CELERYD_USER'"
103 | chown "$CELERYD_USER" "$1"
104 | maybe_die "Couldn't change owner of $1"
105 | fi
106 | if [ -n "$CELERYD_GROUP" ]; then
107 | echo "- Changing group of '$1' to '$CELERYD_GROUP'"
108 | chgrp "$CELERYD_GROUP" "$1"
109 | maybe_die "Couldn't change group of $1"
110 | fi
111 | fi
112 | }
113 |
114 |
115 | check_paths() {
116 | if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then
117 | create_default_dir "$CELERYD_LOG_DIR"
118 | fi
119 | if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then
120 | create_default_dir "$CELERYD_PID_DIR"
121 | fi
122 | }
123 |
124 | create_paths() {
125 | create_default_dir "$CELERYD_LOG_DIR"
126 | create_default_dir "$CELERYD_PID_DIR"
127 | }
128 |
129 | export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
130 |
131 |
132 | _get_pid_files() {
133 | [ ! -d "$CELERYD_PID_DIR" ] && return
134 | echo `ls -1 "$CELERYD_PID_DIR"/*.pid 2> /dev/null`
135 | }
136 |
137 | stop_workers () {
138 | $CELERYD_MULTI stopwait $CELERYD_NODES --pidfile="$CELERYD_PID_FILE"
139 | sleep $SLEEP_SECONDS
140 | }
141 |
142 |
143 | start_workers () {
144 | $CELERYD_MULTI start $CELERYD_NODES $DAEMON_OPTS \
145 | --pidfile="$CELERYD_PID_FILE" \
146 | --logfile="$CELERYD_LOG_FILE" \
147 | --loglevel="$CELERYD_LOG_LEVEL" \
148 | --cmd="$CELERYD" \
149 | $CELERYD_OPTS
150 | sleep $SLEEP_SECONDS
151 | }
152 |
153 |
154 | restart_workers () {
155 | $CELERYD_MULTI restart $CELERYD_NODES $DAEMON_OPTS \
156 | --pidfile="$CELERYD_PID_FILE" \
157 | --logfile="$CELERYD_LOG_FILE" \
158 | --loglevel="$CELERYD_LOG_LEVEL" \
159 | --cmd="$CELERYD" \
160 | $CELERYD_OPTS
161 | sleep $SLEEP_SECONDS
162 | }
163 |
164 | check_status () {
165 | local pid_files=
166 | pid_files=`_get_pid_files`
167 | [ -z "$pid_files" ] && echo "celeryd not running (no pidfile)" && exit 1
168 |
169 | local one_failed=
170 | for pid_file in $pid_files; do
171 | local node=`basename "$pid_file" .pid`
172 | local pid=`cat "$pid_file"`
173 | local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'`
174 | if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then
175 | echo "bad pid file ($pid_file)"
176 | else
177 | local failed=
178 | kill -0 $pid 2> /dev/null || failed=true
179 | if [ "$failed" ]; then
180 | echo "celeryd (node $node) (pid $pid) is stopped, but pid file exists!"
181 | one_failed=true
182 | else
183 | echo "celeryd (node $node) (pid $pid) is running..."
184 | fi
185 | fi
186 | done
187 |
188 | [ "$one_failed" ] && exit 1 || exit 0
189 | }
190 |
191 |
192 | case "$1" in
193 | start)
194 | check_dev_null
195 | check_paths
196 | start_workers
197 | ;;
198 |
199 | stop)
200 | check_dev_null
201 | check_paths
202 | stop_workers
203 | ;;
204 |
205 | reload|force-reload)
206 | echo "Use restart"
207 | ;;
208 |
209 | status)
210 | check_status
211 | ;;
212 |
213 | restart)
214 | check_dev_null
215 | check_paths
216 | restart_workers
217 | ;;
218 | try-restart)
219 | check_dev_null
220 | check_paths
221 | restart_workers
222 | ;;
223 | create-paths)
224 | check_dev_null
225 | create_paths
226 | ;;
227 | check-paths)
228 | check_dev_null
229 | check_paths
230 | ;;
231 | *)
232 | echo "Usage: /etc/init.d/celeryd {start|stop|restart|kill|create-paths}"
233 | exit 64 # EX_USAGE
234 | ;;
235 | esac
236 |
237 | exit 0
--------------------------------------------------------------------------------
/package-configuration/linux-cli/test_cli_install.py:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | ############
16 | import testtools
17 | import shutil
18 | import tempfile
19 | import os
20 |
21 |
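# The module file is named "get-cloudify.py"; the hyphen makes a normal import
# statement impossible, so __import__ is used to load it by name.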
22 | get_cloudify = __import__("get-cloudify")
23 |
24 | cloudify_cli_url = \
25 | 'https://github.com/cloudify-cosmo/cloudify-cli/archive/17.6.30.tar.gz'
26 |
27 |
28 | class CliInstallTests(testtools.TestCase):
29 | @staticmethod
30 | def install_cloudify(args):
31 | installer = get_cloudify.CloudifyInstaller(**args)
32 | installer.execute()
33 |
34 | def setUp(self):
35 | super(CliInstallTests, self).setUp()
36 | self.get_cloudify = get_cloudify
37 |
38 | def test_full_cli_install(self):
39 | tempdir = tempfile.mkdtemp()
40 | install_args = {
41 | 'force': True,
42 | 'virtualenv': tempdir,
43 | }
44 |
45 | try:
46 | self.install_cloudify(install_args)
47 | cfy_path = os.path.join(
48 | self.get_cloudify._get_env_bin_path(tempdir), 'cfy')
49 | self.get_cloudify.run('{0} --version'.format(cfy_path))
50 | finally:
51 | shutil.rmtree(tempdir)
52 |
53 | def test_install_from_source_with_requirements(self):
54 | tempdir = tempfile.mkdtemp()
55 | temp_requirements_file = tempfile.NamedTemporaryFile(delete=True)
56 | with open(temp_requirements_file.name, 'w') as requirements_file:
57 | requirements_file.write('sh==1.11')
58 | install_args = {
59 | 'source': cloudify_cli_url,
60 | 'withrequirements': [temp_requirements_file.name],
61 | 'virtualenv': tempdir,
62 | }
63 |
64 | try:
65 | self.install_cloudify(install_args)
66 | cfy_path = os.path.join(
67 | self.get_cloudify._get_env_bin_path(tempdir), 'cfy')
68 | proc = self.get_cloudify.run('{0} --version'.format(cfy_path))
69 | self.assertIn('Cloudify CLI 17', proc.aggr_stdout)
70 | finally:
71 | shutil.rmtree(tempdir)
72 | temp_requirements_file.close()
73 |
74 | def test_cli_installed_and_upgrade(self):
75 | tempdir = tempfile.mkdtemp()
76 | install_args = {
77 | 'virtualenv': tempdir,
78 | 'upgrade': True
79 | }
80 |
81 | try:
82 | self.install_cloudify(install_args)
83 | self.get_cloudify.handle_upgrade(**install_args)
84 | finally:
85 | shutil.rmtree(tempdir)
86 |
87 | def test_cli_installed_and_no_upgrade(self):
88 | tempdir = tempfile.mkdtemp()
89 | install_args = {
90 | 'virtualenv': tempdir,
91 | 'upgrade': False
92 | }
93 |
94 | try:
95 | self.install_cloudify(install_args)
96 | ex = self.assertRaises(
97 | SystemExit, self.get_cloudify.handle_upgrade, **install_args)
98 | self.assertEqual(1, ex.message)
99 | finally:
100 | shutil.rmtree(tempdir)
101 |
--------------------------------------------------------------------------------
/package-configuration/linux-cli/test_get_cloudify.py:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 | ############
16 | import testtools
17 | import urllib
18 | import tempfile
19 | from StringIO import StringIO
20 | import mock
21 | import shutil
22 | import os
23 | import tarfile
24 |
25 |
26 | get_cloudify = __import__("get-cloudify")
27 |
28 |
29 | class CliBuilderUnitTests(testtools.TestCase):
30 | """Unit tests for functions in get_cloudify.py"""
31 |
32 | def setUp(self):
33 | super(CliBuilderUnitTests, self).setUp()
34 | self.get_cloudify = get_cloudify
35 | self.get_cloudify.IS_VIRTUALENV = False
36 |
37 | def _create_dummy_requirements_tar(self, url, destination):
38 | tempdir = os.path.dirname(destination)
39 | fpath = self._generate_requirements_file(tempdir)
40 | try:
41 | tar = tarfile.open(name=destination, mode='w:gz')
42 | tar.add(name=tempdir, arcname='maindir')
43 | tar.close()
44 | finally:
45 | os.remove(fpath)
46 | return destination
47 |
48 | def _generate_requirements_file(self, path):
49 | fpath = os.path.join(path, 'dev-requirements.txt')
50 | with open(fpath, 'w') as f:
51 | f.write('sh==1.11\n')
52 | return fpath
53 |
54 | def test_validate_urls(self):
55 | self._validate_url(self.get_cloudify.PIP_URL)
56 | self._validate_url(self.get_cloudify.PYCR64_URL)
57 | self._validate_url(self.get_cloudify.PYCR32_URL)
58 |
59 | @staticmethod
60 | def _validate_url(url):
61 | try:
62 | status = urllib.urlopen(url).getcode()
63 | if not status == 200:
64 | raise AssertionError('url {} is not valid.'.format(url))
65 | except Exception:
66 | raise AssertionError('url {} is not valid.'.format(url))
67 |
68 | def test_run_valid_command(self):
69 | proc = self.get_cloudify.run('echo Hi!')
70 | self.assertEqual(proc.returncode, 0, 'process execution failed')
71 |
72 | def test_run_invalid_command(self):
73 | builder_stdout = StringIO()
74 | # replacing builder stdout
75 | self.get_cloudify.sys.stdout = builder_stdout
76 | cmd = 'this is not a valid command'
77 | proc = self.get_cloudify.run(cmd)
78 | self.assertIsNot(proc.returncode, 0, 'command \'{}\' execution was '
79 | 'expected to fail'.format(cmd))
80 |
81 | def test_install_pip_failed_download(self):
82 | installer = self.get_cloudify.CloudifyInstaller()
83 |
84 | mock_boom = mock.MagicMock()
85 | mock_boom.side_effect = StandardError('Boom!')
86 | self.get_cloudify.download_file = mock_boom
87 |
88 | mock_false = mock.MagicMock()
89 |
90 | def side_effect():
91 | return False
92 | mock_false.side_effect = side_effect
93 | installer.find_pip = mock_false
94 |
95 | ex = self.assertRaises(SystemExit, installer.install_pip)
96 | self.assertEqual(
97 | 'Failed downloading pip from {0}. (Boom!)'.format(
98 | self.get_cloudify.PIP_URL), ex.message)
99 |
100 | def test_install_pip_fail(self):
101 | self.get_cloudify.download_file = mock.MagicMock(return_value=None)
102 |
103 | pythonpath = 'non_existing_path'
104 | installer = self.get_cloudify.CloudifyInstaller(pythonpath=pythonpath)
105 |
106 | mock_false = mock.MagicMock()
107 |
108 | def side_effect():
109 | return False
110 | mock_false.side_effect = side_effect
111 | installer.find_pip = mock_false
112 |
113 | ex = self.assertRaises(SystemExit, installer.install_pip)
114 | self.assertIn('Could not install pip', ex.message)
115 |
116 | def test_make_virtualenv_fail(self):
117 | ex = self.assertRaises(
118 | SystemExit, self.get_cloudify.make_virtualenv,
119 | '/path/to/dir', 'non_existing_path')
120 | self.assertEqual(
121 | 'Could not create virtualenv: /path/to/dir', ex.message)
122 |
123 | def test_install_non_existing_module(self):
124 | ex = self.assertRaises(
125 | SystemExit, self.get_cloudify.install_module, 'nonexisting_module')
126 | self.assertEqual(
127 | 'Could not install module: nonexisting_module.', ex.message)
128 |
129 | def test_get_os_props(self):
130 | distro = self.get_cloudify.get_os_props()[0]
131 | distros = ('ubuntu', 'redhat', 'debian', 'fedora', 'centos',
132 | 'archlinux')
133 | if distro.lower() not in distros:
134 | self.fail('distro prop \'{0}\' should be equal to one of: '
135 | '{1}'.format(distro, distros))
136 |
137 | def test_download_file(self):
138 | self.get_cloudify.VERBOSE = True
139 | tmp_file = tempfile.NamedTemporaryFile(delete=True)
140 | self.get_cloudify.download_file('http://www.google.com', tmp_file.name)
141 | with open(tmp_file.name) as f:
142 | content = f.readlines()
143 | self.assertIsNotNone(content)
144 |
145 | def test_check_cloudify_not_installed_in_venv(self):
146 | tmp_venv = tempfile.mkdtemp()
147 | try:
148 | self.get_cloudify.make_virtualenv(tmp_venv, 'python')
149 | self.assertFalse(
150 | self.get_cloudify.check_cloudify_installed(tmp_venv))
151 | finally:
152 | shutil.rmtree(tmp_venv)
153 |
154 | def test_check_cloudify_installed_in_venv(self):
155 | tmp_venv = tempfile.mkdtemp()
156 | try:
157 | self.get_cloudify.make_virtualenv(tmp_venv, 'python')
158 | installer = get_cloudify.CloudifyInstaller(virtualenv=tmp_venv)
159 | installer.execute()
160 | self.assertTrue(
161 | self.get_cloudify.check_cloudify_installed(tmp_venv))
162 | finally:
163 | shutil.rmtree(tmp_venv)
164 |
165 | def test_get_requirements_from_source_url(self):
166 | def get(url, destination):
167 | return self._create_dummy_requirements_tar(url, destination)
168 |
169 | self.get_cloudify.download_file = get
170 | try:
171 | installer = self.get_cloudify.CloudifyInstaller()
172 | req_list = installer._get_default_requirement_files('null')
173 | self.assertEquals(len(req_list), 1)
174 | self.assertIn('dev-requirements.txt', req_list[0])
175 | finally:
176 | self.get_cloudify.download_file = get_cloudify.download_file
177 |
178 | def test_get_requirements_from_source_path(self):
179 | tempdir = tempfile.mkdtemp()
180 | self._generate_requirements_file(tempdir)
181 | try:
182 | installer = self.get_cloudify.CloudifyInstaller()
183 | req_list = installer._get_default_requirement_files(tempdir)
184 | self.assertEquals(len(req_list), 1)
185 | self.assertIn('dev-requirements.txt', req_list[0])
186 | finally:
187 | shutil.rmtree(tempdir)
188 |
189 |
190 | class TestArgParser(testtools.TestCase):
191 | """Unit tests for functions in get_cloudify.py"""
192 |
193 | def setUp(self):
194 | super(TestArgParser, self).setUp()
195 | self.get_cloudify = get_cloudify
196 | self.get_cloudify.IS_VIRTUALENV = False
197 |
198 | def test_args_parser_linux(self):
199 | self.get_cloudify.IS_LINUX = True
200 | self.get_cloudify.IS_WIN = False
201 | args = self.get_cloudify.parse_args([])
202 | self.assertEqual(args.pythonpath, 'python',
203 | 'wrong default python path {} set for linux'
204 | .format(args.pythonpath))
205 | self.assertFalse(hasattr(args, 'installpycrypto'))
206 | self.assertTrue(hasattr(args, 'installpythondev'))
207 |
208 | def test_args_parser_windows(self):
209 | self.get_cloudify.IS_LINUX = False
210 | self.get_cloudify.IS_WIN = True
211 | args = self.get_cloudify.parse_args([])
212 | self.assertEqual(args.pythonpath, 'c:/python27/python.exe',
213 | 'wrong default python path {} set for win32'
214 | .format(args.pythonpath))
215 | self.assertTrue(hasattr(args, 'installpycrypto'))
216 | self.assertFalse(hasattr(args, 'installpythondev'))
217 |
218 | def test_default_args(self):
219 | args = self.get_cloudify.parse_args([])
220 | self.assertFalse(args.force)
221 | self.assertFalse(args.forceonline)
222 | self.assertFalse(args.installpip)
223 | self.assertFalse(args.installvirtualenv)
224 | self.assertFalse(args.pre)
225 | self.assertFalse(args.quiet)
226 | self.assertFalse(args.verbose)
227 | self.assertIsNone(args.version)
228 | self.assertIsNone(args.virtualenv)
229 | self.assertEqual(args.wheelspath, 'wheelhouse')
230 |
231 | def test_args_chosen(self):
232 | self.get_cloudify.IS_LINUX = True
233 | set_args = self.get_cloudify.parse_args(['-f',
234 | '--forceonline',
235 | '--installpip',
236 | '--virtualenv=venv_path',
237 | '--quiet',
238 | '--version=3.2',
239 | '--installpip',
240 | '--installpythondev'])
241 |
242 | self.assertTrue(set_args.force)
243 | self.assertTrue(set_args.forceonline)
244 | self.assertTrue(set_args.installpip)
245 | self.assertTrue(set_args.quiet)
246 | self.assertEqual(set_args.version, '3.2')
247 | self.assertEqual(set_args.virtualenv, 'venv_path')
248 |
249 | def test_mutually_exclude_groups(self):
250 | # # test with args that do not go together
251 | ex = self.assertRaises(
252 | SystemExit, self.get_cloudify.parse_args, ['--version', '--pre'])
253 | self.assertEqual(2, ex.message)
254 |
255 | ex = self.assertRaises(
256 | SystemExit, self.get_cloudify.parse_args, ['--verbose', '--quiet'])
257 | self.assertEqual(2, ex.message)
258 |
259 | ex = self.assertRaises(
260 | SystemExit, self.get_cloudify.parse_args,
261 | ['--wheelspath', '--forceonline'])
262 | self.assertEqual(2, ex.message)
263 |
264 |
265 | class ArgsObject(object):
266 | pass
267 |
--------------------------------------------------------------------------------
/package-configuration/manager/conf/guni.conf.template:
--------------------------------------------------------------------------------
1 | {
2 | events_file_path: '/tmp/cloudifySfRomotest/events',
3 | <<<<<<< HEAD
4 | rest_service_log_path: '{{ config_templates.params_init.rest_service_log_path }}',
5 | =======
6 | rest_service_log_path: '{{ config_templates.__params_init.rest_service_log_path }}',
7 | security_bypass_port: {{ config_templates.__params_init.rest_internal_port }},
8 | >>>>>>> master
9 | file_server_base_uri: 'http://localhost:53229',
10 | file_server_root: '{{ config_templates.params_conf.file_server_dir }}',
11 | file_server_blueprints_folder: 'blueprints',
12 | file_server_uploaded_blueprints_folder: 'uploaded-blueprints',
13 | file_server_resources_uri: '/resources'
14 | }
--------------------------------------------------------------------------------
/package-configuration/manager/init/amqpflux.conf.template:
--------------------------------------------------------------------------------
1 | description "amqp-influxdb consumer instance"
2 |
3 | start on (started rabbitmq-server
4 | and runlevel [2345])
5 | stop on runlevel [016]
6 |
7 | # Respawn it if the process exits
8 | respawn
9 | respawn limit 5 30
10 | limit nofile 65550 65550
11 |
12 | exec /opt/manager/bin/cloudify-amqp-influxdb --amqp-exchange cloudify-monitoring --amqp-routing-key '*' --influx-database cloudify
13 |
--------------------------------------------------------------------------------
/package-configuration/manager/init/manager.conf.template:
--------------------------------------------------------------------------------
1 | description "manager gunicorn"
2 |
3 | start on (filesystem)
4 | stop on runlevel [016]
5 |
6 | respawn
7 | console log
8 | # setuid nobody
9 | # setgid nogroup
10 | chdir {{ config_templates.params_init.rest_server_path }}
11 |
12 | script
13 | export MANAGER_REST_CONFIG_PATH={{ config_templates.params_init.gunicorn_conf_path }}
14 | WORKERS=$(($(nproc)*2+1))
15 | exec sudo -u {{ config_templates.params_init.gunicorn_user }} -E {{ sources_path }}/bin/gunicorn -w ${WORKERS} -b 0.0.0.0:{{ config_templates.params_init.rest_port }} --timeout 300 server:app --log-file {{ config_templates.params_init.gunicorn_log_path }} --access-logfile {{ config_templates.params_init.gunicorn_access_log_path }}
16 | end script
--------------------------------------------------------------------------------
/package-configuration/rabbitmq/init/rabbitmq-server.conf.template:
--------------------------------------------------------------------------------
1 | description "RabbitMQ Server"
2 |
3 | start on runlevel [2345]
4 | stop on runlevel [016]
5 |
6 | kill timeout 60
7 | respawn
8 | respawn limit 10 5
9 |
10 | setuid rabbitmq
11 | setgid rabbitmq
12 | env HOME=/var/lib/rabbitmq
13 |
14 | exec /usr/sbin/rabbitmq-server
15 | post-start exec /usr/sbin/rabbitmqctl wait
16 | pre-stop exec /usr/sbin/rabbitmqctl stop
--------------------------------------------------------------------------------
/package-configuration/riemann/init/riemann.conf.template:
--------------------------------------------------------------------------------
1 | description "Riemann"
2 |
3 | start on (started rabbitmq-server
4 | and runlevel [2345])
5 | stop on runlevel [016]
6 |
7 | kill timeout 60
8 | respawn
9 | respawn limit 10 5
10 |
11 | setuid riemann
12 | setgid riemann
13 |
14 | script
15 | export EXTRA_CLASSPATH="{{ config_templates.params_riemann.langohr_jar }}"
16 | CONFIG_PATH=""
17 | MANAGER_CONFIG_PATH="{{ config_templates.params_riemann.manager_config }}"
18 | if [ -f ${MANAGER_CONFIG_PATH} ]; then
19 | CONFIG_PATH=${MANAGER_CONFIG_PATH}
20 | fi
21 | exec /usr/bin/riemann -a ${CONFIG_PATH}
22 | end script
23 |
--------------------------------------------------------------------------------
/package-configuration/ubuntu-commercial-agent/Ubuntu-agent-disable-requiretty.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # now modify sudoers configuration to allow execution without tty
3 | grep -i ubuntu /proc/version > /dev/null
4 | if [ "$?" -eq "0" ]; then
5 | # ubuntu
6 | echo Running on Ubuntu
7 | if sudo grep -q -E '[^!]requiretty' /etc/sudoers; then
8 | echo creating sudoers user file
9 | echo "Defaults:`whoami` !requiretty" | sudo tee /etc/sudoers.d/`whoami` >/dev/null
10 | sudo chmod 0440 /etc/sudoers.d/`whoami`
11 | else
12 | echo No requiretty directive found, nothing to do
13 | fi
14 | else
15 | # other - modify sudoers file
16 | if [ ! -f "/etc/sudoers" ]; then
17 | error_exit 116 "Could not find sudoers file at expected location (/etc/sudoers)"
18 | fi
19 | echo Setting privileged mode
20 | sudo sed -i 's/^Defaults.*requiretty/#&/g' /etc/sudoers || error_exit_on_level $? 117 "Failed to edit sudoers file to disable requiretty directive" 1
21 | fi
22 |
--------------------------------------------------------------------------------
/package-configuration/ubuntu-commercial-agent/Ubuntu-celeryd-cloudify.conf.template:
--------------------------------------------------------------------------------
1 | . {{ includes_file_path }}
2 | CELERY_BASE_DIR="{{ celery_base_dir }}"
3 |
4 | # replaces management__worker
5 | WORKER_MODIFIER="{{ worker_modifier }}"
6 |
7 | export BROKER_IP="{{ broker_ip }}"
8 | export MANAGEMENT_IP="{{ management_ip }}"
9 | export BROKER_URL="amqp://guest:guest@${BROKER_IP}:5672//"
10 | export MANAGER_REST_PORT="8101"
11 | export CELERY_WORK_DIR="${CELERY_BASE_DIR}/cloudify.${WORKER_MODIFIER}/work"
12 | export IS_MANAGEMENT_NODE="False"
13 | export AGENT_IP="{{ agent_ip }}"
14 | export VIRTUALENV="${CELERY_BASE_DIR}/cloudify.${WORKER_MODIFIER}/env"
15 | export MANAGER_FILE_SERVER_URL="http://${MANAGEMENT_IP}:53229"
16 | export MANAGER_FILE_SERVER_BLUEPRINTS_ROOT_URL="${MANAGER_FILE_SERVER_URL}/blueprints"
17 | export PATH="${VIRTUALENV}/bin:${PATH}"
18 | # enable running celery as root
19 | export C_FORCE_ROOT="true"
20 |
21 | CELERYD_MULTI="${VIRTUALENV}/bin/celeryd-multi"
22 | CELERYD_USER="{{ celery_user }}"
23 | CELERYD_GROUP="{{ celery_group }}"
24 | CELERY_TASK_SERIALIZER="json"
25 | CELERY_RESULT_SERIALIZER="json"
26 | CELERY_RESULT_BACKEND="$BROKER_URL"
27 | DEFAULT_PID_FILE="${CELERY_WORK_DIR}/celery.pid"
28 | DEFAULT_LOG_FILE="${CELERY_WORK_DIR}/celery%I.log"
29 | CELERYD_OPTS="-Ofair --events --loglevel=debug --app=cloudify --include=${INCLUDES},softlayer_plugin.server,network_plugin.network,network_plugin.port,server_plugin.server,storage_plugin.storage -Q ${WORKER_MODIFIER} --broker=${BROKER_URL} --hostname=${WORKER_MODIFIER} --autoscale={{ worker_autoscale }} --maxtasksperchild=10 --without-gossip --without-mingle"
30 |
--------------------------------------------------------------------------------
/package-configuration/ubuntu-commercial-agent/Ubuntu-celeryd-cloudify.init.template:
--------------------------------------------------------------------------------
1 | #!/bin/sh -e
2 | # ============================================
3 | # celeryd - Starts the Celery worker daemon.
4 | # ============================================
5 | #
6 | # :Usage: /etc/init.d/celeryd {start|stop|force-reload|restart|try-restart|status}
7 | # :Configuration file: /etc/default/celeryd
8 | #
9 | # See http://docs.celeryproject.org/en/latest/tutorials/daemonizing.html#generic-init-scripts
10 |
11 |
12 | ### BEGIN INIT INFO
13 | # Provides: celeryd
14 | # Required-Start: $network $local_fs $remote_fs
15 | # Required-Stop: $network $local_fs $remote_fs
16 | # Default-Start: 2 3 4 5
17 | # Default-Stop: 0 1 6
18 | # Short-Description: celery task worker daemon
19 | ### END INIT INFO
20 |
21 | WORKER_MODIFIER="{{ worker_modifier }}"
22 | CELERY_BASE_DIR="{{ celery_base_dir }}"
23 | ME=$(basename $0)
24 | export CELERY_WORK_DIR="${CELERY_BASE_DIR}/cloudify.${WORKER_MODIFIER}/work"
25 | CELERY_DEFAULTS="/etc/default/${ME}"
26 |
27 | # some commands work asynchronously, so we'll wait this many seconds
28 | SLEEP_SECONDS=5
29 |
30 | DEFAULT_PID_FILE="/var/run/celery/%n.pid"
31 | DEFAULT_LOG_FILE="/var/log/celery/%n%I.log"
32 | DEFAULT_LOG_LEVEL="INFO"
33 | DEFAULT_NODES="celery"
34 | DEFAULT_CELERYD="-m celery.bin.celeryd_detach"
35 |
36 | test -f "$CELERY_DEFAULTS" && . "$CELERY_DEFAULTS"
37 |
38 | # Set CELERY_CREATE_DIRS to always create log/pid dirs.
39 | CELERY_CREATE_DIRS=${CELERY_CREATE_DIRS:-0}
40 | CELERY_CREATE_RUNDIR=$CELERY_CREATE_DIRS
41 | CELERY_CREATE_LOGDIR=$CELERY_CREATE_DIRS
42 | if [ -z "$CELERYD_PID_FILE" ]; then
43 | CELERYD_PID_FILE="$DEFAULT_PID_FILE"
44 | CELERY_CREATE_RUNDIR=1
45 | fi
46 | if [ -z "$CELERYD_LOG_FILE" ]; then
47 | CELERYD_LOG_FILE="$DEFAULT_LOG_FILE"
48 | CELERY_CREATE_LOGDIR=1
49 | fi
50 |
51 | CELERYD_LOG_LEVEL=${CELERYD_LOG_LEVEL:-${CELERYD_LOGLEVEL:-$DEFAULT_LOG_LEVEL}}
52 | CELERYD_MULTI=${CELERYD_MULTI:-"celeryd-multi"}
53 | CELERYD=${CELERYD:-$DEFAULT_CELERYD}
54 | CELERYD_NODES=${CELERYD_NODES:-$DEFAULT_NODES}
55 |
56 | export CELERY_LOADER
57 |
58 | if [ -n "$2" ]; then
59 | CELERYD_OPTS="$CELERYD_OPTS $2"
60 | fi
61 |
62 | CELERYD_LOG_DIR=`dirname $CELERYD_LOG_FILE`
63 | CELERYD_PID_DIR=`dirname $CELERYD_PID_FILE`
64 |
65 | # Extra start-stop-daemon options, like user/group.
66 | if [ -n "$CELERYD_USER" ]; then
67 | DAEMON_OPTS="$DAEMON_OPTS --uid=$CELERYD_USER"
68 | fi
69 | if [ -n "$CELERYD_GROUP" ]; then
70 | DAEMON_OPTS="$DAEMON_OPTS --gid=$CELERYD_GROUP"
71 | fi
72 |
73 | if [ -n "$CELERYD_CHDIR" ]; then
74 | DAEMON_OPTS="$DAEMON_OPTS --workdir=$CELERYD_CHDIR"
75 | fi
76 |
77 |
78 | check_dev_null() {
79 | if [ ! -c /dev/null ]; then
80 | echo "/dev/null is not a character device!"
81 | exit 75 # EX_TEMPFAIL
82 | fi
83 | }
84 |
85 |
86 | maybe_die() {
87 | if [ $? -ne 0 ]; then
88 | echo "Exiting: $* (errno $?)"
89 | exit 77 # EX_NOPERM
90 | fi
91 | }
92 |
93 | create_default_dir() {
94 | if [ ! -d "$1" ]; then
95 | echo "- Creating default directory: '$1'"
96 | mkdir -p "$1"
97 | maybe_die "Couldn't create directory $1"
98 | echo "- Changing permissions of '$1' to 02755"
99 | chmod 02755 "$1"
100 | maybe_die "Couldn't change permissions for $1"
101 | if [ -n "$CELERYD_USER" ]; then
102 | echo "- Changing owner of '$1' to '$CELERYD_USER'"
103 | chown "$CELERYD_USER" "$1"
104 | maybe_die "Couldn't change owner of $1"
105 | fi
106 | if [ -n "$CELERYD_GROUP" ]; then
107 | echo "- Changing group of '$1' to '$CELERYD_GROUP'"
108 | chgrp "$CELERYD_GROUP" "$1"
109 | maybe_die "Couldn't change group of $1"
110 | fi
111 | fi
112 | }
113 |
114 |
115 | check_paths() {
116 | if [ $CELERY_CREATE_LOGDIR -eq 1 ]; then
117 | create_default_dir "$CELERYD_LOG_DIR"
118 | fi
119 | if [ $CELERY_CREATE_RUNDIR -eq 1 ]; then
120 | create_default_dir "$CELERYD_PID_DIR"
121 | fi
122 | }
123 |
124 | create_paths() {
125 | create_default_dir "$CELERYD_LOG_DIR"
126 | create_default_dir "$CELERYD_PID_DIR"
127 | }
128 |
129 | export PATH="${PATH:+$PATH:}/usr/sbin:/sbin"
130 |
131 |
132 | _get_pid_files() {
133 | [ ! -d "$CELERYD_PID_DIR" ] && return
134 | echo `ls -1 "$CELERYD_PID_DIR"/*.pid 2> /dev/null`
135 | }
136 |
137 | stop_workers () {
138 | $CELERYD_MULTI stopwait $CELERYD_NODES --pidfile="$CELERYD_PID_FILE"
139 | sleep $SLEEP_SECONDS
140 | }
141 |
142 |
143 | start_workers () {
144 | $CELERYD_MULTI start $CELERYD_NODES $DAEMON_OPTS \
145 | --pidfile="$CELERYD_PID_FILE" \
146 | --logfile="$CELERYD_LOG_FILE" \
147 | --loglevel="$CELERYD_LOG_LEVEL" \
148 | --cmd="$CELERYD" \
149 | $CELERYD_OPTS
150 | sleep $SLEEP_SECONDS
151 | }
152 |
153 |
154 | restart_workers () {
155 | $CELERYD_MULTI restart $CELERYD_NODES $DAEMON_OPTS \
156 | --pidfile="$CELERYD_PID_FILE" \
157 | --logfile="$CELERYD_LOG_FILE" \
158 | --loglevel="$CELERYD_LOG_LEVEL" \
159 | --cmd="$CELERYD" \
160 | $CELERYD_OPTS
161 | sleep $SLEEP_SECONDS
162 | }
163 |
164 | check_status () {
165 | local pid_files=
166 | pid_files=`_get_pid_files`
167 | [ -z "$pid_files" ] && echo "celeryd not running (no pidfile)" && exit 1
168 |
169 | local one_failed=
170 | for pid_file in $pid_files; do
171 | local node=`basename "$pid_file" .pid`
172 | local pid=`cat "$pid_file"`
173 | local cleaned_pid=`echo "$pid" | sed -e 's/[^0-9]//g'`
174 | if [ -z "$pid" ] || [ "$cleaned_pid" != "$pid" ]; then
175 | echo "bad pid file ($pid_file)"
176 | else
177 | local failed=
178 | kill -0 $pid 2> /dev/null || failed=true
179 | if [ "$failed" ]; then
180 | echo "celeryd (node $node) (pid $pid) is stopped, but pid file exists!"
181 | one_failed=true
182 | else
183 | echo "celeryd (node $node) (pid $pid) is running..."
184 | fi
185 | fi
186 | done
187 |
188 | [ "$one_failed" ] && exit 1 || exit 0
189 | }
190 |
191 |
192 | case "$1" in
193 | start)
194 | check_dev_null
195 | check_paths
196 | start_workers
197 | ;;
198 |
199 | stop)
200 | check_dev_null
201 | check_paths
202 | stop_workers
203 | ;;
204 |
205 | reload|force-reload)
206 | echo "Use restart"
207 | ;;
208 |
209 | status)
210 | check_status
211 | ;;
212 |
213 | restart)
214 | check_dev_null
215 | check_paths
216 | restart_workers
217 | ;;
218 | try-restart)
219 | check_dev_null
220 | check_paths
221 | restart_workers
222 | ;;
223 | create-paths)
224 | check_dev_null
225 | create_paths
226 | ;;
227 | check-paths)
228 | check_dev_null
229 | check_paths
230 | ;;
231 | *)
232 | echo "Usage: /etc/init.d/celeryd {start|stop|restart|kill|create-paths}"
233 | exit 64 # EX_USAGE
234 | ;;
235 | esac
236 |
237 | exit 0
--------------------------------------------------------------------------------
/package-scripts/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !.gitignore
--------------------------------------------------------------------------------
/package-templates/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.deb
--------------------------------------------------------------------------------
/package-templates/agent-centos-bootstrap.template:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function state_error
4 | {
5 | echo "ERROR: ${1:-UNKNOWN} (status $?)" 1>&2
6 | exit 1
7 | }
8 |
9 | function check_pkg
10 | {
11 | echo "checking to see if package $1 is installed..."
12 | rpm -q $1 || state_error "package $1 is not installed"
13 | echo "package $1 is installed"
14 | }
15 |
16 | function check_user
17 | {
18 | echo "checking to see if user $1 exists..."
19 | id -u $1 || state_error "user $1 doesn't exist"
20 | echo "user $1 exists"
21 | }
22 |
23 | function check_port
24 | {
25 | echo "checking to see if port $1 is opened..."
26 | nc -z $1 $2 || state_error "port $2 is closed"
27 | echo "port $2 on $1 is opened"
28 | }
29 |
30 | function check_dir
31 | {
32 | echo "checking to see if dir $1 exists..."
33 | if [ -d $1 ]; then
34 | echo "dir $1 exists"
35 | else
36 | state_error "dir $1 doesn't exist"
37 | fi
38 | }
39 |
40 | function check_file
41 | {
42 | echo "checking to see if file $1 exists..."
43 | if [ -f $1 ]; then
44 | echo "file $1 exists"
45 | # if [ -$2 $1 ]; then
46 | # echo "$1 exists and contains the right attribs"
47 | # else
48 | # state_error "$1 exists but does not contain the right attribs"
49 | # fi
50 | else
51 | state_error "file $1 doesn't exists"
52 | fi
53 | }
54 |
55 | function check_upstart
56 | {
57 | echo "checking to see if $1 daemon is running..."
58 | status $1 || state_error "daemon $1 is not running"
59 | echo "daemon $1 is running"
60 | }
61 |
62 | function check_service
63 | {
64 | echo "checking to see if $1 service is running..."
65 | service $1 status || state_error "service $1 is not running"
66 | echo "service $1 is running"
67 | }
68 |
69 | PKG_NAME="{{ name }}"
70 | PKG_DIR="{{ sources_path }}"
71 | BOOTSTRAP_LOG="{{ bootstrap_log }}"
72 | VERSION="{{ version }}"
73 |
74 | BASE_DIR="/env"
75 | HOME_DIR="${BASE_DIR}/${PKG_NAME}/cloudify.${WORKER_MODIFIER}/env"
76 |
77 | FILE_SERVER_PATH={{ bootstrap_params.file_server_path }}
78 | DST_AGENT_LOCATION={{ bootstrap_params.dst_agent_location }}
79 | DST_TEMPLATE_LOCATION={{ bootstrap_params.dst_template_location }}
80 | DST_SCRIPT_LOCATION={{ bootstrap_params.dst_script_location }}
81 |
82 | echo -e "\nInstalling ${PKG_NAME} version ${VERSION}...\n" | tee -a ${BOOTSTRAP_LOG}
83 |
84 | mkdir -p ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
85 | mkdir -p ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
86 | mkdir -p ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
87 |
88 | check_dir ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
89 | check_dir ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
90 | check_dir ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
91 |
92 | cp -R ${PKG_DIR}/*.tar.gz ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
93 | cp -R ${PKG_DIR}/config/*.template ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
94 | cp -R ${PKG_DIR}/config/centos-agent-disable-requiretty.sh ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
95 |
96 | echo -e "${PKG_NAME} ${VERSION} installation completed successfully!\n" | tee -a ${BOOTSTRAP_LOG}
97 |
--------------------------------------------------------------------------------
/package-templates/agent-debian-bootstrap.template:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function state_error
4 | {
5 | echo "ERROR: ${1:-UNKNOWN} (status $?)" 1>&2
6 | exit 1
7 | }
8 |
9 | function check_pkg
10 | {
11 | echo "checking to see if package $1 is installed..."
12 | dpkg -s $1 || state_error "package $1 is not installed"
13 | echo "package $1 is installed"
14 | }
15 |
16 | function check_user
17 | {
18 | echo "checking to see if user $1 exists..."
19 | id -u $1 || state_error "user $1 doesn't exist"
20 | echo "user $1 exists"
21 | }
22 |
23 | function check_port
24 | {
25 | echo "checking to see if port $1 is opened..."
26 | nc -z $1 $2 || state_error "port $2 is closed"
27 | echo "port $2 on $1 is opened"
28 | }
29 |
30 | function check_dir
31 | {
32 | echo "checking to see if dir $1 exists..."
33 | if [ -d $1 ]; then
34 | echo "dir $1 exists"
35 | else
36 | state_error "dir $1 doesn't exist"
37 | fi
38 | }
39 |
40 | function check_file
41 | {
42 | echo "checking to see if file $1 exists..."
43 | if [ -f $1 ]; then
44 | echo "file $1 exists"
45 | # if [ -$2 $1 ]; then
46 | # echo "$1 exists and contains the right attribs"
47 | # else
48 | # state_error "$1 exists but does not contain the right attribs"
49 | # fi
50 | else
51 | state_error "file $1 doesn't exists"
52 | fi
53 | }
54 |
55 | function check_upstart
56 | {
57 | echo "checking to see if $1 daemon is running..."
58 | status $1 || state_error "daemon $1 is not running"
59 | echo "daemon $1 is running"
60 | }
61 |
62 | function check_service
63 | {
64 | echo "checking to see if $1 service is running..."
65 | service $1 status || state_error "service $1 is not running"
66 | echo "service $1 is running"
67 | }
68 |
69 | PKG_NAME="{{ name }}"
70 | PKG_DIR="{{ sources_path }}"
71 | BOOTSTRAP_LOG="{{ bootstrap_log }}"
72 | VERSION="{{ version }}"
73 |
74 | BASE_DIR="/env"
75 | HOME_DIR="${BASE_DIR}/${PKG_NAME}/cloudify.${WORKER_MODIFIER}/env"
76 |
77 | FILE_SERVER_PATH={{ bootstrap_params.file_server_path }}
78 | DST_AGENT_LOCATION={{ bootstrap_params.dst_agent_location }}
79 | DST_TEMPLATE_LOCATION={{ bootstrap_params.dst_template_location }}
80 | DST_SCRIPT_LOCATION={{ bootstrap_params.dst_script_location }}
81 |
82 | echo -e "\nInstalling ${PKG_NAME} version ${VERSION}...\n" | tee -a ${BOOTSTRAP_LOG}
83 |
84 | mkdir -p ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
85 | mkdir -p ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
86 | mkdir -p ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
87 |
88 | check_dir ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
89 | check_dir ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
90 | check_dir ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
91 |
92 | cp -R ${PKG_DIR}/*.tar.gz ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
93 | cp -R ${PKG_DIR}/config/*.template ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
94 | cp -R ${PKG_DIR}/config/debian-agent-disable-requiretty.sh ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
95 |
96 | echo -e "${PKG_NAME} ${VERSION} installation completed successfully!\n" | tee -a ${BOOTSTRAP_LOG}
97 |
--------------------------------------------------------------------------------
/package-templates/agent-ubuntu-bootstrap.template:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function state_error
4 | {
5 | echo "ERROR: ${1:-UNKNOWN} (status $?)" 1>&2
6 | exit 1
7 | }
8 |
9 | function check_pkg
10 | {
11 | echo "checking to see if package $1 is installed..."
12 | dpkg -s $1 || state_error "package $1 is not installed"
13 | echo "package $1 is installed"
14 | }
15 |
16 | function check_user
17 | {
18 | echo "checking to see if user $1 exists..."
19 | id -u $1 || state_error "user $1 doesn't exist"
20 | echo "user $1 exists"
21 | }
22 |
23 | function check_port
24 | {
25 | echo "checking to see if port $1 is opened..."
26 | nc -z $1 $2 || state_error "port $2 is closed"
27 | echo "port $2 on $1 is opened"
28 | }
29 |
30 | function check_dir
31 | {
32 | echo "checking to see if dir $1 exists..."
33 | if [ -d $1 ]; then
34 | echo "dir $1 exists"
35 | else
36 | state_error "dir $1 doesn't exist"
37 | fi
38 | }
39 |
40 | function check_file
41 | {
42 | echo "checking to see if file $1 exists..."
43 | if [ -f $1 ]; then
44 | echo "file $1 exists"
45 | # if [ -$2 $1 ]; then
46 | # echo "$1 exists and contains the right attribs"
47 | # else
48 | # state_error "$1 exists but does not contain the right attribs"
49 | # fi
50 | else
51 | state_error "file $1 doesn't exists"
52 | fi
53 | }
54 |
55 | function check_upstart
56 | {
57 | echo "checking to see if $1 daemon is running..."
58 | status $1 || state_error "daemon $1 is not running"
59 | echo "daemon $1 is running"
60 | }
61 |
62 | function check_service
63 | {
64 | echo "checking to see if $1 service is running..."
65 | service $1 status || state_error "service $1 is not running"
66 | echo "service $1 is running"
67 | }
68 |
69 | PKG_NAME="{{ name }}"
70 | PKG_DIR="{{ sources_path }}"
71 | BOOTSTRAP_LOG="{{ bootstrap_log }}"
72 | VERSION="{{ version }}"
73 |
74 | BASE_DIR="/env"
75 | HOME_DIR="${BASE_DIR}/${PKG_NAME}/cloudify.${WORKER_MODIFIER}/env"
76 |
77 | FILE_SERVER_PATH={{ bootstrap_params.file_server_path }}
78 | DST_AGENT_LOCATION={{ bootstrap_params.dst_agent_location }}
79 | DST_TEMPLATE_LOCATION={{ bootstrap_params.dst_template_location }}
80 | DST_SCRIPT_LOCATION={{ bootstrap_params.dst_script_location }}
81 |
82 | echo -e "\nInstalling ${PKG_NAME} version ${VERSION}...\n" | tee -a ${BOOTSTRAP_LOG}
83 |
84 | mkdir -p ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
85 | mkdir -p ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
86 | mkdir -p ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
87 |
88 | check_dir ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
89 | check_dir ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
90 | check_dir ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
91 |
92 | cp -R ${PKG_DIR}/*.tar.gz ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
93 | cp -R ${PKG_DIR}/config/*.template ${FILE_SERVER_PATH}/${DST_TEMPLATE_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
94 | cp -R ${PKG_DIR}/config/Ubuntu-agent-disable-requiretty.sh ${FILE_SERVER_PATH}/${DST_SCRIPT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
95 |
96 | echo -e "${PKG_NAME} ${VERSION} installation completed successfully!\n" | tee -a ${BOOTSTRAP_LOG}
97 |
--------------------------------------------------------------------------------
/package-templates/agent-windows-bootstrap.template:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function state_error
4 | {
5 | echo "ERROR: ${1:-UNKNOWN} (status $?)" 1>&2
6 | exit 1
7 | }
8 |
9 | function check_pkg
10 | {
11 | echo "checking to see if package $1 is installed..."
12 | dpkg -s $1 || state_error "package $1 is not installed"
13 | echo "package $1 is installed"
14 | }
15 |
16 | function check_user
17 | {
18 | echo "checking to see if user $1 exists..."
19 | id -u $1 || state_error "user $1 doesn't exist"
20 | echo "user $1 exists"
21 | }
22 |
23 | function check_port
24 | {
25 | echo "checking to see if port $1 is opened..."
26 | nc -z $1 $2 || state_error "port $2 is closed"
27 | echo "port $2 on $1 is opened"
28 | }
29 |
30 | function check_dir
31 | {
32 | echo "checking to see if dir $1 exists..."
33 | if [ -d $1 ]; then
34 | echo "dir $1 exists"
35 | else
36 | state_error "dir $1 doesn't exist"
37 | fi
38 | }
39 |
40 | function check_file
41 | {
42 | echo "checking to see if file $1 exists..."
43 | if [ -f $1 ]; then
44 | echo "file $1 exists"
45 | # if [ -$2 $1 ]; then
46 | # echo "$1 exists and contains the right attribs"
47 | # else
48 | # state_error "$1 exists but does not contain the right attribs"
49 | # fi
50 | else
51 | state_error "file $1 doesn't exists"
52 | fi
53 | }
54 |
55 | function check_upstart
56 | {
57 | echo "checking to see if $1 daemon is running..."
58 | sudo status $1 || state_error "daemon $1 is not running"
59 | echo "daemon $1 is running"
60 | }
61 |
62 | function check_service
63 | {
64 | echo "checking to see if $1 service is running..."
65 | sudo service $1 status || state_error "service $1 is not running"
66 | echo "service $1 is running"
67 | }
68 |
69 | PKG_NAME="{{ name }}"
70 | PKG_DIR="{{ sources_path }}"
71 | BOOTSTRAP_LOG="{{ bootstrap_log }}"
72 | VERSION="{{ version }}"
73 |
74 | BASE_DIR="/env"
75 | HOME_DIR="${BASE_DIR}/${PKG_NAME}/cloudify.${WORKER_MODIFIER}/env"
76 |
77 | FILE_SERVER_PATH={{ bootstrap_params.file_server_path }}
78 | DST_AGENT_LOCATION={{ bootstrap_params.dst_agent_location }}
79 |
80 | echo -e "\nInstalling ${PKG_NAME} version ${VERSION}...\n" | tee -a ${BOOTSTRAP_LOG}
81 |
82 | sudo mkdir -p ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
83 | check_dir ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION} >> ${BOOTSTRAP_LOG} 2>&1
84 | sudo cp ${PKG_DIR}/*.exe ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION}/CloudifyWindowsAgent.exe >> ${BOOTSTRAP_LOG} 2>&1
85 | sudo chmod 744 ${FILE_SERVER_PATH}/${DST_AGENT_LOCATION}/CloudifyWindowsAgent.exe >> ${BOOTSTRAP_LOG} 2>&1
86 |
87 | echo -e "${PKG_NAME} ${VERSION} installation completed successfully!\n" | tee -a ${BOOTSTRAP_LOG}
88 |
--------------------------------------------------------------------------------
/package-templates/cli-linux.template:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function state_error
4 | {
5 | echo "ERROR: ${1:-UNKNOWN} (status $?)" 1>&2
6 | exit 1
7 | }
8 |
9 | PKG_NAME="{{ name }}"
10 | PKG_DIR="{{ sources_path }}"
11 | VERSION="{{ version }}"
12 |
13 | echo -e "\nInstalling ${PKG_NAME} version ${VERSION}...\n"
14 |
15 | function check_pip
16 | {
17 | if ! which pip >> /dev/null; then
18 | state_error "pip not in path. Please verify that pip is installed and is in the path."
19 | fi
20 | }
21 |
22 | function install_virtualenv
23 | {
24 | if ! which virtualenv >> /dev/null; then
25 | echo "Installing Virtualenv..."
26 | pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse virtualenv
27 | fi
28 | }
29 |
30 | function install_cloudify
31 | {
32 | echo "Creating Virtualenv /cfy/env..."
33 | virtualenv /cfy/env &&
34 | if ! which cfy >> /dev/null; then
35 | /cfy/env/bin/pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse cloudify --pre
36 | /cfy/env/bin/pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse cloudify-vsphere-plugin --pre
37 | /cfy/env/bin/pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse cloudify-softlayer-plugin --pre
38 | /cfy/env/bin/pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse cloudify-fabric-plugin --pre
39 | /cfy/env/bin/pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse cloudify-openstack-plugin --pre
40 | /cfy/env/bin/pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse cloudify-aws-plugin --pre
41 | # when the cli is built for py2.6, unless argparse is put within `install_requires`, we'll have to enable this:
42 | # if which yum; then
43 | # /cfy/env/bin/pip install --use-wheel --no-index --find-links=${PKG_DIR}/wheelhouse argparse=#SOME_VERSION#
44 | # fi
45 | else
46 | state_error "Cloudify's CLI appears to be installed already and is in your path."
47 | fi
48 | }
49 |
50 | check_pip &&
51 | # Questionable. Do we want to install virtualenv for the user if it isn't already installed?
52 | install_virtualenv &&
53 | install_cloudify &&
54 |
55 | echo "Cleaning up..."
56 | rm -rf ${PKG_DIR}/wheelhouse &&
57 |
58 | echo -e "${PKG_NAME} ${VERSION} installation completed successfully!\n"
59 |
--------------------------------------------------------------------------------
/package-templates/manager-bootstrap.template:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function state_error
4 | {
5 | echo "ERROR: ${1:-UNKNOWN} (status $?)" 1>&2
6 | exit 1
7 | }
8 |
9 | function check_pkg
10 | {
11 | echo "checking to see if package $1 is installed..."
12 | dpkg -s $1 || state_error "package $1 is not installed"
13 | echo "package $1 is installed"
14 | }
15 |
16 | function check_user
17 | {
18 | echo "checking to see if user $1 exists..."
19 | id -u $1 || state_error "user $1 doesn't exist"
20 | echo "user $1 exists"
21 | }
22 |
23 | function check_port
24 | {
25 | echo "checking to see if port $2 on $1 is open..."
26 | nc -z $1 $2 || state_error "port $2 on $1 is closed"
27 | echo "port $2 on $1 is open"
28 | }
29 |
30 | function check_dir
31 | {
32 | echo "checking to see if dir $1 exists..."
33 | if [ -d $1 ]; then
34 | echo "dir $1 exists"
35 | else
36 | state_error "dir $1 doesn't exist"
37 | fi
38 | }
39 |
40 | function check_file
41 | {
42 | echo "checking to see if file $1 exists..."
43 | if [ -f $1 ]; then
44 | echo "file $1 exists"
45 | # if [ -$2 $1 ]; then
46 | # echo "$1 exists and contains the right attribs"
47 | # else
48 | # state_error "$1 exists but does not contain the right attribs"
49 | # fi
50 | else
51 | state_error "file $1 doesn't exist"
52 | fi
53 | }
54 |
55 | function check_upstart
56 | {
57 | echo "checking to see if $1 daemon is running..."
58 | sudo status $1 || state_error "daemon $1 is not running"
59 | echo "daemon $1 is running"
60 | }
61 |
62 | function check_service
63 | {
64 | echo "checking to see if $1 service is running..."
65 | sudo service $1 status || state_error "service $1 is not running"
66 | echo "service $1 is running"
67 | }
68 |
69 |
70 | PKG_NAME="{{ name }}"
71 | PKG_DIR="{{ sources_path }}"
72 | BOOTSTRAP_LOG="/var/log/cloudify3-bootstrap.log"
73 |
74 | BASE_DIR="/opt"
75 | HOME_DIR="${BASE_DIR}/${PKG_NAME}"
76 |
77 | LOG_DIR="/var/log/cosmo"
78 |
79 | PKG_INIT_DIR="${PKG_DIR}/{{ config_templates.template_dir_init.config_dir }}"
80 | INIT_DIR="{{ config_templates.template_dir_init.dst_dir }}"
81 | INIT_FILE="{{ config_templates.template_dir_init.output_file }}"
82 |
83 | PKG_CONF_DIR="${PKG_DIR}/{{ config_templates.template_file_conf.config_dir }}"
84 | CONF_DIR="{{ config_templates.template_file_conf.dst_dir }}"
85 | CONF_FILE="{{ config_templates.template_file_conf.output_file }}"
86 |
87 |
88 | # echo "creating virtualenv dir..."
89 | # sudo mkdir -p ${PKG_DIR}
90 | # check_dir "${PKG_DIR}"
91 |
92 | # echo "copying some stuff..."
93 | # sudo cp -R ${PKG_DIR}/{{ bootstrap_params.resources_dir_src }} ${PKG_DIR}/{{ bootstrap_params.resources_dir_dst }}/
94 | # sudo cp ${PKG_DIR}/{{ bootstrap_params.alias_file_src }} ${PKG_DIR}/{{ bootstrap_params.alias_file_dst }}/
95 |
96 | # echo "running gunicorn..."
97 | # sudo ${PKG_DIR}/bin/gunicorn -w 1 -b 0.0.0.0:8100 --timeout 300 ${PKG_DIR}/cosmo-manager-develop/manager-rest/manager_rest/server.py:app
98 |
99 | # use this to test...
100 | # sudo mkdir -p /opt/manager/filesrv
101 | # sudo cp -R /opt/manager/cosmo-manager-develop/orchestrator/src/main/resources/cloudify/ /opt/manager/filesrv/
102 | # sudo cp /opt/manager/cosmo-manager-develop/orchestrator/src/main/resources/org/cloudifysource/cosmo/dsl/alias-mappings.yaml /opt/manager/filesrv/cloudify/
103 | # cd /opt/manager/cosmo-manager-develop/manager-rest/manager_rest
104 | # sudo /opt/manager/bin/gunicorn -w 1 -b 0.0.0.0:8100 --timeout 300 server:app
105 |
106 | echo "creating virtualenv..."
107 | sudo virtualenv ${HOME_DIR}
108 | check_dir "${HOME_DIR}"
109 |
110 | echo "creating log dir..."
111 | sudo mkdir -p ${LOG_DIR}
112 | check_dir "${LOG_DIR}"
113 |
114 | echo "copying init configuration files..."
115 | sudo cp ${PKG_INIT_DIR}/*.conf ${INIT_DIR}
116 |
117 | check_file "${INIT_DIR}/manager.conf"
118 | check_file "${INIT_DIR}/amqpflux.conf"
119 |
120 | # sudo mv ${PKG_DIR}/${PKG_NAME} ${BASE_DIR}
121 | sudo ln -sf ${HOME_DIR}/cosmo-manager-*/ ${HOME_DIR}/${PKG_NAME}
122 | # check_dir "${BASE_DIR}/${PKG_NAME}"
123 |
124 | #python-dbus module
125 | sudo ln -sf /usr/share/pyshared/dbus ${HOME_DIR}/lib/python2.7/site-packages/dbus
126 | sudo ln -sf /usr/lib/python2.7/dist-packages/_dbus_*.so ${HOME_DIR}/lib/python2.7/site-packages
127 |
128 |
129 | echo "restarting riemann with manager.config"
130 | sudo restart riemann
131 | sleep 1
132 | check_upstart "riemann"
133 |
134 | echo "starting manager..."
135 | sudo start manager
136 | check_upstart "manager"
137 |
138 | echo "starting amqpflux..."
139 | sudo start amqpflux
140 | check_upstart "amqpflux"
141 |
--------------------------------------------------------------------------------
/package-templates/virtualenv-bootstrap.template:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | function echop
4 | {
5 | echo "${PKG_NAME}: $1"
6 | }
7 |
8 | function state_error
9 | {
10 | echop "ERROR: ${1:-UNKNOWN} (status $?)" 1>&2
11 | exit 1
12 | }
13 |
14 | function check_pkg
15 | {
16 | echop "checking to see if package $1 is installed..."
17 | dpkg -s $1 || state_error "package $1 is not installed"
18 | echop "package $1 is installed"
19 | }
20 |
21 | function check_user
22 | {
23 | echop "checking to see if user $1 exists..."
24 | id -u $1 || state_error "user $1 doesn't exist"
25 | echop "user $1 exists"
26 | }
27 |
28 | function check_port
29 | {
30 | echop "checking to see if port $2 on $1 is open..."
31 | nc -z $1 $2 || state_error "port $2 on $1 is closed"
32 | echop "port $2 on $1 is open"
33 | }
34 |
35 | function check_dir
36 | {
37 | echop "checking to see if dir $1 exists..."
38 | if [ -d $1 ]; then
39 | echop "dir $1 exists"
40 | else
41 | state_error "dir $1 doesn't exist"
42 | fi
43 | }
44 |
45 | function check_file
46 | {
47 | echop "checking to see if file $1 exists..."
48 | if [ -f $1 ]; then
49 | echop "file $1 exists"
50 | # if [ -$2 $1 ]; then
51 | # echop "$1 exists and contains the right attribs"
52 | # else
53 | # state_error "$1 exists but does not contain the right attribs"
54 | # fi
55 | else
56 | state_error "file $1 doesn't exist"
57 | fi
58 | }
59 |
60 | function check_upstart
61 | {
62 | echop "checking to see if $1 daemon is running..."
63 | sudo status $1 || state_error "daemon $1 is not running"
64 | echop "daemon $1 is running"
65 | }
66 |
67 | function check_service
68 | {
69 | echop "checking to see if $1 service is running..."
70 | sudo service $1 status || state_error "service $1 is not running"
71 | echop "service $1 is running"
72 | }
73 |
74 |
75 | PKG_NAME="{{ name }}"
76 | PKG_DIR="{{ sources_path }}"
77 | BOOTSTRAP_LOG="/var/log/cloudify3-bootstrap.log"
78 |
79 |
80 | echo "extracting ${PKG_NAME}..."
81 | sudo tar -C ${PKG_DIR} -xvf ${PKG_DIR}/*.tar.gz
82 | echo "removing tar..."
83 | sudo rm ${PKG_DIR}/*.tar.gz
84 | cd ${PKG_DIR}/virtualenv*
85 | echo "installing ${PKG_NAME}..."
86 | sudo python setup.py install
87 | # sudo pip install --no-index --find-links="${PKG_DIR}" ${PKG_DIR}/*.tar.gz
88 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | skipsdist = True
3 |
4 | [testenv:py27]
5 | deps =
6 | -rdev-requirements.txt
7 | commands =
8 | nosetests --with-cov --cov cloudify_packager package-configuration/linux-cli/test_get_cloudify.py -v
9 | nosetests --with-cov --cov cloudify_packager package-configuration/linux-cli/test_cli_install.py -v
10 |
11 | [testenv:flake8]
12 | deps =
13 | flake8
14 | commands =
15 | flake8 package-configuration/linux-cli
16 |
--------------------------------------------------------------------------------
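Note: assuming tox is installed locally, either of the environments defined above can be run directly, e.g.:

    tox -e flake8
    tox -e py27
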
/user_definitions-DEPRECATED.py:
--------------------------------------------------------------------------------
1 | # flake8: NOQA
2 |
3 | # user configuration (OPTIONAL)
4 | MAIN_BRANCH = 'master' # branch to use when retrieving resources
5 | MANAGER_BRANCH = MAIN_BRANCH
6 | REST_CLIENT_BRANCH = MAIN_BRANCH
7 | PLUGINS_COMMON_BRANCH = MAIN_BRANCH
8 | DSL_PARSER_BRANCH = MAIN_BRANCH
9 |
10 | PACKAGES_PATH = "/packages" # temporary directory to which items are downloaded and in which packages are created.
11 | VIRTUALENVS_PATH = "/opt" # directory for cosmo modules and virtual environments
12 | CLOUDIFY_LOGS_PATH = "/var/log/cloudify" # directory for cloudify logs
13 | AGENT_VIRTUALENVS_PATH = "/env" # final directory to put the created packages in.
14 | COMPONENT_PACKAGES_PATH = "/cloudify-components" # where to put 3rd party components packages
15 | CORE_PACKAGES_PATH = "/cloudify-core" # where to put core packages
16 | UI_PACKAGE_PATH = "/agents" # where to put the ui package
17 | AGENT_PACKAGES_PATH = "/agents" # where to put agent packages
18 | SCRIPTS_PATH = "package-scripts" # directory for bootstrap/download/removal/package scripts - if applicable
19 | CONFIGS_PATH = "package-configuration" # directory for configuration files and templates - if applicable
20 |
--------------------------------------------------------------------------------
/vagrant/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 | *.pyc
3 |
4 | # C extensions
5 | *.so
6 |
7 | # Packages
8 | *.egg
9 | *.egg-info
10 | *.deb
11 | dist
12 | build
13 | eggs
14 | parts
15 | bin
16 | var
17 | sdist
18 | develop-eggs
19 | .installed.cfg
20 | lib
21 | lib64
22 | __pycache__
23 |
24 | # Installer logs
25 | pip-log.txt
26 |
27 | # Log files
28 | *.log
29 |
30 | # Unit test / coverage reports
31 | .coverage
32 | .tox
33 | nosetests.xml
34 | .travis-solo
35 |
36 | # Translations
37 | *.mo
38 |
39 | # Mr Developer
40 | .mr.developer.cfg
41 | .project
42 | .pydevproject
43 |
44 | ~
45 | .vagrant/
46 | debs/
47 | rpms/
48 |
--------------------------------------------------------------------------------
/vagrant/agents/Vagrantfile:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # -*- mode: ruby -*-
17 | # vi: set ft=ruby :
18 |
19 | AWS_ACCESS_KEY_ID = ENV['AWS_ACCESS_KEY_ID']
20 | AWS_ACCESS_KEY = ENV['AWS_ACCESS_KEY']
21 | GITHUB_USERNAME = ENV['GITHUB_USERNAME']
22 | GITHUB_PASSWORD = ENV['GITHUB_PASSWORD']
23 |
24 | UBUNTU_TRUSTY_BOX_NAME = 'ubuntu/trusty64'
25 | UBUNTU_PRECISE_BOX_NAME = 'hashicorp/precise64'
26 | DEBIAN_JESSIE_BOX_NAME = 'binarydata/debian-jessie'
27 | CENTOS_FINAL_BOX_NAME = 'chef/centos-6.5'
28 |
29 | Vagrant.configure('2') do |config|
30 | config.vm.define "debian_jessie_aws" do |debian_jessie|
31 | # dummy box, will be overridden
32 | config.nfs.functional = false
33 | debian_jessie.vm.box = "dummy"
34 | debian_jessie.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
35 |
36 | debian_jessie.vm.provider :aws do |aws, override|
37 | aws.access_key_id = AWS_ACCESS_KEY_ID
38 | aws.secret_access_key = AWS_ACCESS_KEY
39 |
40 | # debian-jessie 64bit box
41 | aws.ami = "ami-699f021e"
42 | aws.region = "eu-west-1"
43 | aws.instance_type = "m3.medium"
44 |
45 | aws.keypair_name = "vagrant_build"
46 | override.ssh.username = "admin"
47 | override.ssh.private_key_path = "~/.ssh/aws/vagrant_build.pem"
48 |
49 | aws.tags = {
50 | "Name" => "vagrant debian jessie agent build",
51 | }
52 | aws.security_groups = "vagrant_cfy_build"
53 | end
54 |
55 | # need to sync folders
56 | debian_jessie.vm.synced_folder "../../", "/cloudify-packager", create: true, type: "rsync",
57 | rsync__exclude: ".git/",
58 | rsync__args: ["--verbose", "--rsync-path='sudo rsync'"]
59 | debian_jessie.vm.provision "shell" do |s|
60 | s.path = "provision.sh"
61 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
62 | s.privileged = false
63 | end
64 | end
65 | config.vm.define "ubuntu_trusty_aws" do |ubuntu_trusty|
66 | # dummy box, will be overridden
67 | ubuntu_trusty.vm.box = "dummy"
68 | ubuntu_trusty.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
69 |
70 | ubuntu_trusty.vm.provider :aws do |aws, override|
71 | aws.access_key_id = AWS_ACCESS_KEY_ID
72 | aws.secret_access_key = AWS_ACCESS_KEY
73 |
74 | # official ubuntu 14.04 64bit box
75 | aws.ami = "ami-f0b11187"
76 | aws.region = "eu-west-1"
77 | aws.instance_type = "m3.medium"
78 |
79 | aws.keypair_name = "vagrant_build"
80 | override.ssh.username = "ubuntu"
81 | override.ssh.private_key_path = "~/.ssh/aws/vagrant_build.pem"
82 |
83 | aws.tags = {
84 | "Name" => "vagrant ubuntu trusty agent build",
85 | }
86 | aws.security_groups = "vagrant_cfy_build"
87 | end
88 |
89 | # need to sync folders
90 | ubuntu_trusty.vm.synced_folder "../../", "/cloudify-packager", create: true
91 | ubuntu_trusty.vm.provision "shell" do |s|
92 | s.path = "provision.sh"
93 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
94 | s.privileged = false
95 | end
96 | end
97 | config.vm.define "ubuntu_precise_aws" do |ubuntu_precise|
98 | # dummy box, will be overridden
99 | ubuntu_precise.vm.box = "dummy"
100 | ubuntu_precise.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
101 |
102 | ubuntu_precise.vm.provider :aws do |aws, override|
103 | aws.access_key_id = AWS_ACCESS_KEY_ID
104 | aws.secret_access_key = AWS_ACCESS_KEY
105 |
106 | # official ubuntu 12.04 64bit box
107 | aws.ami = "ami-036eaa74"
108 | aws.region = "eu-west-1"
109 | aws.instance_type = "m3.medium"
110 |
111 | aws.keypair_name = "vagrant_build"
112 | override.ssh.username = "ubuntu"
113 | override.ssh.private_key_path = "~/.ssh/aws/vagrant_build.pem"
114 |
115 | aws.tags = {
116 | "Name" => "vagrant ubuntu precise agent build",
117 | }
118 | aws.security_groups = "vagrant_cfy_build"
119 | end
120 |
121 | # need to sync folders
122 | ubuntu_precise.vm.synced_folder "../../", "/cloudify-packager", create: true
123 | ubuntu_precise.vm.provision "shell" do |s|
124 | s.path = "provision.sh"
125 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
126 | s.privileged = false
127 | end
128 | end
129 | config.vm.define "centos_final_aws" do |centos_final|
130 | # dummy box, will be overridden
131 | centos_final.vm.box = "dummy"
132 | centos_final.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
133 |
134 | centos_final.vm.provider :aws do |aws, override|
135 | aws.access_key_id = AWS_ACCESS_KEY_ID
136 | aws.secret_access_key = AWS_ACCESS_KEY
137 |
138 | # unofficial centos6.4 64bit box
139 | aws.ami = "ami-3b39ee4c"
140 | aws.region = "eu-west-1"
141 | aws.instance_type = "m3.medium"
142 |
143 | aws.keypair_name = "vagrant_build"
144 | override.ssh.username = "root"
145 | override.ssh.private_key_path = "~/.ssh/aws/vagrant_centos_build.pem"
146 |
147 | aws.tags = {
148 | "Name" => "vagrant agent centos build",
149 | }
150 | aws.security_groups = "vagrant_linux_build"
151 | end
152 |
153 | #need to sync folders
154 | centos_final.vm.synced_folder "../../", "/cloudify-packager", create: true, type: "rsync", rsync__exclude: ".git/"
155 | centos_final.vm.provision "shell" do |s|
156 | s.path = "provision.sh"
157 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
158 | s.privileged = false
159 | end
160 | end
161 | config.vm.define "centos_core_aws" do |centos_core|
162 | # dummy box, will be overridden
163 | centos_core.vm.box = "dummy"
164 | centos_core.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
165 |
166 | centos_core.vm.provider :aws do |aws, override|
167 | aws.access_key_id = AWS_ACCESS_KEY_ID
168 | aws.secret_access_key = AWS_ACCESS_KEY
169 |
170 | # unofficial centos6.4 64bit box
171 | aws.ami = "ami-2399f054"
172 | aws.region = "eu-west-1"
173 | aws.instance_type = "m3.medium"
174 |
175 | aws.keypair_name = "vagrant_build"
176 | override.ssh.username = "ec2-user"
177 | override.ssh.private_key_path = "~/.ssh/aws/vagrant_build.pem"
178 |
179 | aws.tags = {
180 | "Name" => "vagrant agent centos build",
181 | }
182 | aws.security_groups = "vagrant_linux_build"
183 | end
184 |
185 | #need to sync folders
186 | centos_core.vm.synced_folder "../../", "/cloudify-packager", create: true,
187 | type: "rsync", rsync__exclude: ".git/"
188 | centos_core.vm.provision "shell" do |s|
189 | s.path = "provision.sh"
190 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
191 | s.privileged = false
192 | end
193 | end
194 | config.vm.define "windows_aws" do |windows|
195 | windows.vm.box = "dummy"
196 | windows.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
197 |
198 | windows.vm.provider :aws do |aws, override|
199 | aws.access_key_id = AWS_ACCESS_KEY_ID
200 | aws.secret_access_key = AWS_ACCESS_KEY
201 |
202 | # this is a pre-baked AMI, not a pure base image
203 | aws.ami = "ami-bbd90ecc"
204 | aws.region = "eu-west-1"
205 | aws.instance_type = "m3.medium"
206 |
207 | aws.keypair_name = "windows_agent_packager"
208 | override.ssh.username = "Administrator"
209 | override.ssh.private_key_path = "~/.ssh/aws/windows_agent_packager.pem"
210 |
211 | aws.tags = {
212 | "Name" => "windows agent packager build",
213 | }
214 | aws.security_groups = "vagrant_windows"
215 | end
216 |
217 | windows.vm.synced_folder ".", "/vagrant", disabled: true
218 | windows.vm.synced_folder ".", "/home/vagrant"
219 |
220 | # shell provisioning uses bash, so use cmd to run batch script
221 | windows.vm.provision "shell" do |shell|
222 | shell.inline = 'cmd /c "c:\\cygwin64\\home\\vagrant\\provision.bat"'
223 | shell.privileged = false
224 | end
225 | end
226 |
227 | config.vm.define "new_windows_aws" do |windows|
228 | windows.vm.box = "dummy"
229 | windows.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
230 | windows.vm.guest = :windows
231 |
232 | windows.vm.provider :aws do |aws, override|
233 | aws.access_key_id = AWS_ACCESS_KEY_ID
234 | aws.secret_access_key = AWS_ACCESS_KEY
235 |
236 | # this is a pre-baked AMI, not a pure base image
237 | aws.ami = "ami-118ee566"
238 | aws.region = "eu-west-1"
239 | aws.instance_type = "m3.medium"
240 |
241 | aws.keypair_name = "vagrant_build"
242 | override.ssh.username = "Administrator"
243 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
244 |
245 | aws.tags = {
246 | "Name" => "vagrant windows agent build",
247 | }
248 | aws.security_groups = "vagrant_windows"
249 | end
250 |
251 | windows.vm.synced_folder ".", "/vagrant", disabled: true
252 | windows.vm.synced_folder "./windows/packaging", "/home/Administrator/packaging"
253 |
254 | # shell provisioning uses bash, so use cmd to run batch script
255 | windows.vm.provision "shell" do |shell|
256 | shell.path = 'windows/provision.sh'
257 | shell.privileged = false
258 | end
259 | end
260 |
261 | config.vm.define :debian_jessie do |local|
262 | local.vm.provider :virtualbox do |vb|
263 | vb.customize ['modifyvm', :id, '--memory', '1024']
264 | end
265 | local.vm.box = DEBIAN_JESSIE_BOX_NAME
266 | local.vm.hostname = 'local'
267 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
268 | local.vm.provision "shell" do |s|
269 | s.path = "provision.sh"
270 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
271 | s.privileged = false
272 | end
273 | end
274 | config.vm.define :ubuntu_precise do |local|
275 | local.vm.provider :virtualbox do |vb|
276 | vb.customize ['modifyvm', :id, '--memory', '1024']
277 | end
278 | local.vm.box = UBUNTU_PRECISE_BOX_NAME
279 | local.vm.hostname = 'local'
280 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
281 | local.vm.provision "shell" do |s|
282 | s.path = "provision.sh"
283 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
284 | s.privileged = false
285 | end
286 | end
287 | config.vm.define :ubuntu_trusty do |local|
288 | local.vm.provider :virtualbox do |vb|
289 | vb.customize ['modifyvm', :id, '--memory', '1024']
290 | end
291 | local.vm.box = UBUNTU_TRUSTY_BOX_NAME
292 | local.vm.hostname = 'local'
293 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
294 | local.vm.provision "shell" do |s|
295 | s.path = "provision.sh"
296 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
297 | s.privileged = false
298 | end
299 | end
300 | config.vm.define :centos_final do |local|
301 | local.vm.provider :virtualbox do |vb|
302 | vb.customize ['modifyvm', :id, '--memory', '1024']
303 | end
304 | local.vm.box = CENTOS_FINAL_BOX_NAME
305 | local.vm.hostname = 'local'
306 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
307 | local.vm.provision "shell" do |s|
308 | s.path = "provision.sh"
309 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
310 | s.privileged = false
311 | end
312 | end
313 | end
314 |
--------------------------------------------------------------------------------
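Note: the AWS machine definitions above read their credentials and GitHub details from the environment, so a typical build of one of the AWS boxes (assuming the vagrant-aws plugin is installed) would look roughly like:

    export AWS_ACCESS_KEY_ID=<key-id> AWS_ACCESS_KEY=<secret>
    export GITHUB_USERNAME=<user> GITHUB_PASSWORD=<password>
    vagrant up ubuntu_trusty_aws --provider=aws
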
/vagrant/agents/packager.yaml:
--------------------------------------------------------------------------------
1 | python_path: /usr/bin/python
2 | cloudify_agent_module: https://github.com/cloudify-cosmo/cloudify-agent/archive/master.zip
3 | requirements_file: https://raw.githubusercontent.com/cloudify-cosmo/cloudify-agent/master/dev-requirements.txt
4 | additional_plugins:
5 | # These are cloned during the machine provisioning process as they are private repositories
6 | cloudify_vsphere_plugin: /tmp/cloudify-vsphere-plugin
7 | cloudify_softlayer_plugin: /tmp/cloudify-softlayer-plugin
8 | keep_virtualenv: true
9 |
--------------------------------------------------------------------------------
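Note: this file is the input for the cloudify-agent-packager; provision.sh below installs the packager and points it at this configuration, roughly:

    sudo pip install cloudify-agent-packager==3.5.0
    cfy-ap -c /vagrant/packager.yaml -f -v
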
/vagrant/agents/provision.bat:
--------------------------------------------------------------------------------
1 | SET CORE_TAG_NAME="master"
2 | SET PLUGINS_TAG_NAME="master"
3 |
4 | cd c:\\
5 | virtualenv CloudifyAgent
6 | md C:\\CloudifyAgent\\nssm\
7 | copy C:\\Tools\\nssm.exe C:\\CloudifyAgent\\nssm\\nssm.exe
8 | cd CloudifyAgent
9 | call Scripts\\activate.bat
10 | pip install celery==3.1.17
11 | pip install pyzmq==14.3.1
12 | git clone https://github.com/cloudify-cosmo/cloudify-rest-client.git
13 | cd C:\\CloudifyAgent\\cloudify-rest-client
14 | git checkout -b tmp_branch %CORE_TAG_NAME%
15 | git log -1
16 | pip install .
17 | cd C:\\CloudifyAgent
18 | git clone https://github.com/cloudify-cosmo/cloudify-plugins-common.git
19 | cd C:\\CloudifyAgent\\cloudify-plugins-common
20 | git checkout -b tmp_branch %CORE_TAG_NAME%
21 | git log -1
22 | pip install .
23 | cd C:\\CloudifyAgent
24 | git clone https://github.com/cloudify-cosmo/cloudify-script-plugin.git
25 | cd C:\\CloudifyAgent\\cloudify-script-plugin
26 | git checkout -b tmp_branch %PLUGINS_TAG_NAME%
27 | git log -1
28 | pip install .
29 | cd C:\\CloudifyAgent
30 | git clone https://github.com/cloudify-cosmo/cloudify-diamond-plugin.git
31 | cd C:\\CloudifyAgent\\cloudify-diamond-plugin
32 | git checkout -b tmp_branch %PLUGINS_TAG_NAME%
33 | git log -1
34 | pip install .
35 | cd c:\\
36 | rmdir /s /q C:\\CloudifyAgent\\cloudify-rest-client
37 | rmdir /s /q C:\\CloudifyAgent\\cloudify-plugins-common
38 | rmdir /s /q C:\\CloudifyAgent\\cloudify-script-plugin
39 | rmdir /s /q C:\\CloudifyAgent\\cloudify-diamond-plugin
40 | 7z a -r -sfx -x!.* cloudify-windows-agent.exe c:\\CloudifyAgent\\*
41 |
--------------------------------------------------------------------------------
/vagrant/agents/provision.sh:
--------------------------------------------------------------------------------
1 | function install_deps
2 | {
3 | echo Installing necessary dependencies
4 | if which apt-get; then
5 | # ubuntu
6 | sudo apt-get -y update &&
7 | # trusty
8 | sudo apt-get install -y software-properties-common ||
9 | #precise
10 | sudo apt-get install -y python-software-properties
11 | sudo add-apt-repository -y ppa:git-core/ppa
12 | sudo apt-get install -y curl python-dev git make gcc libyaml-dev zlib1g-dev
13 | elif which yum; then
14 | # centos/RHEL
15 | sudo yum -y update
16 | sudo yum install curl python-devel make gcc git libyaml-devel -y
17 | else
18 | echo 'unsupported package manager, exiting'
19 | exit 1
20 | fi
21 | }
22 |
23 | function install_pip
24 | {
25 | echo Installing pip
26 | curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python
27 | }
28 |
29 | GITHUB_USERNAME=$1
30 | GITHUB_PASSWORD=$2
31 |
32 | install_deps
33 |
34 | cd ~
35 | install_pip &&
36 | sudo pip install pip==6.0.8 --upgrade &&
37 | sudo pip install virtualenv==12.0.7 &&
38 | sudo pip install boto==2.36.0 &&
39 | sudo rm -rf ~/.cache
40 |
41 | # clone commercial plugins. this should be a feature in the agent-packager
42 | git clone https://${GITHUB_USERNAME}:${GITHUB_PASSWORD}@github.com/cloudify-cosmo/cloudify-vsphere-plugin.git /tmp/cloudify-vsphere-plugin
43 | git clone https://${GITHUB_USERNAME}:${GITHUB_PASSWORD}@github.com/cloudify-cosmo/cloudify-softlayer-plugin.git /tmp/cloudify-softlayer-plugin
44 |
45 |
46 | # REPLACE branch before production
47 | sudo pip install cloudify-agent-packager==3.5.0 &&
48 | cd /tmp &&
49 | cfy-ap -c /vagrant/packager.yaml -f -v
50 |
--------------------------------------------------------------------------------
/vagrant/agents/runit.sh:
--------------------------------------------------------------------------------
1 | vagrant destroy -f $1 && vagrant up $1 && vagrant ssh $1
2 |
--------------------------------------------------------------------------------
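Note: runit.sh rebuilds a single machine from scratch and then opens a shell into it; it takes one of the machine names defined in the Vagrantfile above, e.g.:

    bash runit.sh ubuntu_trusty
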
/vagrant/agents/windows/packaging/create_install_wizard.iss:
--------------------------------------------------------------------------------
1 | #define AppName "Cloudify Windows Agent"
2 | #define AppVersion GetEnv('VERSION')
3 | #define AppPublisher "GigaSpaces Technologies"
4 | #define AppURL "http://getcloudify.org/"
5 |
6 | [Setup]
7 | ; NOTE: The value of AppId uniquely identifies this application.
8 | ; Do not use the same AppId value in installers for other applications.
9 | ; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
10 | AppId={{94B9D938-5123-4AC5-AA99-68F07F773DE2}
11 | AppName={#AppName}
12 | AppVersion={#AppVersion}
13 | AppPublisher={#AppPublisher}
14 | AppPublisherURL={#AppURL}
15 | AppSupportURL={#AppURL}
16 | AppUpdatesURL={#AppURL}
17 | DefaultDirName={pf}\Cloudify
18 | DisableProgramGroupPage=yes
19 | OutputBaseFilename=cloudify_agent_{#AppVersion}
20 | Compression=lzma
21 | SolidCompression=yes
22 | ArchitecturesInstallIn64BitMode=
23 | LicenseFile=source\license.txt
24 | MinVersion=6.0
25 | SetupIconFile=source\icons\Cloudify.ico
26 | UninstallDisplayIcon={app}\Cloudify.ico
27 | OutputDir=output\
28 |
29 | [Languages]
30 | Name: "english"; MessagesFile: "compiler:Default.isl"
31 |
32 | [Files]
33 | Source: "source\python\python.msi"; Flags: dontcopy nocompression
34 | Source: "source\wheels\*.whl"; Flags: dontcopy
35 | Source: "source\pip\*"; Flags: dontcopy
36 | Source: "source\virtualenv\*"; Flags: dontcopy
37 | Source: "source\icons\Cloudify.ico"; DestDir: "{app}"
38 |
39 | [Tasks]
40 | Name: "desktopicon"; Description: "Create a desktop icon";
41 |
42 | [Icons]
43 | Name: "{userdesktop}\Cloudify Agent"; Filename: "{cmd}"; Parameters: "/k ""{app}\Scripts\activate.bat"""; WorkingDir: "{app}"; IconFilename: "{app}\Cloudify.ico"; Tasks: "desktopicon";
44 |
45 | [UninstallDelete]
46 | ;this is NOT recommended but in our case, no user data here
47 | Type: "filesandordirs"; Name: "{app}"
48 |
49 | [Code]
50 | const
51 | mainPackageName = 'cloudify-agent';
52 | //Registry key path
53 | RegPythonPath = 'SOFTWARE\Python\PythonCore\2.7\InstallPath';
54 | //Error messages
55 | errPythonMissing = 'Python installation was not found. In order to install {#AppName} you will need Python installed. Proceed to Python 2.7 installation?';
56 | errPipMissing = 'Pip was not found. Pip is a package management tool that is required to successfully install {#AppName}. Would you like to install it?';
57 | errVenvMissing = 'Virtualenv was not found. Virtualenv is a python environment management tool that is required to successfully install {#AppName}. Would you like to install it?';
58 | errUnexpected = 'Unexpected error. Check install logs';
59 | infoPythonUninstall = 'Cloudify uninstaller will not remove Python as a safety precaution. Uninstalling Python should be done independently by the user';
60 |
61 |
62 | function getPythonDir(): String;
63 | var
64 | InstallPath: String;
65 | begin
66 | RegQueryStringValue(HKLM, RegPythonPath, '', InstallPath);
67 | RegQueryStringValue(HKCU, RegPythonPath, '', InstallPath);
68 | Result := InstallPath;
69 | end;
70 |
71 |
72 | function isPythonInstalled(): Boolean;
73 | begin
74 | if getPythonDir <> '' then
75 | Result := True
76 | else
77 | Result := False;
78 | end;
79 |
80 |
81 | function getPythonPath(): String;
82 | var
83 | PythonPath: String;
84 | begin
85 | if isPythonInstalled then begin
86 | PythonPath := AddBackslash(getPythonDir) + 'python.exe';
87 | if FileExists(PythonPath) then
88 | Result := PythonPath
89 | end;
90 | end;
91 |
92 |
93 | function runPythonSetup(): Boolean;
94 | var
95 | PythonArgs: String;
96 | InstallerPath: String;
97 | ErrorCode: Integer;
98 | begin
99 | ExtractTemporaryFile('python.msi');
100 | InstallerPath := Expandconstant('{tmp}\python.msi');
101 | PythonArgs := 'ADDDEFAULT=pip_feature';
102 | if WizardSilent then
103 | PythonArgs := PythonArgs + ' /qn';
104 | ShellExec('', InstallerPath, PythonArgs, '', SW_SHOW, ewWaituntilterminated, ErrorCode);
105 |
106 | if Errorcode <> 0 then
107 | Result := False
108 | else
109 | Result := True;
110 | end;
111 |
112 |
113 | function getPipPath(): String;
114 | var
115 | PipPath: String;
116 | begin
117 | if isPythonInstalled then begin
118 | PipPath := AddBackslash(getPythonDir) + 'Scripts\pip.exe';
119 | if FileExists(PipPath) then
120 | Result := PipPath
121 | end;
122 | end;
123 |
124 |
125 | function isPipInstalled(): Boolean;
126 | begin
127 | if getPipPath <> '' then
128 | Result := True
129 | else
130 | Result := False;
131 | end;
132 |
133 |
134 | function runPipSetup(): Boolean;
135 | var
136 | GetPipArgs: String;
137 | ErrorCode: Integer;
138 | begin
139 | if isPythonInstalled then begin
140 | ExtractTemporaryFiles('*.whl');
141 | ExtractTemporaryFile('get-pip.py');
142 | GetPipArgs := 'get-pip.py --use-wheel --no-index --find-links .';
143 | ShellExec('', getPythonPath, GetPipArgs, Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
144 |
145 | if Errorcode <> 0 then
146 | Result := False
147 | else
148 | Result := True;
149 | end;
150 | end;
151 |
152 |
153 | function getVenvPath(): String;
154 | var
155 | VenvPath: String;
156 | begin
157 | if isPythonInstalled then begin
158 | VenvPath := AddBackslash(getPythonDir) + 'Scripts\virtualenv.exe';
159 | if FileExists(VenvPath) then
160 | Result := VenvPath
161 | end;
162 | end;
163 |
164 |
165 | function isVenvInstalled(): Boolean;
166 | begin
167 | if getVenvPath <> '' then
168 | Result := True
169 | else
170 | Result := False;
171 | end;
172 |
173 |
174 | function runVenvSetup(): Boolean;
175 | var
176 | GetPipArgs: String;
177 | ErrorCode: Integer;
178 | begin
179 | if isPythonInstalled then begin
180 | ExtractTemporaryFiles('*.whl');
181 | GetPipArgs := 'install --use-wheel --no-index --find-links . virtualenv';
182 | ShellExec('', getPipPath, GetPipArgs, Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
183 |
184 | if Errorcode <> 0 then
185 | Result := False
186 | else
187 | Result := True;
188 | end;
189 | end;
190 |
191 |
192 | function runVenvInitialization(): Boolean;
193 | var
194 | ErrorCode: Integer;
195 | begin
196 | Exec(getVenvPath, Expandconstant('--clear "{app}"'), Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
197 |
198 | if Errorcode <> 0 then
199 | Result := False
200 | else
201 | Result := True;
202 | end;
203 |
204 |
205 | function runWheelsInstall(): Boolean;
206 | var
207 | PipArgs: String;
208 | ErrorCode: Integer;
209 | begin
210 | ExtractTemporaryFiles('*.whl');
211 |
212 | if not (isVenvInstalled and runVenvInitialization) then begin
213 | Result := False;
214 | Exit;
215 | end;
216 |
217 | PipArgs := Expandconstant('/c set "VIRTUAL_ENV={app}" && set "PATH={app}\Scripts;%PATH%" && pip install --pre --use-wheel --no-index --find-links . --force-reinstall --ignore-installed ' + mainPackageName);
218 | Exec(Expandconstant('{sys}\cmd.exe'), PipArgs, Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
219 |
220 | if Errorcode <> 0 then
221 | Result := False
222 | else
223 | Result := True;
224 | end;
225 |
226 |
227 | //wrap MsgBox to handle silent install case
228 | function getUserResponse(Message: String): Integer;
229 | begin
230 | if not WizardSilent then
231 | Result := MsgBox(Message, mbError, MB_OKCANCEL)
232 | else
233 | Result := IDOK;
234 | end;
235 |
236 |
237 | //Pre-Assumptions: Python and pip are installed
238 | procedure CurStepChanged(CurStep: TSetupStep);
239 | begin
240 | if CurStep = ssInstall then begin
241 | if not runWheelsInstall then
242 | RaiseException(errUnexpected);
243 | end;
244 | end;
245 |
246 |
247 | //Check for pre-requirements (Python, Pip, Virtualenv)
248 | function PrepareToInstall(var NeedsRestart: Boolean): String;
249 | var
250 | UserResponse: Integer;
251 | begin
252 | if not isPythonInstalled then begin
253 | UserResponse := getUserResponse(errPythonMissing);
254 | if UserResponse <> IDOK then begin
255 | Result := 'Installation cannot continue without Python installed';
256 | Exit;
257 | end
258 | else if not runPythonSetup then begin
259 | Result := 'Python setup failed';
260 | Exit;
261 | end;
262 | end;
263 |
264 | if not isPipInstalled then begin
265 | UserResponse := getUserResponse(errPipMissing);
266 | if UserResponse <> IDOK then begin
267 | Result := 'Installation cannot continue without Pip installed';
268 | exit;
269 | end
270 | else if not runPipSetup then begin
271 | Result := 'Pip installation failed';
272 | Exit;
273 | end;
274 | end;
275 |
276 | if not isVenvInstalled then begin
277 | UserResponse := getUserResponse(errVenvMissing);
278 | if UserResponse <> IDOK then begin
279 | Result := 'Installation cannot continue without Virtualenv installed';
280 | Exit;
281 | end
282 | else if not runVenvSetup then begin
283 | Result := 'Virtualenv installation failed';
284 | Exit;
285 | end;
286 | end;
287 | end;
288 |
289 |
290 | //Display info message when install done about Python uninstall
291 | procedure CurUninstallStepChanged(CurUninstallStep: TUninstallStep);
292 | begin
293 | if (CurUninstallStep = usPostUninstall) and (not UninstallSilent) then
294 | MsgBox(infoPythonUninstall, mbInformation, MB_OK);
295 | end;
--------------------------------------------------------------------------------
/vagrant/agents/windows/packaging/source/icons/Cloudify.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/agents/windows/packaging/source/icons/Cloudify.ico
--------------------------------------------------------------------------------
/vagrant/agents/windows/packaging/source/license.txt:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2015 GigaSpaces Technologies Ltd.
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/vagrant/agents/windows/packaging/source/pip/pip-7.0.1-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/agents/windows/packaging/source/pip/pip-7.0.1-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/vagrant/agents/windows/packaging/source/pip/setuptools-17.0-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/agents/windows/packaging/source/pip/setuptools-17.0-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/vagrant/agents/windows/packaging/source/python/python.msi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/agents/windows/packaging/source/python/python.msi
--------------------------------------------------------------------------------
/vagrant/agents/windows/packaging/source/virtualenv/virtualenv-13.0.1-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/agents/windows/packaging/source/virtualenv/virtualenv-13.0.1-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/vagrant/agents/windows/provision.sh:
--------------------------------------------------------------------------------
1 | export CORE_TAG_NAME="4.4.dev1"
2 | export PLUGINS_TAG_NAME="1.3"
3 |
4 | pip install wheel
5 |
6 | pip wheel --wheel-dir packaging/source/wheels --requirement "https://raw.githubusercontent.com/cloudify-cosmo/cloudify-agent/$CORE_TAG_NAME/dev-requirements.txt"
7 | pip wheel --find-links packaging/source/wheels --wheel-dir packaging/source/wheels "https://github.com/cloudify-cosmo/cloudify-agent/archive/$CORE_TAG_NAME.zip"
8 |
9 | export VERSION=`ls packaging/source/wheels/cloudify_agent-* | cut -d"-" -f2`
10 |
11 | echo "VERSION=$VERSION"
12 |
13 | iscc packaging/create_install_wizard.iss
14 |
--------------------------------------------------------------------------------
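Note: VERSION above is taken from the built agent wheel's filename (the second dash-separated field), so for a hypothetical wheel named cloudify_agent-3.3.0-py2-none-any.whl the export resolves to 3.3.0, which the Inno Setup script then reads via GetEnv('VERSION'):

    ls packaging/source/wheels/cloudify_agent-* | cut -d"-" -f2    # -> 3.3.0
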
/vagrant/cli/Vagrantfile:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # -*- mode: ruby -*-
17 | # vi: set ft=ruby :
18 |
19 | AWS_ACCESS_KEY_ID = ENV['AWS_ACCESS_KEY_ID']
20 | AWS_ACCESS_KEY = ENV['AWS_ACCESS_KEY']
21 |
22 | UBUNTU_TRUSTY_BOX_NAME = 'ubuntu/trusty64'
23 | UBUNTU_PRECISE_BOX_NAME = 'hashicorp/precise64'
24 | UBUNTU_LUCID_BOX_NAME = 'f500/ubuntu-lucid64'
25 | DEBIAN_JESSIE_BOX_NAME = 'binarydata/debian-jessie'
26 | DEBIAN_SQUEEZE_BOX_NAME = 'dene/debian-squeeze'
27 | CENTOS_FINAL_BOX_NAME = 'chef/centos-6.5'
28 |
29 | GITHUB_USERNAME = ENV['GITHUB_USERNAME']
30 | GITHUB_PASSWORD = ENV['GITHUB_PASSWORD']
31 |
32 | Vagrant.configure('2') do |config|
33 | config.vm.define "debian_jessie_cli_aws" do |debian_jessie|
34 | # dummy box, will be overridden
35 | config.nfs.functional = false
36 | debian_jessie.vm.box = "dummy"
37 | debian_jessie.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
38 |
39 | debian_jessie.vm.provider :aws do |aws, override|
40 | aws.access_key_id = AWS_ACCESS_KEY_ID
41 | aws.secret_access_key = AWS_ACCESS_KEY
42 |
43 | # debian-jessie 64bit box
44 | aws.ami = "ami-699f021e"
45 | aws.region = "eu-west-1"
46 | aws.instance_type = "m3.medium"
47 |
48 | aws.keypair_name = "vagrant_build"
49 | override.ssh.username = "admin"
50 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
51 |
52 | aws.tags = {
53 | "Name" => "vagrant debian jessie agent build",
54 | }
55 | aws.security_groups = "vagrant_cfy_build"
56 | end
57 |
58 | # need to sync folders
59 | debian_jessie.vm.synced_folder "../../", "/cloudify-packager", create: true, type: "rsync",
60 | rsync__exclude: ".git/",
61 | rsync__args: ["--verbose", "--rsync-path='sudo rsync'"]
62 | debian_jessie.vm.provision "shell" do |s|
63 | s.path = "provision.sh"
64 | s.privileged = false
65 | end
66 | end
67 | config.vm.define "ubuntu_trusty_cli_aws" do |ubuntu_trusty|
68 | # dummy box, will be overridden
69 | ubuntu_trusty.vm.box = "dummy"
70 | ubuntu_trusty.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
71 |
72 | ubuntu_trusty.vm.provider :aws do |aws, override|
73 | aws.access_key_id = AWS_ACCESS_KEY_ID
74 | aws.secret_access_key = AWS_ACCESS_KEY
75 |
76 | # official ubuntu 14.04 64bit box
77 | aws.ami = "ami-f0b11187"
78 | aws.region = "eu-west-1"
79 | aws.instance_type = "m3.medium"
80 |
81 | aws.keypair_name = "vagrant_build"
82 | override.ssh.username = "ubuntu"
83 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
84 |
85 | aws.tags = {
86 | "Name" => "vagrant ubuntu trusty agent build",
87 | }
88 | aws.security_groups = "vagrant_cfy_build"
89 | end
90 |
91 | # need to sync folders
92 | ubuntu_trusty.vm.synced_folder "../../", "/cloudify-packager", create: true
93 | ubuntu_trusty.vm.provision "shell" do |s|
94 | s.path = "provision.sh"
95 | s.privileged = false
96 | end
97 | end
98 | config.vm.define "ubuntu_precise_cli_aws" do |ubuntu_precise|
99 | # dummy box, will be overridden
100 | ubuntu_precise.vm.box = "dummy"
101 | ubuntu_precise.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
102 |
103 | ubuntu_precise.vm.provider :aws do |aws, override|
104 | aws.access_key_id = AWS_ACCESS_KEY_ID
105 | aws.secret_access_key = AWS_ACCESS_KEY
106 |
107 | # official ubuntu 12.04 64bit box
108 | aws.ami = "ami-036eaa74"
109 | aws.region = "eu-west-1"
110 | aws.instance_type = "m3.medium"
111 |
112 | aws.keypair_name = "vagrant_build"
113 | override.ssh.username = "ubuntu"
114 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
115 |
116 | aws.tags = {
117 | "Name" => "vagrant ubuntu precise agent build",
118 | }
119 | aws.security_groups = "vagrant_cfy_build"
120 | end
121 |
122 | # need to sync folders
123 | ubuntu_precise.vm.synced_folder "../../", "/cloudify-packager", create: true
124 | ubuntu_precise.vm.provision "shell" do |s|
125 | s.path = "provision.sh"
126 | s.privileged = false
127 | end
128 | end
129 | config.vm.define "centos_final_cli_aws" do |centos_final|
130 | # dummy box, will be overridden
131 | centos_final.vm.box = "dummy"
132 | centos_final.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
133 |
134 | centos_final.vm.provider :aws do |aws, override|
135 | aws.access_key_id = AWS_ACCESS_KEY_ID
136 | aws.secret_access_key = AWS_ACCESS_KEY
137 |
138 | # unofficial centos6.4 64bit box
139 | aws.ami = "ami-3b39ee4c"
140 | aws.region = "eu-west-1"
141 | aws.instance_type = "m3.medium"
142 |
143 | aws.keypair_name = "vagrant_build"
144 | override.ssh.username = "root"
145 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_centos_build.pem"
146 |
147 | aws.tags = {
148 | "Name" => "vagrant agent centos build",
149 | }
150 | aws.security_groups = "vagrant_linux_build"
151 | end
152 |
153 | #need to sync folders
154 | centos_final.vm.synced_folder "../../", "/cloudify-packager", create: true, type: "rsync", rsync__exclude: ".git/"
155 | centos_final.vm.provision "shell" do |s|
156 | s.path = "provision.sh"
157 | s.privileged = false
158 | end
159 | end
160 | config.vm.define "centos7_0_final_cli_aws" do |centos_final|
161 | #dummy box, will be overridden
162 | centos_final.vm.box = "dummy"
163 | centos_final.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
164 |
165 | centos_final.vm.provider :aws do |aws, override|
166 | aws.access_key_id = AWS_ACCESS_KEY_ID
167 | aws.secret_access_key = AWS_ACCESS_KEY
168 |
169 | # unofficial centos 7.0 64bit box
170 | aws.ami = "ami-fd69068a"
171 | aws.region = "eu-west-1"
172 | aws.instance_type = "m3.medium"
173 |
174 | aws.keypair_name = "vagrant_build"
175 | override.ssh.username = "ec2-user"
176 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
177 |
178 | aws.tags = {
179 | "Name" => "vagrant agent centos 7.0 build",
180 | }
181 | aws.security_groups = "vagrant_linux_build"
182 | end
183 |
184 | #need to sync folders
185 | centos_final.vm.synced_folder "../../", "/cloudify-packager", create: true, type: "rsync", rsync__exclude: ".git/"
186 | centos_final.vm.provision "shell" do |s|
187 | s.path = "provision.sh"
188 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
189 | s.privileged = false
190 | end
191 | end
192 | config.vm.define "centos6_5_final_cli_aws" do |centos_final|
193 | #dummy box, will be overridden
194 | centos_final.vm.box = "dummy"
195 | centos_final.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
196 |
197 | centos_final.vm.provider :aws do |aws, override|
198 | aws.access_key_id = AWS_ACCESS_KEY_ID
199 | aws.secret_access_key = AWS_ACCESS_KEY
200 |
201 | # unofficial centos6.5 64bit
202 | aws.ami = "ami-77385400"
203 | aws.region = "eu-west-1"
204 | aws.instance_type = "m3.medium"
205 |
206 | aws.keypair_name = "vagrant_build"
207 | override.ssh.username = "root"
208 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
209 |
210 | aws.tags = {
211 | "Name" => "vagrant agent centos 6.5 build",
212 | }
213 | aws.security_groups = "vagrant_linux_build"
214 | end
215 |
216 | #need to sync folders
217 | centos_final.vm.synced_folder "../../", "/cloudify-packager", create: true, type: "rsync", rsync__exclude: ".git/"
218 | centos_final.vm.provision "shell" do |s|
219 | s.path = "provision.sh"
220 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
221 | s.privileged = false
222 | end
223 | end
224 | config.vm.define "windows_aws" do |windows|
225 | windows.vm.box = "dummy"
226 | windows.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
227 | windows.vm.guest = :windows
228 |
229 | windows.vm.provider :aws do |aws, override|
230 | aws.access_key_id = AWS_ACCESS_KEY_ID
231 | aws.secret_access_key = AWS_ACCESS_KEY
232 |
233 | # this is a pre-baked AMI, not a pure base image
234 | aws.ami = "ami-118ee566"
235 | aws.region = "eu-west-1"
236 | aws.instance_type = "m3.medium"
237 |
238 | aws.keypair_name = "vagrant_build"
239 | override.ssh.username = "Administrator"
240 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
241 |
242 | aws.tags = {
243 | "Name" => "vagrant windows cli build",
244 | }
245 | aws.security_groups = "vagrant_windows"
246 | end
247 |
248 | windows.vm.synced_folder ".", "/vagrant", disabled: true
249 | windows.vm.synced_folder "./windows/packaging", "/home/Administrator/packaging"
250 |
251 | # shell provisioning uses bash to run the windows provision script
252 | windows.vm.provision "shell" do |shell|
253 | shell.path = 'windows/provision.sh'
254 | shell.privileged = false
255 | end
256 | end
257 |
258 |
259 | config.vm.define :debian_jessie_cli do |local|
260 | local.vm.provider :virtualbox do |vb|
261 | vb.customize ['modifyvm', :id, '--memory', '1024']
262 | end
263 | local.vm.box = DEBIAN_JESSIE_BOX_NAME
264 | local.vm.hostname = 'local'
265 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
266 | local.vm.provision "shell" do |s|
267 | s.path = "provision.sh"
268 | s.privileged = false
269 | end
270 | end
271 | config.vm.define :ubuntu_precise_cli do |local|
272 | local.vm.provider :virtualbox do |vb|
273 | vb.customize ['modifyvm', :id, '--memory', '1024']
274 | end
275 | local.vm.box = UBUNTU_PRECISE_BOX_NAME
276 | local.vm.hostname = 'local'
277 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
278 | local.vm.provision "shell" do |s|
279 | s.path = "provision.sh"
280 | s.privileged = false
281 | end
282 | end
283 | config.vm.define :ubuntu_trusty_cli do |local|
284 | local.vm.provider :virtualbox do |vb|
285 | vb.customize ['modifyvm', :id, '--memory', '1024']
286 | end
287 | local.vm.box = UBUNTU_TRUSTY_BOX_NAME
288 | local.vm.hostname = 'local'
289 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
290 | local.vm.provision "shell" do |s|
291 | s.path = "provision.sh"
292 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
293 | s.privileged = false
294 | end
295 | end
296 | config.vm.define :centos_final_cli do |local|
297 | local.vm.provider :virtualbox do |vb|
298 | vb.customize ['modifyvm', :id, '--memory', '1024']
299 | end
300 | local.vm.box = CENTOS_FINAL_BOX_NAME
301 | local.vm.hostname = 'local'
302 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
303 | local.vm.provision "shell" do |s|
304 | s.path = "provision.sh"
305 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
306 | s.privileged = false
307 | end
308 | end
309 | config.vm.define :ubuntu_lucid_cli do |local|
310 | local.vm.provider :virtualbox do |vb|
311 | vb.customize ['modifyvm', :id, '--memory', '1024']
312 | end
313 | local.vm.box = UBUNTU_LUCID_BOX_NAME
314 | local.vm.hostname = 'local'
315 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
316 | local.vm.provision "shell" do |s|
317 | s.path = "provision.sh"
318 | s.privileged = false
319 | end
320 | end
321 | config.vm.define :debian_squeeze_cli do |local|
322 | local.vm.provider :virtualbox do |vb|
323 | vb.customize ['modifyvm', :id, '--memory', '1024']
324 | end
325 | local.vm.box = DEBIAN_SQUEEZE_BOX_NAME
326 | local.vm.hostname = 'local'
327 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
328 | local.vm.provision "shell" do |s|
329 | s.path = "provision.sh"
330 | s.args = "#{GITHUB_USERNAME} #{GITHUB_PASSWORD}"
331 | s.privileged = false
332 | end
333 | end
334 | end
335 |
--------------------------------------------------------------------------------
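A note on driving the AWS builders defined in the Vagrantfile above: the AWS credential constants are read from the environment (the docker_images Vagrantfile further down shows the same ENV['AWS_ACCESS_KEY_ID'] pattern), and the GitHub credentials passed to provision.sh are presumably supplied the same way. A minimal sketch of bringing up a single builder, with placeholder values rather than real credentials:

    # Sketch only: export the values the Vagrantfile expects, then start one AWS machine.
    export AWS_ACCESS_KEY_ID="placeholder-key-id"
    export AWS_ACCESS_KEY="placeholder-secret"
    export GITHUB_USERNAME="placeholder-user"
    export GITHUB_PASSWORD="placeholder-password"
    vagrant up ubuntu_trusty_cli_aws --provider=aws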
/vagrant/cli/provision.sh:
--------------------------------------------------------------------------------
1 | function install_prereqs
2 | {
3 | if which apt-get; then
4 | # ubuntu
5 | sudo apt-get -y update &&
6 | # precise
7 | sudo apt-get install -y python-software-properties
8 | # # trusty
9 | # sudo apt-get install -y software-properties-common
10 | # sudo add-apt-repository -y ppa:git-core/ppa &&
11 | sudo apt-get install -y curl python-dev git make gcc libyaml-dev zlib1g-dev g++ rpm
12 | elif which yum; then
13 | echo "UPDATING LOCAL REPO"
14 | sudo yum -y --exclude=kernel\* update &&
15 | sudo yum install -y yum-downloadonly wget mlocate yum-utils &&
16 | echo "INSTALLING python-devel"
17 | sudo yum install -y python-devel
18 | echo "INSTALLING libyaml-devel"
19 | sudo yum install -y libyaml-devel
20 | echo "INSTALLING ruby"
21 | sudo yum install -y ruby
22 | echo "INSTALLING rubygems"
23 | sudo yum install -y rubygems
24 | echo "INSTALLING ruby-devel"
25 | sudo yum install -y ruby-devel
26 | echo "INSTALLING make"
27 | sudo yum install -y make
28 | echo "INSTALLING gcc"
29 | sudo yum install -y gcc
30 | echo "INSTALLING g++"
31 | sudo yum install -y g++
32 | echo "INSTALLING git"
33 | sudo yum install -y git
34 | echo "INSTALLING rpm-build"
35 | sudo yum install -y rpm-build
36 | echo "INSTALLING libxml2-devel"
37 | sudo yum install -y libxml2-devel
38 | echo "INSTALLING libxslt-devel"
39 | sudo yum install -y libxslt-devel
40 | else
41 | echo 'unsupported package manager, exiting'
42 | exit 1
43 | fi
44 | }
45 |
46 | function install_ruby
47 | {
48 | wget http://mirrors.ibiblio.org/ruby/1.9/ruby-1.9.3-rc1.tar.gz --no-check-certificate
49 | tar -xzvf ruby-1.9.3-rc1.tar.gz
50 | cd ruby-1.9.3-rc1
51 | ./configure --disable-install-doc
52 | make
53 | sudo make install
54 | cd ~
55 | }
56 |
57 | function install_fpm
58 | {
59 | sudo gem install fpm -v 1.3.3 --no-ri --no-rdoc
60 | sudo which fpm
61 | RESULT=$?
62 | if [ $RESULT -ne 0 ]; then
63 | FPM_PATH="$(which fpm)"
64 | sudo ln -s $FPM_PATH /bin/fpm
65 | # sudo ln -s /usr/local/bin/fpm /bin/fpm
66 | fi
67 | # if we want to download gems as a part of the packman run, this should be enabled
68 | # echo -e 'gem: --no-ri --no-rdoc\ninstall: --no-rdoc --no-ri\nupdate: --no-rdoc --no-ri' >> ~/.gemrc
69 | }
70 |
71 | function install_pip
72 | {
73 | if ! which pip >> /dev/null; then
74 | if which apt-get; then
75 | curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python
76 | else
77 | curl --silent --show-error --retry 5 https://bootstrap.pypa.io/get-pip.py | sudo python2.7
78 | fi
79 | fi
80 | }
81 |
82 | function install_module
83 | {
84 |
85 | module=$1
86 | venv=${2:-""}
87 | tag=${3:-""}
88 | if [[ ! -z "$tag" ]]; then
89 | org=${4:-cloudify-cosmo}
90 | url=https://github.com/${org}/${module}.git
91 | echo cloning ${url}
92 | git clone ${url}
93 | pushd ${module}
94 | git checkout -b tmp_branch ${tag}
95 | git log -1
96 | sudo ${venv}/bin/pip install .
97 | popd
98 | else
99 | if [[ ! -z "$venv" ]]; then
100 | # if [[ ! -z "$tag" ]]; then
101 | # pip install git+git://github.com/${org}/${module}.git@${tag}#egg=${module}
102 | # else
103 | sudo ${venv}/bin/pip install ${module}
104 | # fi
105 | else
106 | sudo pip install ${module}
107 | fi
108 | fi
109 | }
110 |
111 | function install_py27
112 | {
113 | # install python and additions
114 | # http://bicofino.io/blog/2014/01/16/installing-python-2-dot-7-6-on-centos-6-dot-5/
115 | sudo yum groupinstall -y 'development tools'
116 | sudo yum install -y zlib-devel bzip2-devel openssl-devel xz-libs
117 | sudo mkdir /py27
118 | cd /py27
119 | sudo wget http://www.python.org/ftp/python/2.7.6/Python-2.7.6.tar.xz
120 | sudo xz -d Python-2.7.6.tar.xz
121 | sudo tar -xvf Python-2.7.6.tar
122 | cd Python-2.7.6
123 | sudo ./configure --prefix=/usr
124 | sudo make
125 | sudo make altinstall
126 | if which python2.7; then
127 | alias python=python2.7
128 | fi
129 | }
130 |
131 | function copy_version_file
132 | {
133 | pushd /cfy/wheelhouse/
134 | sudo mkdir -p cloudify_cli
135 | sudo cp -f /cloudify-packager/VERSION cloudify_cli
136 | cloudify_cli=$(basename `find . -name cloudify-*.whl`)
137 | sudo zip $cloudify_cli cloudify_cli/VERSION
138 | sudo rm -rf cloudify_cli  # cloudify_cli is a directory, remove it recursively
139 | popd
140 | }
141 |
142 | function get_wheels
143 | {
144 | echo "Retrieving Wheels"
145 | sudo pip wheel virtualenv==12.0.7 &&
146 | # when the cli is built for py2.6, unless argparse is put within `install_requires`, we'll have to enable this:
147 | # if which yum; then
148 | # pip wheel argparse==#SOME_VERSION#
149 | # fi
150 | echo 'installing cloudify-rest-client wheel'
151 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-rest-client@${CORE_TAG_NAME} --find-links=wheelhouse &&
152 | echo 'installing cloudify-dsl-parser wheel' &&
153 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-dsl-parser@${CORE_TAG_NAME} --find-links=wheelhouse &&
154 | echo 'installing cloudify-plugins-common wheel' &&
155 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-plugins-common@${CORE_TAG_NAME} --find-links=wheelhouse &&
156 | echo 'installing cloudify-script-plugin wheel' &&
157 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-script-plugin@${PLUGINS_TAG_NAME} --find-links=wheelhouse &&
158 | echo 'installing cloudify-fabric-plugin wheel' &&
159 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-fabric-plugin@${PLUGINS_TAG_NAME} --find-links=wheelhouse &&
160 | echo 'installing cloudify-openstack-plugin wheel' &&
161 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-openstack-plugin@${PLUGINS_TAG_NAME} --find-links=wheelhouse &&
162 | echo 'installing cloudify-aws-plugin wheel' &&
163 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-aws-plugin@${PLUGINS_TAG_NAME} --find-links=wheelhouse &&
164 | echo 'installing cloudify-vsphere-plugin wheel' &&
165 | sudo pip wheel git+https://${GITHUB_USERNAME}:${GITHUB_PASSWORD}@github.com/cloudify-cosmo/cloudify-vsphere-plugin@${PLUGINS_TAG_NAME} --find-links=wheelhouse &&
166 | echo 'installing cloudify-softlayer-plugin wheel' &&
167 | sudo pip wheel git+https://${GITHUB_USERNAME}:${GITHUB_PASSWORD}@github.com/cloudify-cosmo/cloudify-softlayer-plugin@${PLUGINS_TAG_NAME} --find-links=wheelhouse &&
168 | echo 'installing cloudify-cli wheel' &&
169 | sudo pip wheel git+https://github.com/cloudify-cosmo/cloudify-cli@${CORE_TAG_NAME} --find-links=wheelhouse
170 | copy_version_file
171 | }
172 |
173 | function get_manager_blueprints
174 | {
175 | sudo curl -O http://cloudify-public-repositories.s3.amazonaws.com/cloudify-manager-blueprints/${CORE_TAG_NAME}/cloudify-manager-blueprints.tar.gz &&
176 | sudo tar -zxvf cloudify-manager-blueprints.tar.gz &&
177 | sudo rm cloudify-manager-blueprints.tar.gz &&
178 | echo "Retrieving Manager Blueprints"
179 | }
180 |
181 | function get_license
182 | {
183 | # copy license to virtualenv
184 | lic_dir="cloudify-license"
185 | sudo mkdir -p ${lic_dir}
186 | sudo cp -f /cloudify-packager/docker/cloudify-ui/LICENSE ${lic_dir}
187 | }
188 |
189 | CORE_TAG_NAME="4.4.dev1"
190 | PLUGINS_TAG_NAME="1.3"
191 | GITHUB_USERNAME=$1
192 | GITHUB_PASSWORD=$2
193 |
194 | install_prereqs &&
195 | if which apt-get; then
196 | install_ruby
197 | fi
198 | if which yum; then
199 | if ! which python2.7 >> /dev/null; then
200 | install_py27
201 | else
202 | alias python=python2.7
203 | fi
204 |
205 | fi
206 | install_fpm &&
207 | install_pip &&
208 | install_module "packman==0.5.0" &&
209 | install_module "wheel==0.24.0" &&
210 |
211 | sudo mkdir -p /cfy && cd /cfy &&
212 |
213 | echo '# GET PROCESS'
214 | get_license &&
215 | get_wheels &&
216 | get_manager_blueprints &&
217 |
218 | cd /cloudify-packager/ && sudo pkm pack -c cloudify-linux-cli -v
219 |
--------------------------------------------------------------------------------
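For completeness, a sketch of how the provision script above is expected to be driven: the Vagrantfile passes the GitHub credentials as the two positional arguments (GITHUB_USERNAME=$1, GITHUB_PASSWORD=$2), and the script finishes by running packman (pkm pack -c cloudify-linux-cli) from /cloudify-packager. A manual equivalent on a build VM would therefore look roughly like this (credential values are placeholders):

    # Sketch only: manual equivalent of the Vagrant shell provisioner run.
    cd /cloudify-packager/vagrant/cli
    bash provision.sh "placeholder-github-user" "placeholder-github-password"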
/vagrant/cli/windows/packaging/create_install_wizard.iss:
--------------------------------------------------------------------------------
1 | #define AppName "Cloudify CLI"
2 | #define AppVersion GetEnv('VERSION')
3 | #define AppPublisher "GigaSpaces Technologies"
4 | #define AppURL "http://getcloudify.org/"
5 |
6 | [Setup]
7 | ; NOTE: The value of AppId uniquely identifies this application.
8 | ; Do not use the same AppId value in installers for other applications.
9 | ; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
10 | AppId={{94B9D938-5123-4AC5-AA99-68F07F773DE2}
11 | AppName={#AppName}
12 | AppVersion={#AppVersion}
13 | AppPublisher={#AppPublisher}
14 | AppPublisherURL={#AppURL}
15 | AppSupportURL={#AppURL}
16 | AppUpdatesURL={#AppURL}
17 | DefaultDirName={pf}\Cloudify
18 | DisableProgramGroupPage=yes
19 | OutputBaseFilename=cloudify_cli_{#AppVersion}
20 | Compression=lzma
21 | SolidCompression=yes
22 | ArchitecturesInstallIn64BitMode=
23 | LicenseFile=source\license.txt
24 | MinVersion=6.0
25 | SetupIconFile=source\icons\Cloudify.ico
26 | UninstallDisplayIcon={app}\Cloudify.ico
27 | OutputDir=output\
28 |
29 | [Languages]
30 | Name: "english"; MessagesFile: "compiler:Default.isl"
31 |
32 | [Files]
33 | Source: "source\python\python.msi"; Flags: dontcopy nocompression
34 | Source: "source\wheels\*.whl"; Flags: dontcopy
35 | Source: "source\pip\*"; Flags: dontcopy
36 | Source: "source\virtualenv\*"; Flags: dontcopy
37 | Source: "source\icons\Cloudify.ico"; DestDir: "{app}"
38 |
39 | [Tasks]
40 | Name: "desktopicon"; Description: "Create a desktop icon";
41 |
42 | [Icons]
43 | Name: "{userdesktop}\Cloudify CLI"; Filename: "{cmd}"; Parameters: "/k ""{app}\Scripts\activate.bat"""; WorkingDir: "{app}"; IconFilename: "{app}\Cloudify.ico"; Tasks: "desktopicon";
44 |
45 | [UninstallDelete]
46 | ;this is NOT recommended but in our case, no user data here
47 | Type: "filesandordirs"; Name: "{app}"
48 |
49 | [Code]
50 | const
51 | mainPackageName = 'cloudify';
52 | //Registry key path
53 | RegPythonPath = 'SOFTWARE\Python\PythonCore\2.7\InstallPath';
54 | //Error messages
55 | errPythonMissing = 'Python installation was not found. In order to install {#AppName} you will need Python installed. Proceed to Python 2.7 installation?';
56 | errPipMissing = 'Pip was not found. Pip is a package management tool that is required to successfully install {#AppName}. Would you like to install it?';
57 | errVenvMissing = 'Virtualenv was not found. Virtualenv is a Python environment management tool that is required to successfully install {#AppName}. Would you like to install it?';
58 | errUnexpected = 'Unexpected error. Check install logs';
59 | infoPythonUninstall = 'Cloudify uninstaller will not remove Python as a safety precaution. Uninstalling Python should be done independently by the user';
60 |
61 |
62 | function getPythonDir(): String;
63 | var
64 | InstallPath: String;
65 | begin
66 | RegQueryStringValue(HKLM, RegPythonPath, '', InstallPath);
67 | RegQueryStringValue(HKCU, RegPythonPath, '', InstallPath);
68 | Result := InstallPath;
69 | end;
70 |
71 |
72 | function isPythonInstalled(): Boolean;
73 | begin
74 | if getPythonDir <> '' then
75 | Result := True
76 | else
77 | Result := False;
78 | end;
79 |
80 |
81 | function getPythonPath(): String;
82 | var
83 | PythonPath: String;
84 | begin
85 | if isPythonInstalled then begin
86 | PythonPath := AddBackslash(getPythonDir) + 'python.exe';
87 | if FileExists(PythonPath) then
88 | Result := PythonPath
89 | end;
90 | end;
91 |
92 |
93 | function runPythonSetup(): Boolean;
94 | var
95 | PythonArgs: String;
96 | InstallerPath: String;
97 | ErrorCode: Integer;
98 | begin
99 | ExtractTemporaryFile('python.msi');
100 | InstallerPath := Expandconstant('{tmp}\python.msi');
101 | PythonArgs := 'ADDDEFAULT=pip_feature';
102 | if WizardSilent then
103 | PythonArgs := PythonArgs + ' /qn';
104 | ShellExec('', InstallerPath, PythonArgs, '', SW_SHOW, ewWaituntilterminated, ErrorCode);
105 |
106 | if Errorcode <> 0 then
107 | Result := False
108 | else
109 | Result := True;
110 | end;
111 |
112 |
113 | function getPipPath(): String;
114 | var
115 | PipPath: String;
116 | begin
117 | if isPythonInstalled then begin
118 | PipPath := AddBackslash(getPythonDir) + 'Scripts\pip.exe';
119 | if FileExists(PipPath) then
120 | Result := PipPath
121 | end;
122 | end;
123 |
124 |
125 | function isPipInstalled(): Boolean;
126 | begin
127 | if getPipPath <> '' then
128 | Result := True
129 | else
130 | Result := False;
131 | end;
132 |
133 |
134 | function runPipSetup(): Boolean;
135 | var
136 | GetPipArgs: String;
137 | ErrorCode: Integer;
138 | begin
139 | if isPythonInstalled then begin
140 | ExtractTemporaryFiles('*.whl');
141 | ExtractTemporaryFile('get-pip.py');
142 | GetPipArgs := 'get-pip.py --use-wheel --no-index --find-links .';
143 | ShellExec('', getPythonPath, GetPipArgs, Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
144 |
145 | if Errorcode <> 0 then
146 | Result := False
147 | else
148 | Result := True;
149 | end;
150 | end;
151 |
152 |
153 | function getVenvPath(): String;
154 | var
155 | VenvPath: String;
156 | begin
157 | if isPythonInstalled then begin
158 | VenvPath := AddBackslash(getPythonDir) + 'Scripts\virtualenv.exe';
159 | if FileExists(VenvPath) then
160 | Result := VenvPath
161 | end;
162 | end;
163 |
164 |
165 | function isVenvInstalled(): Boolean;
166 | begin
167 | if getVenvPath <> '' then
168 | Result := True
169 | else
170 | Result := False;
171 | end;
172 |
173 |
174 | function runVenvSetup(): Boolean;
175 | var
176 | GetPipArgs: String;
177 | ErrorCode: Integer;
178 | begin
179 | if isPythonInstalled then begin
180 | ExtractTemporaryFiles('*.whl');
181 | GetPipArgs := 'install --use-wheel --no-index --find-links . virtualenv';
182 | ShellExec('', getPipPath, GetPipArgs, Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
183 |
184 | if Errorcode <> 0 then
185 | Result := False
186 | else
187 | Result := True;
188 | end;
189 | end;
190 |
191 |
192 | function runVenvInitialization(): Boolean;
193 | var
194 | ErrorCode: Integer;
195 | begin
196 | Exec(getVenvPath, Expandconstant('--clear "{app}"'), Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
197 |
198 | if Errorcode <> 0 then
199 | Result := False
200 | else
201 | Result := True;
202 | end;
203 |
204 |
205 | function runWheelsInstall(): Boolean;
206 | var
207 | PipArgs: String;
208 | ErrorCode: Integer;
209 | begin
210 | ExtractTemporaryFiles('*.whl');
211 |
212 | if not (isVenvInstalled and runVenvInitialization) then begin
213 | Result := False;
214 | Exit;
215 | end;
216 |
217 | PipArgs := Expandconstant('/c set "VIRTUAL_ENV={app}" && set "PATH={app}\Scripts;%PATH%" && pip install --pre --use-wheel --no-index --find-links . --force-reinstall --ignore-installed ' + mainPackageName);
218 | Exec(Expandconstant('{sys}\cmd.exe'), PipArgs, Expandconstant('{tmp}'), SW_SHOW, ewWaituntilterminated, ErrorCode);
219 |
220 | if Errorcode <> 0 then
221 | Result := False
222 | else
223 | Result := True;
224 | end;
225 |
226 |
227 | //wrap MsgBox to handle silent install case
228 | function getUserResponse(Message: String): Integer;
229 | begin
230 | if not WizardSilent then
231 | Result := MsgBox(Message, mbError, MB_OKCANCEL)
232 | else
233 | Result := IDOK;
234 | end;
235 |
236 |
237 | //Pre-Assumptions: Python and pip are installed
238 | procedure CurStepChanged(CurStep: TSetupStep);
239 | begin
240 | if CurStep = ssInstall then begin
241 | if not runWheelsInstall then
242 | RaiseException(errUnexpected);
243 | end;
244 | end;
245 |
246 |
247 | //Check for pre-requirements (Python, Pip, Virtualenv)
248 | function PrepareToInstall(var NeedsRestart: Boolean): String;
249 | var
250 | UserResponse: Integer;
251 | begin
252 | if not isPythonInstalled then begin
253 | UserResponse := getUserResponse(errPythonMissing);
254 | if UserResponse <> IDOK then begin
255 | Result := 'Installation cannot continue without Python installed';
256 | Exit;
257 | end
258 | else if not runPythonSetup then begin
259 | Result := 'Python setup failed';
260 | Exit;
261 | end;
262 | end;
263 |
264 | if not isPipInstalled then begin
265 | UserResponse := getUserResponse(errPipMissing);
266 | if UserResponse <> IDOK then begin
267 | Result := 'Installation cannot continue without Pip installed';
268 | exit;
269 | end
270 | else if not runPipSetup then begin
271 | Result := 'Pip installation failed';
272 | Exit;
273 | end;
274 | end;
275 |
276 | if not isVenvInstalled then begin
277 | UserResponse := getUserResponse(errVenvMissing);
278 | if UserResponse <> IDOK then begin
279 | Result := 'Installation cannot continue without Virtualenv installed';
280 | Exit;
281 | end
282 | else if not runVenvSetup then begin
283 | Result := 'Virtualenv installation failed';
284 | Exit;
285 | end;
286 | end;
287 | end;
288 |
289 |
290 | //Display info message when install done about Python uninstall
291 | procedure CurUninstallStepChanged(CurUninstallStep: TUninstallStep);
292 | begin
293 | if (CurUninstallStep = usPostUninstall) and (not UninstallSilent) then
294 | MsgBox(infoPythonUninstall, mbInformation, MB_OK);
295 | end;
--------------------------------------------------------------------------------
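The [Code] section above auto-answers the Python, pip and virtualenv prompts when the installer runs silently (getUserResponse returns IDOK under WizardSilent), so the generated setup can be driven unattended. A hypothetical unattended run of the compiled installer, using Inno Setup's standard silent switches; the output file name depends on the VERSION value set when iscc compiles the script, so the one below is made up:

    # Sketch only: unattended install of the generated setup (file name is hypothetical).
    ./cloudify_cli_3.2.0.exe /VERYSILENT /SUPPRESSMSGBOXES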
/vagrant/cli/windows/packaging/source/icons/Cloudify.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/cli/windows/packaging/source/icons/Cloudify.ico
--------------------------------------------------------------------------------
/vagrant/cli/windows/packaging/source/license.txt:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2015 GigaSpaces Technologies Ltd.
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/vagrant/cli/windows/packaging/source/pip/pip-6.1.1-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/cli/windows/packaging/source/pip/pip-6.1.1-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/vagrant/cli/windows/packaging/source/pip/setuptools-15.2-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/cli/windows/packaging/source/pip/setuptools-15.2-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/vagrant/cli/windows/packaging/source/python/python.msi:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/cli/windows/packaging/source/python/python.msi
--------------------------------------------------------------------------------
/vagrant/cli/windows/packaging/source/virtualenv/virtualenv-12.1.1-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/cloudify-cosmo/cloudify-packager/0c2c2637fd1fe024e61c55d959ead28e9b9178af/vagrant/cli/windows/packaging/source/virtualenv/virtualenv-12.1.1-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/vagrant/cli/windows/packaging/update_wheel.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import argparse
3 | from zipfile import ZipFile, ZIP_DEFLATED
4 | from hashlib import sha256
5 | from wheel.util import urlsafe_b64encode
6 | from collections import namedtuple
7 |
8 |
9 | def get_sha(data):
10 | return urlsafe_b64encode(sha256(data).digest())
11 |
12 |
13 | def modify_wheel(path, name, data):
14 |     with ZipFile(path) as zf:
15 |         zf.getinfo(name)  # raises KeyError early if the target is not in the wheel
16 |         with ZipFile(path + '-new', 'w', ZIP_DEFLATED) as new:
17 |             for item in zf.infolist():
18 |                 if item.filename.endswith('dist-info/RECORD'):
19 |                     records = zf.read(item.filename)
20 |                     newrecord = generate_record(records, name, data)
21 |                     new.writestr(item.filename, newrecord)
22 |                 elif item.filename == name:
23 |                     new.writestr(name, data)
24 |                 else:
25 |                     zipdata = zf.read(item.filename)
26 |                     new.writestr(item.filename, zipdata)
27 |
28 |
29 | def generate_record(records, name, data):
30 | data_sha = 'sha256=' + get_sha(data)
31 | data_size = str(len(data))
32 | out = []
33 | Record = namedtuple('Record', 'name hash size')
34 | for item in records.split():
35 | record = Record(*item.split(','))
36 | if record.name != name:
37 | out.append(item)
38 | else:
39 | if not record.hash.startswith('sha256'):
40 | raise Exception('Unexpected checksum method: {0}'.format(
41 | record.hash.split('=')[0]))
42 | out.append(','.join((record.name, data_sha, data_size)))
43 | return '\r\n'.join(out)
44 |
45 |
46 | def parse_args():
47 | description = """This script will modify wheel file by puting data into
48 | the target inside wheel archive. It will also update the RECORD file
49 | with new checksum and file size"""
50 | parser = argparse.ArgumentParser(description=description)
51 |
52 | parser.add_argument('--path', required=True, help="wheel's file path")
53 | parser.add_argument('--name', required=True, help='name of the target '
54 | 'file inside wheel')
55 | parser.add_argument('--data', required=True, help='data to write into '
56 | 'target file')
57 |
58 | return parser.parse_args()
59 |
60 |
61 | def main():
62 | args = parse_args()
63 | if args.data == '-':
64 | data = sys.stdin.read()
65 | else:
66 | data = args.data
67 | modify_wheel(path=args.path, name=args.name, data=data)
68 |
69 |
70 | if __name__ == '__main__':
71 | main()
72 |
--------------------------------------------------------------------------------
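update_wheel.py replaces a single file inside an existing wheel and rewrites the matching dist-info/RECORD entry (sha256 and size) so the archive stays consistent, writing the result next to the original with a '-new' suffix. The Windows provision script below uses it to embed the VERSION file into the cloudify-cli wheel; a standalone invocation would look roughly like this (the wheel file name is illustrative, not taken from the repository):

    # Sketch only: embed a VERSION file into a wheel while keeping its RECORD entry valid.
    python update_wheel.py \
        --path source/wheels/cloudify-3.2.0-py2-none-any.whl \
        --name cloudify_cli/VERSION \
        --data "$(cat VERSION)"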
/vagrant/cli/windows/provision.sh:
--------------------------------------------------------------------------------
1 | export CORE_TAG_NAME="4.4.dev1"
2 | export PLUGINS_TAG_NAME="1.3"
3 | export VERSION=`cat packaging/VERSION | grep version | sed 's/"version": "//g' | sed 's/"//g' | sed 's/,//g' | sed 's/ //g'`
4 |
5 | echo "VERSION=$VERSION"
6 |
7 | pip install wheel
8 |
9 | pip wheel --wheel-dir packaging/source/wheels https://github.com/cloudify-cosmo/cloudify-cli/archive/$CORE_TAG_NAME.zip#egg=cloudify-cli \
10 | https://github.com/cloudify-cosmo/cloudify-rest-client/archive/$CORE_TAG_NAME.zip#egg=cloudify-rest-client \
11 | https://github.com/cloudify-cosmo/cloudify-dsl-parser/archive/$CORE_TAG_NAME.zip#egg=cloudify-dsl-parser \
12 | https://github.com/cloudify-cosmo/cloudify-plugins-common/archive/$CORE_TAG_NAME.zip#egg=cloudify-plugins-common \
13 | https://github.com/cloudify-cosmo/cloudify-script-plugin/archive/$PLUGINS_TAG_NAME.zip#egg=cloudify-script-plugin
14 |
15 | export VERSION_FILE=$(cat packaging/VERSION)
16 |
17 | python packaging/update_wheel.py --path packaging/source/wheels/cloudify-*.whl --name cloudify_cli/VERSION --data "$VERSION_FILE"
18 | mv packaging/source/wheels/cloudify-*.whl-new packaging/source/wheels/cloudify-*.whl
19 |
20 | iscc packaging/create_install_wizard.iss
21 |
--------------------------------------------------------------------------------
/vagrant/docker_images/Vagrantfile:
--------------------------------------------------------------------------------
1 | ########
2 | # Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and
14 | # limitations under the License.
15 |
16 | # -*- mode: ruby -*-
17 | # vi: set ft=ruby :
18 |
19 | AWS_ACCESS_KEY_ID = ENV['AWS_ACCESS_KEY_ID']
20 | AWS_ACCESS_KEY = ENV['AWS_ACCESS_KEY']
21 |
22 | BASE_BOX_NAME = 'ubuntu/trusty64'
23 |
24 | Vagrant.configure('2') do |config|
25 | config.vm.define "ubuntu" do |ubuntu|
26 | #dummy box, will be overridden
27 | ubuntu.vm.box = "dummy"
28 | ubuntu.vm.box_url = "https://github.com/mitchellh/vagrant-aws/raw/master/dummy.box"
29 |
30 | ubuntu.vm.provider :aws do |aws, override|
31 | aws.access_key_id = AWS_ACCESS_KEY_ID
32 | aws.secret_access_key = AWS_ACCESS_KEY
33 |
34 | #official ubuntu 14.04 64bit box
35 | aws.ami = "ami-234ecc54"
36 | aws.region = "eu-west-1"
37 | aws.instance_type = "m3.medium"
38 |
39 | aws.keypair_name = "vagrant_build"
40 | override.ssh.username = "ubuntu"
41 | override.ssh.private_key_path = "/home/.ssh/aws/vagrant_build.pem"
42 |
43 | aws.tags = {
44 | "Name" => "vagrant docker images build",
45 | }
46 | aws.security_groups = "vagrant_cfy_build"
47 | end
48 |
49 | #need to sync folders
50 | ubuntu.vm.synced_folder "../../", "/cloudify-packager", create: true
51 | ubuntu.vm.provision "shell", path: "provision.sh", privileged: false
52 | end
53 |
54 | config.vm.define :local do |local|
55 | local.vm.provider :virtualbox do |vb|
56 | vb.customize ['modifyvm', :id, '--memory', '1024']
57 | end
58 | local.vm.box = BASE_BOX_NAME
59 | local.vm.hostname = 'local'
60 | local.vm.synced_folder "../../", "/cloudify-packager", create: true
61 | local.vm.provision "shell", path: "provision.sh", privileged: false
62 | end
63 | end
64 |
--------------------------------------------------------------------------------
/vagrant/docker_images/provision.sh:
--------------------------------------------------------------------------------
1 | #! /bin/bash -e
2 |
3 | CORE_TAG_NAME="4.4.dev1"
4 |
5 |
6 | install_docker()
7 | {
8 | export DEBIAN_FRONTEND=noninteractive
9 | kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual"
10 | sudo apt-get update
11 | sudo -E apt-get install -y -q $kern_extras
12 | sudo modprobe aufs
13 |
14 | sudo apt-get install -y -q curl ca-certificates
15 | sudo apt-get install -y -q apt-transport-https ca-certificates
16 |
17 | sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
18 | echo deb https://get.docker.com/ubuntu docker main | sudo tee /etc/apt/sources.list.d/docker.list
19 | sudo apt-get update
20 | sudo apt-get install -y lxc-docker-1.6.0
21 | }
22 |
23 | setup_jocker_env()
24 | {
25 | sudo apt-get install -y python-pip
26 | }
27 |
28 | clone_packager()
29 | {
30 | git clone https://github.com/cloudify-cosmo/cloudify-packager.git $1
31 | pushd $1
32 | git checkout -b tmp_branch $CORE_TAG_NAME
33 | git log -1
34 | popd
35 | }
36 |
37 | build_images()
38 | {
39 | CLONE_LOCATION=/tmp/cloudify-packager
40 | clone_packager $CLONE_LOCATION
41 | cp /cloudify-packager/docker/metadata/* /tmp/cloudify-packager/docker/metadata/
42 | setup_jocker_env
43 | echo Building cloudify stack image.
44 | pushd $CLONE_LOCATION
45 | ./docker/build.sh $CLONE_LOCATION
46 | popd
47 | }
48 |
49 | start_and_export_containers()
50 | {
51 | sudo docker run -t --name=cloudify -d cloudify:latest /bin/bash
52 | sudo docker export cloudify > /tmp/cloudify-docker_.tar
53 | sudo docker run -t --name=cloudifycommercial -d cloudify-commercial:latest /bin/bash
54 | sudo docker export cloudifycommercial > /tmp/cloudify-docker_commercial.tar
55 | }
56 |
57 | main()
58 | {
59 | install_docker
60 | build_images
61 | start_and_export_containers
62 | }
63 |
64 | main
65 |
--------------------------------------------------------------------------------
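As context for the export step in start_and_export_containers above: docker export writes a flat filesystem snapshot of the named container, which can later be turned back into an image on another host with docker import. A minimal sketch of that consumption side (the image tag is an assumption, not something defined in this repository):

    # Sketch only: re-import an exported container filesystem as an image on another host.
    sudo docker import /tmp/cloudify-docker_.tar cloudify:latest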