├── solar
    ├── core
    │   ├── __init__.py
    │   ├── transports
    │   │   ├── __init__.py
    │   │   └── helpers
    │   │   │   └── __init__.py
    │   ├── resource
    │   │   └── __init__.py
    │   ├── handlers
    │   │   ├── python.py
    │   │   └── naive_sync.py
    │   ├── log.py
    │   └── actions.py
    ├── test
    │   ├── __init__.py
    │   ├── resource_fixtures
    │   │   ├── node
    │   │   │   ├── actions
    │   │   │   │   └── run.yaml
    │   │   │   └── meta.yaml
    │   │   ├── update.yaml.tmpl
    │   │   ├── with_location.yaml.tmpl
    │   │   ├── resource_with_dict.yaml.tmpl
    │   │   ├── resource_with_list.yaml.tmpl
    │   │   ├── nodes.yaml.tmpl
    │   │   └── base_service
    │   │   │   └── meta.yaml
    │   ├── orch_fixtures
    │   │   ├── simple.yaml
    │   │   ├── sequential.yaml
    │   │   ├── README.md
    │   │   ├── two_path.yaml
    │   │   ├── sleeping_beauty.yaml
    │   │   ├── test_errors.yaml
    │   │   └── upd_test_errors.yaml
    │   └── test_celery_executor.py
    ├── system_log
    │   ├── __init__.py
    │   ├── consts.py
    │   └── tasks.py
    ├── dblayer
    │   ├── test
    │   │   └── __init__.py
    │   ├── standalone_session_wrapper.py
    │   └── riak_client.py
    ├── orchestration
    │   ├── __init__.py
    │   ├── consts.py
    │   ├── runner.py
    │   └── utils.py
    ├── computable_inputs
    │   ├── helpers
    │   │   ├── __init__.py
    │   │   ├── python_helpers.py
    │   │   └── lua_helpers.lua
    │   └── __init__.py
    ├── cli
    │   └── __init__.py
    ├── __init__.py
    ├── events
    │   └── __init__.py
    └── errors.py
├── doc
    ├── source
    │   ├── _static
    │   │   └── .gitignore
    │   ├── _templates
    │   │   └── .gitignore
    │   ├── tutorials
    │   │   └── index.rst
    │   └── index.rst
    ├── libvirt.md
    └── removal.md
├── resources
    ├── apt_repo
    │   ├── templates
    │   │   ├── source
    │   │   └── preferences
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── file
    │   ├── actions
    │   │   ├── remove.sh
    │   │   └── run.sh
    │   └── meta.yaml
    ├── dnsmasq
    │   ├── actions
    │   │   ├── run.yaml
    │   │   └── exclude_mac_pxe.yaml
    │   └── meta.yaml
    ├── nova_neutron_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── cinder_glance_puppet
    │   ├── actions
    │   │   ├── remove.pp
    │   │   └── run.pp
    │   ├── meta.yaml
    │   └── README.md
    ├── not_provisioned_node
    │   ├── actions
    │   │   ├── reboot.sh
    │   │   ├── run.sh
    │   │   └── provision.sh
    │   ├── templates
    │   │   ├── agent.config
    │   │   └── cloud-init-templates
    │   │   │   ├── meta-data_centos.jinja2
    │   │   │   ├── meta-data_ubuntu.jinja2
    │   │   │   ├── cloud_config_centos.jinja2
    │   │   │   └── cloud_config_ubuntu.jinja2
    │   └── meta.yaml
    ├── nova_compute_libvirt_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── ansible_local
    │   ├── actions
    │   │   ├── test_role
    │   │   │   ├── defaults
    │   │   │   │   └── main.yaml
    │   │   │   └── tasks
    │   │   │   │   └── main.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── transport_torrent
    │   ├── scripts
    │   │   └── solar_torrent.py
    │   ├── actions
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── node_network_puppet
    │   ├── actions
    │   │   └── remove.pp
    │   ├── test.py
    │   └── README.md
    ├── glance_config
    │   ├── actions
    │   │   └── remove.yaml
    │   ├── templates
    │   │   ├── exports
    │   │   ├── glance-registry.conf
    │   │   ├── glance-api.conf
    │   │   ├── policy.json
    │   │   ├── schema-image.json
    │   │   └── glance-registry-paste.ini
    │   └── meta.yaml
    ├── neutron_plugins_ml2_puppet
    │   ├── actions
    │   │   └── remove.pp
    │   └── meta.yaml
    ├── data_container
    │   ├── actions
    │   │   ├── echo.yaml
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── keystone_config
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   ├── templates
    │   │   ├── exports
    │   │   └── logging.conf
    │   └── meta.yaml
    ├── rabbitmq_config
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   ├── meta.yaml
    │   └── templates
    │   │   └── rabbitmq.conf
    ├── cinder_scheduler_puppet
    │   ├── README.md
    │   ├── actions
    │   │   ├── remove.pp
    │   │   ├── run.pp
    │   │   └── update.pp
    │   ├── test.py
    │   └── meta.yaml
    ├── openrc_file
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   ├── templates
    │   │   └── openrc.template
    │   └── meta.yaml
    ├── riak_join_single
    │   ├── actions
    │   │   └── join.yaml
    │   └── meta.yaml
    ├── cinder_volume_puppet
    │   ├── actions
    │   │   ├── remove.pp
    │   │   ├── update.pp
    │   │   └── run.pp
    │   ├── README.md
    │   ├── test.py
    │   └── meta.yaml
    ├── keystone_puppet
    │   ├── actions
    │   │   ├── remove.pp
    │   │   ├── update.pp
    │   │   └── run.pp
    │   ├── test.py
    │   ├── README.md
    │   └── meta.yaml
    ├── riak_node
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   ├── commit.yaml
    │   │   ├── update.yaml
    │   │   ├── join.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── cinder_puppet
    │   ├── actions
    │   │   └── remove.pp
    │   └── test.py
    ├── nova_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── ansible_remote
    │   ├── actions
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── apache_puppet
    │   ├── README.md
    │   ├── actions
    │   │   └── remove.pp
    │   └── test.py
    ├── haproxy_config
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   ├── run.yaml
    │   │   └── update.yaml
    │   ├── meta.yaml
    │   └── README.md
    ├── neutron_agents_ml2_ovs_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── haproxy_service
    │   ├── actions
    │   │   ├── update.yaml
    │   │   ├── apply_config.yaml
    │   │   ├── remove.yaml
    │   │   ├── run.yaml
    │   │   └── install.yaml
    │   ├── test.py
    │   ├── README.md
    │   └── meta.yaml
    ├── rabbitmq_vhost
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── hosts_file
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   ├── run.yaml
    │   │   └── update.yaml
    │   └── meta.yaml
    ├── neutron_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── sources
    │   └── meta.yaml
    ├── keystone_role
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── keystone_user
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   ├── test.py
    │   └── meta.yaml
    ├── docker_container
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   ├── run.yaml
    │   │   └── update.yaml
    │   └── meta.yaml
    ├── rabbitmq_user
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── ceph_mon
    │   ├── actions
    │   │   └── test.pp
    │   └── meta.yaml
    ├── lxc_host
    │   ├── actions
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── ex_managed
    │   ├── managers
    │   │   └── manager.py
    │   └── meta.yaml
    ├── glance_api_service
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   ├── test.py
    │   └── meta.yaml
    ├── glance_registry_puppet
    │   ├── actions
    │   │   └── remove.pp
    │   └── test.py
    ├── librarian
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   ├── meta.yaml
    │   └── templates
    │   │   └── Puppetfile
    ├── nova_generic_service_puppet
    │   ├── README.md
    │   ├── actions
    │   │   ├── remove.pp
    │   │   ├── run.pp
    │   │   └── update.pp
    │   └── meta.yaml
    ├── glance_registry_service
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── rabbitmq_service
    │   ├── actions
    │   │   ├── remove.pp
    │   │   └── run.pp
    │   └── meta.yaml
    ├── keystone_service
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   ├── test.py
    │   ├── README.md
    │   └── meta.yaml
    ├── keystone_tenant
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── managed_apt
    │   ├── actions
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── haproxy_service_config
    │   ├── README.md
    │   └── meta.yaml
    ├── cinder_api_puppet
    │   ├── test.py
    │   └── actions
    │   │   └── remove.pp
    ├── docker
    │   ├── actions
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── nova_conductor_puppet
    │   ├── actions
    │   │   ├── remove.pp
    │   │   ├── run.pp
    │   │   └── update.pp
    │   ├── README.md
    │   └── meta.yaml
    ├── glance_puppet
    │   ├── actions
    │   │   └── remove.pp
    │   └── test.py
    ├── mariadb_service
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── neutron_server_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── remote_file
    │   ├── actions
    │   │   └── run.sh
    │   └── meta.yaml
    ├── solar_bootstrap
    │   ├── meta.yaml
    │   └── actions
    │   │   └── run.yaml
    ├── mariadb_user
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   ├── run.yaml
    │   │   └── update.yaml
    │   └── meta.yaml
    ├── transports
    │   └── meta.yaml
    ├── container_networks
    │   ├── meta.yaml
    │   └── actions
    │   │   └── run.yaml
    ├── ceph_keys
    │   ├── meta.yaml
    │   └── actions
    │   │   └── run.sh
    ├── mariadb_db
    │   ├── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── neutron_agents_l3_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── neutron_agents_dhcp_puppet
    │   ├── actions
    │   │   └── remove.pp
    │   └── meta.yaml
    ├── neutron_agents_metadata_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── transport_ssh
    │   └── meta.yaml
    ├── nova_api_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── ro_node
    │   └── meta.yaml
    ├── vxlan_mesh
    │   ├── meta.yaml
    │   └── actions
    │   │   └── run.yaml
    ├── fuel_library
    │   ├── actions
    │   │   └── run.sh
    │   └── meta.yaml
    ├── ssh_key
    │   ├── actions
    │   │   └── run.yaml
    │   └── meta.yaml
    ├── nova_compute_puppet
    │   └── actions
    │   │   └── remove.pp
    ├── keystone_service_endpoint
    │   ├── actions
    │   │   ├── run.yaml
    │   │   └── remove.yaml
    │   └── meta.yaml
    ├── volume_group
    │   ├── meta.yaml
    │   └── actions
    │   │   ├── remove.yaml
    │   │   └── run.yaml
    ├── transport_solar_agent
    │   ├── meta.yaml
    │   └── actions
    │   │   ├── run.yaml
    │   │   └── update.yaml
    └── lxc_container
    │   ├── actions
    │   │   └── run.yaml
    │   └── meta.yaml
├── bootstrap
    ├── ansible.cfg
    ├── playbooks
    │   ├── files
    │   │   ├── sshd.sh
    │   │   ├── supervisor.conf
    │   │   ├── nginx.cfg
    │   │   ├── nginx_vagrant_dir.cfg
    │   │   ├── hiera.yaml
    │   │   ├── pxelinux.cfg
    │   │   ├── dnsmasq_pxe.conf
    │   │   ├── ubuntu-ansible.sh
    │   │   ├── update.sh
    │   │   └── minimize.sh
    │   ├── tasks
    │   │   ├── cloud_archive.yaml
    │   │   ├── puppet.yaml
    │   │   ├── mos.yaml
    │   │   └── docker.yaml
    │   ├── build-main.yaml
    │   ├── solar.yaml
    │   └── celery.yaml
    ├── build-image.yaml
    ├── packer.yaml
    ├── vagrant_plugins
    │   └── noop.rb
    └── README.md
├── MANIFEST.in
├── jenkins-config.yaml
├── .testr.conf
├── library
    └── README.md
├── .config
├── examples
    ├── bootstrap
    │   ├── vagrant-settings.yaml
    │   └── README.md
    ├── cli
    │   ├── README
    │   └── example.sh
    ├── openstack
    │   ├── README.md
    │   └── rabbitmq_user.yaml
    ├── librarian
    │   ├── README
    │   └── librarian.yaml
    ├── provisioning
    │   └── provision.sh
    ├── torrent
    │   └── README.md
    ├── lxc
    │   └── README.md
    ├── library_ceph
    │   └── README.md
    ├── compiled-resources
    │   └── README.md
    ├── hosts_file
    │   └── hosts.py
    └── riak
    │   ├── README.md
    │   └── riak_cluster.yaml
├── test-requirements.txt
├── templates
    ├── sources.yaml
    ├── seed_node.yaml
    ├── keystone_api.yaml
    ├── glance.yaml
    ├── nodes_with_transports.yaml
    ├── glance_registry.yaml
    ├── glance_base.yaml
    ├── glance_db.yaml
    ├── keystone_base.yaml
    ├── nodes.yaml
    └── haproxy.yaml
├── run.sh
├── .vagrantplugins
├── utils
    └── docker_build.sh
├── config.yaml
├── requirements.txt
├── .travis.yml
├── Dockerfile
├── .gitignore
├── setup.cfg
├── setup.py
├── docker-compose.yml
├── tox.ini
├── vagrant-settings.yaml_defaults
└── run_tests.sh

/solar/core/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/test/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/system_log/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/doc/source/_static/.gitignore:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/doc/source/_templates/.gitignore:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/core/transports/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/dblayer/test/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/orchestration/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/orchestration/consts.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/computable_inputs/helpers/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/resources/apt_repo/templates/source:
--------------------------------------------------------------------------------
{{repo}}
--------------------------------------------------------------------------------
/solar/core/transports/helpers/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/solar/test/resource_fixtures/node/actions/run.yaml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/bootstrap/ansible.cfg:
--------------------------------------------------------------------------------
[defaults]
host_key_checking = False
--------------------------------------------------------------------------------
/resources/file/actions/remove.sh:
--------------------------------------------------------------------------------
#!/bin/bash

rm {{ path }}
--------------------------------------------------------------------------------
/resources/file/actions/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash

touch {{ path }}
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
include *.md
include *.txt
recursive-include solar/ *
--------------------------------------------------------------------------------
/resources/dnsmasq/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
--------------------------------------------------------------------------------
/solar/cli/__init__.py:
--------------------------------------------------------------------------------
from solar.dblayer import standalone_session_wrapper
--------------------------------------------------------------------------------
/resources/nova_neutron_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
notify { "Nothing to remove here": }
--------------------------------------------------------------------------------
/resources/cinder_glance_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
notify { 'Nothing to remove here': }
--------------------------------------------------------------------------------
/bootstrap/playbooks/files/sshd.sh:
--------------------------------------------------------------------------------
#!/bin/bash -eux

echo "UseDNS no" >> /etc/ssh/sshd_config
--------------------------------------------------------------------------------
/resources/not_provisioned_node/actions/reboot.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -eux

reboot now
--------------------------------------------------------------------------------
/resources/nova_compute_libvirt_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
notify { 'Remove action is not supported upstream': }
--------------------------------------------------------------------------------
/resources/ansible_local/actions/test_role/defaults/main.yaml:
--------------------------------------------------------------------------------

var1: initial
uuid: stuff
def1: the_same
--------------------------------------------------------------------------------
/resources/transport_torrent/scripts/solar_torrent.py:
--------------------------------------------------------------------------------
../../../solar/solar/core/transports/helpers/solar_torrent.py
--------------------------------------------------------------------------------
/resources/apt_repo/templates/preferences:
--------------------------------------------------------------------------------
Package: {{package}}
Pin: {{pin}}
Pin-Priority: {{pin_priority}}
--------------------------------------------------------------------------------
/resources/node_network_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class {'l23network':
  ensure_package => 'absent',
}
--------------------------------------------------------------------------------
/jenkins-config.yaml:
--------------------------------------------------------------------------------
clients-data-file: /tmp/connections.yaml

file-system-db:
  storage-path: /tmp/storage
--------------------------------------------------------------------------------
/resources/glance_config/actions/remove.yaml:
--------------------------------------------------------------------------------

- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: echo 'removed'
--------------------------------------------------------------------------------
/resources/neutron_plugins_ml2_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class { 'neutron::plugins::ml2':
  package_ensure => 'absent',
}
--------------------------------------------------------------------------------
/resources/ansible_local/actions/test_role/tasks/main.yaml:
--------------------------------------------------------------------------------
- debug: msg="Variable1 {{ var1 }} with uuid {{ uuid }} and default var {{ def1 }}"
--------------------------------------------------------------------------------
/resources/data_container/actions/echo.yaml:
--------------------------------------------------------------------------------

- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: echo `/sbin/ifconfig`
--------------------------------------------------------------------------------
/solar/test/resource_fixtures/update.yaml.tmpl:
--------------------------------------------------------------------------------
id: simple_multinode
updates:
  - id: node1
    values:
      ip: '10.0.0.4'
--------------------------------------------------------------------------------
/resources/keystone_config/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - file: path={{config_dir}} state=absent
--------------------------------------------------------------------------------
/resources/rabbitmq_config/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - file: path={{config_dir}} state=absent
--------------------------------------------------------------------------------
/bootstrap/playbooks/files/supervisor.conf:
--------------------------------------------------------------------------------
[program:{{name}}]
command={{cmd}}
redirect_stderr=true
stdout_logfile=/var/log/{{name}}.log
--------------------------------------------------------------------------------
/resources/cinder_scheduler_puppet/README.md:
--------------------------------------------------------------------------------
# Cinder Scheduler resource for puppet handler

Sets up and configures the Cinder scheduler service.
--------------------------------------------------------------------------------
/resources/keystone_config/templates/exports:
--------------------------------------------------------------------------------
export OS_SERVICE_ENDPOINT=http://localhost:35357/v2.0/
export OS_SERVICE_TOKEN={{ admin_token }}
--------------------------------------------------------------------------------
/resources/openrc_file/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [ {{ host }} ]
  sudo: yes
  tasks:
    - file: path=/root/openrc state=absent
--------------------------------------------------------------------------------
/resources/riak_join_single/actions/join.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: riak-admin cluster join {{join_to}}
--------------------------------------------------------------------------------
/solar/test/resource_fixtures/node/meta.yaml:
--------------------------------------------------------------------------------
id: node
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
--------------------------------------------------------------------------------
/resources/cinder_volume_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class {'cinder::volume':
  enabled => false,
  package_ensure => 'absent',
}
--------------------------------------------------------------------------------
/resources/file/meta.yaml:
--------------------------------------------------------------------------------
id: file
handler: shell
version: 1.0.0
input:
  path:
    schema: str!
    value: /tmp/test_file
--------------------------------------------------------------------------------
/resources/keystone_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class {'keystone':
  admin_token => '{{ admin_token }}',
  package_ensure => 'absent'
}
--------------------------------------------------------------------------------
/resources/riak_node/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - apt:
        name: riak
        state: absent
--------------------------------------------------------------------------------
/resources/cinder_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class {'cinder':
  package_ensure => 'absent',
  rabbit_password => 'not important as removed',
}
--------------------------------------------------------------------------------
/resources/cinder_scheduler_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class {'cinder::scheduler':
  enabled => false,
  package_ensure => 'absent',
}
--------------------------------------------------------------------------------
/resources/nova_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class { 'nova':
  ensure_package => 'absent',
  rabbit_password => 'not important as removed',
}
--------------------------------------------------------------------------------
/resources/ansible_remote/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: '*'
  sudo: yes
  vars:
    default1: playbook
  tasks:
    - debug: msg="my message {{default1}}"
--------------------------------------------------------------------------------
/resources/apache_puppet/README.md:
--------------------------------------------------------------------------------
# Apache puppet resource

This resource installs Apache and manages the Apache service.
Defaults are provided for the Debian OS family.
--------------------------------------------------------------------------------
/.testr.conf:
--------------------------------------------------------------------------------
[DEFAULT]
test_command=py.test ./solar --subunit $LISTOPT $IDOPTION
test_id_option=--subunit-load-list=$IDFILE
test_list_option=--collectonly
--------------------------------------------------------------------------------
/library/README.md:
--------------------------------------------------------------------------------
We keep up-to-date/required Ansible modules here. This is a temporary arrangement that will be resolved soon.
These files are not part of Solar.
--------------------------------------------------------------------------------
/resources/haproxy_config/actions/remove.yaml:
--------------------------------------------------------------------------------
# TODO
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - file: path={{ config_dir.value['src'] }} state=absent
--------------------------------------------------------------------------------
/resources/neutron_agents_ml2_ovs_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class { 'neutron::agents::ml2::ovs':
  package_ensure => 'absent',
  enabled => false,
}
--------------------------------------------------------------------------------
/resources/haproxy_service/actions/update.yaml:
--------------------------------------------------------------------------------

- hosts: [{{host}}]
  sudo: yes
  tasks:
    - service:
        name: haproxy
        state: reloaded
--------------------------------------------------------------------------------
/resources/apache_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class {'apache':
  service_enable => false,
  service_ensure => 'stopped',
  package_ensure => 'absent',
}
--------------------------------------------------------------------------------
/resources/not_provisioned_node/templates/agent.config:
--------------------------------------------------------------------------------
[DEFAULT]
debug=true
nc_template_path=/templates/cloud-init-templates/
log_file=/var/log/fuel-agent.log
--------------------------------------------------------------------------------
/resources/rabbitmq_vhost/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - rabbitmq_vhost: name={{vhost_name}}
                      state=absent
--------------------------------------------------------------------------------
/resources/haproxy_service/actions/apply_config.yaml:
--------------------------------------------------------------------------------

- hosts: [{{ host }}]
  sudo: yes
  tasks:
    - service:
        name: haproxy
        state: reloaded
--------------------------------------------------------------------------------
/resources/hosts_file/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: Remove hosts file
      shell: echo '# flushed by ansible' > /etc/hosts
--------------------------------------------------------------------------------
/resources/rabbitmq_vhost/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - rabbitmq_vhost: name={{vhost_name}}
                      state=present
--------------------------------------------------------------------------------
/.config:
--------------------------------------------------------------------------------
dblayer: riak
redis:
  host: localhost
  port: '6379'
solar_db:
  mode: riak
  host: localhost
  port: '8087'
  protocol: pbc
--------------------------------------------------------------------------------
/bootstrap/playbooks/files/nginx.cfg:
--------------------------------------------------------------------------------
server {
    listen 8000;
    root /var/lib/tftp;

    location / {
        autoindex on;
    }
}
--------------------------------------------------------------------------------
/doc/source/tutorials/index.rst:
--------------------------------------------------------------------------------
List of Solar tutorials
=================================

Contents:

.. toctree::
   :maxdepth: 1

   wordpress
--------------------------------------------------------------------------------
/examples/bootstrap/vagrant-settings.yaml:
--------------------------------------------------------------------------------
# Rename this file to vagrant-settings.yml; the Vagrantfile
# will then use the values from this file.

slaves_count: 3
slaves_image: ubuntu/trusty64
--------------------------------------------------------------------------------
/resources/neutron_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class { 'neutron':
  enabled => false,
  package_ensure => 'absent',
  rabbit_password => 'not important as removed',
}
--------------------------------------------------------------------------------
/resources/sources/meta.yaml:
--------------------------------------------------------------------------------
id: sources
handler: naive_sync
version: 1.0.0
input:
  sources:
    schema: [{'src': 'str!', 'dst': 'str!'}]
    value: []
--------------------------------------------------------------------------------
/bootstrap/playbooks/files/nginx_vagrant_dir.cfg:
--------------------------------------------------------------------------------
server {
    listen 8001;
    root /vagrant;

    location / {
        autoindex on;
    }
}
--------------------------------------------------------------------------------
/resources/data_container/actions/remove.yaml:
--------------------------------------------------------------------------------

- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: docker stop {{ resource_name }}
    - shell: docker rm {{ resource_name }}
--------------------------------------------------------------------------------
/resources/keystone_role/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: keystone role
      #TODO: not implemented in module
      pause: seconds=1
--------------------------------------------------------------------------------
/resources/keystone_user/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: keystone user
      #TODO: not implemented in module
      pause: seconds=1
--------------------------------------------------------------------------------
/resources/not_provisioned_node/actions/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -eux

# Fake run action which is required in order to make
# the dependency `run` -> `provision`

exit 0
--------------------------------------------------------------------------------
/resources/docker_container/actions/remove.yaml:
--------------------------------------------------------------------------------

- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: docker stop {{ resource_name }}
    - shell: docker rm {{ resource_name }}
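      # the container must be stopped first: `docker rm` cannot remove a running container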

--------------------------------------------------------------------------------
/resources/riak_node/actions/commit.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    # - shell: sleep 30
    - shell: riak-admin cluster plan
    - shell: riak-admin cluster commit
--------------------------------------------------------------------------------
/test-requirements.txt:
--------------------------------------------------------------------------------
-r requirements.txt
hacking<0.11,>=0.10.0
pytest
pytest-cov
pytest-mock
tox
pytest-subunit
os-testr

# for computable inputs
lupa
--------------------------------------------------------------------------------
/solar/__init__.py:
--------------------------------------------------------------------------------
try:
    from gevent import monkey
except ImportError:
    pass
else:
    monkey.patch_all()
    from solar.dblayer.gevent_patches import patch_all
    patch_all()
--------------------------------------------------------------------------------
/solar/test/resource_fixtures/with_location.yaml.tmpl:
--------------------------------------------------------------------------------
id: simple_multinode
resources:
  - id: base_resource
    location: node1
    from: {resource_path}
    values:
      ip: '10.0.0.3'
--------------------------------------------------------------------------------
/resources/rabbitmq_user/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - rabbitmq_user: user={{user_name}}
                     vhost={{vhost_name}}
                     state=absent
--------------------------------------------------------------------------------
/resources/hosts_file/meta.yaml:
--------------------------------------------------------------------------------
id: hosts_file
handler: ansible
version: 1.0.0

input:
  hosts:
    schema: [{name: str!, ip: str!}]
    value: []

tags: [resource=hosts_file]
--------------------------------------------------------------------------------
/resources/ceph_mon/actions/test.pp:
--------------------------------------------------------------------------------
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')

notify{"The value is: ${ceph_cluster_network}": }
--------------------------------------------------------------------------------
/resources/lxc_host/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: '*'
  sudo: yes
  roles:
    - { role: "lxc_hosts", tags: [ "lxc-host", "host-setup" ] }
  post_tasks:
    - shell: pip install git+https://github.com/lxc/python2-lxc.git#egg=lxc
--------------------------------------------------------------------------------
/templates/sources.yaml:
--------------------------------------------------------------------------------
id: sources
resources:
  - id: sources{{index}}
    from: resources/sources
    location: {{node}}
    values:
      sources:
        - {src: /tmp/sources_test, dst: /tmp/sources_test}
--------------------------------------------------------------------------------
/bootstrap/playbooks/files/hiera.yaml:
--------------------------------------------------------------------------------
:backends:
  - yaml
  #- json
:yaml:
  :datadir: /etc/puppet/hieradata
:json:
  :datadir: /etc/puppet/hieradata
:hierarchy:
  - "%{resource_name}"
  - resource
--------------------------------------------------------------------------------
/resources/haproxy_service/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: haproxy container
      docker:
        name: {{ resource_name }}
        image: {{ image }}
        state: absent
--------------------------------------------------------------------------------
/examples/cli/README:
--------------------------------------------------------------------------------
This example shows how to use Solar via the CLI.

Usage
=====

Run

`bash ./example.sh`

After this you can run `solar orch report last` and wait until all tasks reach the SUCCESS status.
--------------------------------------------------------------------------------
/resources/ex_managed/managers/manager.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import sys
import json

data = json.loads(sys.stdin.read())

rst = {'val_x_val': int(data['val'])**2}

sys.stdout.write(json.dumps(rst))
--------------------------------------------------------------------------------
/resources/glance_api_service/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: glance api container
      docker:
        name: {{ resource_name }}
        image: {{ image }}
        state: absent
--------------------------------------------------------------------------------
/resources/ansible_local/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: localhost
  sudo: yes
  vars:
    var1: 'playbook'
  roles:
    - { role: "test_role" }
  tasks:
    - debug: msg="VAR1 value is {{var1}}"
    - fail: msg='just test failure'
--------------------------------------------------------------------------------
/resources/ansible_local/meta.yaml:
--------------------------------------------------------------------------------
id: ansible_sample
handler: ansible_playbook
version: 0.0.1
input:
  var1:
    type: str!
    value: meta
  uuid:
    type: str!
    value: 'aa1das1231'
--------------------------------------------------------------------------------
/resources/glance_registry_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
$resource = hiera($::resource_name)

class {'glance::registry':
  enabled => false,
  package_ensure => 'absent',
  keystone_password => 'not important as removed'
}
--------------------------------------------------------------------------------
/resources/librarian/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - file: path={{modules_path}} state=absent
    - file: path={{modules_path}}/../Puppetfile state=absent
    - shell: gem uninstall librarian-puppet
--------------------------------------------------------------------------------
/resources/nova_generic_service_puppet/README.md:
--------------------------------------------------------------------------------
# Nova generic service resource for puppet handler

Sets up and configures the Nova generic services.

Source: https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/generic_service.pp
--------------------------------------------------------------------------------
/solar/test/resource_fixtures/resource_with_dict.yaml.tmpl:
--------------------------------------------------------------------------------
id: simple_resource_with_list
resources:
  - id: res1
    from: {resource_path}
    values:
      ip: '10.0.0.3'
      servers:
        a: 1
        b: 2
--------------------------------------------------------------------------------
/solar/test/resource_fixtures/resource_with_list.yaml.tmpl:
--------------------------------------------------------------------------------
id: simple_resource_with_list
resources:
  - id: res1
    from: {resource_path}
    values:
      ip: '10.0.0.3'
      servers:
        - 1
        - 2
--------------------------------------------------------------------------------
/resources/glance_registry_service/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: glance registry container
      docker:
        name: {{ resource_name }}
        image: {{ image }}
        state: absent
--------------------------------------------------------------------------------
/resources/rabbitmq_service/actions/remove.pp:
--------------------------------------------------------------------------------
$resource = hiera($::resource_name)

class { '::rabbitmq':
  package_ensure => 'absent',
  environment_variables => {
    'RABBITMQ_SERVICENAME' => 'RabbitMQ'
  }
}
--------------------------------------------------------------------------------
/templates/seed_node.yaml:
--------------------------------------------------------------------------------
id: seed_node
resources:
  - id: seed_node
    from: resources/ro_node
    values:
      ip: '10.0.0.2'
      ssh_key: '/vagrant/.vagrant/machines/solar-dev/virtualbox/private_key'
      ssh_user: 'vagrant'
--------------------------------------------------------------------------------
/resources/ex_managed/meta.yaml:
--------------------------------------------------------------------------------
id: managed
handler: none
version: 1.0.0
managers:
  - managers/manager.py
input:
  val:
    schema: int!
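    # val is the managed input; managers/manager.py (listed under managers: above)
    # reads it from stdin and fills val_x_val below with val squared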
    value: 2
  val_x_val:
    schema: int
    value:
--------------------------------------------------------------------------------
/resources/keystone_service/actions/remove.yaml:
--------------------------------------------------------------------------------
# TODO
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: keystone container
      docker:
        image: {{ image }}
        name: {{ resource_name }}
        state: absent
--------------------------------------------------------------------------------
/resources/apache_puppet/test.py:
--------------------------------------------------------------------------------
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing apache_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], 80)
    )
--------------------------------------------------------------------------------
/resources/glance_config/templates/exports:
--------------------------------------------------------------------------------
export OS_TENANT_NAME={{ keystone_admin_tenant }}
export OS_USERNAME={{ keystone_admin_user }}
export OS_PASSWORD={{ keystone_admin_password }}
export OS_AUTH_URL=http://{{ keystone_ip }}:{{ keystone_admin_port }}/v2.0
--------------------------------------------------------------------------------
/resources/keystone_tenant/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: keystone tenant
      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} tenant={{tenant_name}} state=absent
--------------------------------------------------------------------------------
/resources/managed_apt/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: echo 'Managed by solar' > /etc/apt/sources.list
      when: {{ensure_other_removed}}
    - shell: apt-get update
      when: {{ensure_other_removed}}
--------------------------------------------------------------------------------
/solar/test/resource_fixtures/nodes.yaml.tmpl:
--------------------------------------------------------------------------------
id: simple_multinode
resources:
  - id: node1
    from: {resource_path}
    values:
      ip: '10.0.0.3'
  - id: node2
    from: {resource_path}
    values:
      ip: '10.0.0.4'
--------------------------------------------------------------------------------
/resources/haproxy_service_config/README.md:
--------------------------------------------------------------------------------
# `haproxy_service_config` resource

This resource represents the config for a single service handled by HAProxy.
It connects into `haproxy_config`, which collects all services that are to
be load-balanced by HAProxy.
--------------------------------------------------------------------------------
/solar/computable_inputs/helpers/python_helpers.py:
--------------------------------------------------------------------------------
def make_arr(data):
    t = {}
    for ov in data:
        if t.get(ov['resource']) is None:
            t[ov['resource']] = {}
        t[ov['resource']][ov['other_input']] = ov['value']
    return t
--------------------------------------------------------------------------------
/resources/cinder_puppet/test.py:
--------------------------------------------------------------------------------
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], resource.args['port'])
    )
--------------------------------------------------------------------------------
/resources/data_container/meta.yaml:
--------------------------------------------------------------------------------
id: data_container
handler: ansible
version: 1.0.0
input:
  ip:
    type: str!
    value:
  image:
    type: str!
    value:
  export_volumes:
    type: str!
    value:
--------------------------------------------------------------------------------
/solar/test/resource_fixtures/base_service/meta.yaml:
--------------------------------------------------------------------------------
id: base_service
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  servers:
    schema: [str]
    value: []
  alias:
    schema: str
    value:
--------------------------------------------------------------------------------
/resources/keystone_puppet/test.py:
--------------------------------------------------------------------------------
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing keystone_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], resource.args['port'])
    )
--------------------------------------------------------------------------------
/resources/keystone_service/test.py:
--------------------------------------------------------------------------------
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing keystone_service')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], resource.args['port'])
    )
--------------------------------------------------------------------------------
/resources/dnsmasq/actions/exclude_mac_pxe.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes

  tasks:
    - lineinfile: create=yes dest=/etc/dnsmasq.d/no_pxe_{{exclude_mac_pxe | replace(':', '_')}}.conf line="dhcp-host={{exclude_mac_pxe}},set:nopxe"
    - shell: service dnsmasq restart
--------------------------------------------------------------------------------
/resources/haproxy_service/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - apt:
        name: haproxy
        state: present
    - replace:
        dest: '/etc/default/haproxy'
        regexp: ENABLED=0
        replace: ENABLED=1
--------------------------------------------------------------------------------
/resources/haproxy_service/test.py:
--------------------------------------------------------------------------------
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing haproxy_service')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], resource.args['ports'][0][0])
    )
--------------------------------------------------------------------------------
/resources/transport_torrent/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: [{{ host }}]
  sudo: yes
  tasks:
    - apt:
        name: python-libtorrent
        state: present
    - copy:
        src: {{scripts_dir}}/solar_torrent.py
        dest: /var/tmp/solar_torrent.py
--------------------------------------------------------------------------------
/resources/cinder_api_puppet/test.py:
--------------------------------------------------------------------------------
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_api_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], resource.args['service_port'])
    )
--------------------------------------------------------------------------------
/resources/docker/actions/run.yaml:
--------------------------------------------------------------------------------

- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: docker --version
      ignore_errors: true
      register: docker_version
    - shell: curl -sSL https://get.docker.com/ | sudo sh
      when: docker_version|failed
--------------------------------------------------------------------------------
/resources/haproxy_service/actions/install.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - apt:
        name: haproxy
        state: present
    - replace:
        dest: '/etc/default/haproxy'
        regexp: ENABLED=0
        replace: ENABLED=1
--------------------------------------------------------------------------------
/resources/glance_registry_puppet/test.py:
--------------------------------------------------------------------------------
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing glance_registry_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], resource.args['bind_port'])
    )
--------------------------------------------------------------------------------
/resources/nova_conductor_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class { 'nova::conductor':
  ensure_package => 'absent',
  enabled => false,
}

include nova::params

package { 'nova-common':
  name => $nova::params::common_package_name,
  ensure => 'absent',
}
--------------------------------------------------------------------------------
/solar/test/orch_fixtures/simple.yaml:
--------------------------------------------------------------------------------
name: simple
tasks:
  - uid: echo_stuff
    parameters:
      type: echo
      args: [10]
    before: [just_fail]
  - uid: just_fail
    parameters:
      type: error
      args: ['message']
--------------------------------------------------------------------------------
/resources/glance_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
$resource = hiera($::resource_name)

include glance::params

class {'glance':
  package_ensure => 'absent',
}

package { [$glance::params::api_package_name, $::glance::params::package_name] :
  ensure => 'absent',
}
--------------------------------------------------------------------------------
/resources/keystone_puppet/README.md:
--------------------------------------------------------------------------------
# `keystone_puppet` resource

This resource implements inputs for the official OpenStack Keystone Puppet manifests
from https://github.com/openstack/puppet-keystone (`stable/juno` branch).

Basic tests are present that check HTTP connectivity to the service.
--------------------------------------------------------------------------------
/resources/docker/meta.yaml:
--------------------------------------------------------------------------------
id: docker
handler: ansible
version: 1.0.0

input:
  ip:
    schema: str!
    value:
  # ssh_user:
  #   schema: str!
  #   value:
  # ssh_key:
  #   schema: str!
  #   value:

tags: [resources/docker]
--------------------------------------------------------------------------------
/resources/mariadb_service/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - name: mariadb container
      docker:
        name: {{ resource_name }}
        image: {{ image }}
        state: absent
    - file: path=/var/lib/docker/data/{{resource_name}} state=absent
--------------------------------------------------------------------------------
/resources/rabbitmq_config/actions/run.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  vars:
    admin_user: {{admin_user}}
    admin_password: {{admin_password}}
  tasks:
    - file: path={{config_dir}} state=directory
    - template: src={{templates_dir}}/rabbitmq.conf dest={{config_dir}}/rabbitmq.conf
--------------------------------------------------------------------------------
/run.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# required for ease of development
if [ -d /solar ]; then
    cd /solar && python setup.py develop
fi

# used only to start celery on docker
ansible-playbook -v -i "localhost," -c local /celery.yaml --skip-tags slave,stop

tail -f /var/run/celery/*.log
--------------------------------------------------------------------------------
/resources/apt_repo/actions/remove.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: rm -f {{item}}
      with_items:
        - /etc/apt/sources.list.d/{{name}}.list
        - /etc/apt/preferences.d/{{name}}.pref
    - shell: apt-get update
      when: {{validate_integrity}}
--------------------------------------------------------------------------------
/resources/neutron_server_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class { 'neutron::server':
  enabled => false,
  package_ensure => 'absent',
  auth_password => 'not important as removed',
}

# Remove external class dependency
Service <| title == 'neutron-server' |> {
  require => undef
}
--------------------------------------------------------------------------------
/resources/remote_file/actions/run.sh:
--------------------------------------------------------------------------------
mkdir -p {{dest}}

{% for transport in remote %}
{% if transport.name == 'ssh' %}
scp -i {{transport.key}} -r {{transport.user}}@{{remote_ip}}:/{{remote_path}} {{dest}}
exit 0
{% endif %}
{% endfor %}
echo 'No suitable transport.'
exit 2
--------------------------------------------------------------------------------
/bootstrap/build-image.yaml:
--------------------------------------------------------------------------------
---

- hosts: all
  sudo: yes
  tasks:
    #- shell: vagrant init ubuntu/trusty64
    - shell: /usr/local/bin/packer build -only=qemu solar-master.json -var 'is_master=true'
    - shell: /usr/local/bin/packer build -only=virtualbox-iso solar-master.json -var 'is_master=true'
--------------------------------------------------------------------------------
/resources/haproxy_service/README.md:
--------------------------------------------------------------------------------
# `haproxy_service` resource

This resource sets up a Docker container with the HAProxy code. It requires
its config to be provided by the `haproxy_config` resource (mounted under
`/etc/haproxy`).

For the container philosophy, see the `README.md` file in the
`keystone_service` resource.
--------------------------------------------------------------------------------
/resources/haproxy_service/meta.yaml:
--------------------------------------------------------------------------------
id: haproxy_service
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  # ssh_user:
  #   schema: str!
  #   value:
  # ssh_key:
  #   schema: str!
  #   value:

tags: [resources=haproxy]
--------------------------------------------------------------------------------
/resources/riak_node/actions/update.yaml:
--------------------------------------------------------------------------------
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - service:
        name: riak
        state: stopped
    - template:
        src: {{templates_dir}}/riak.conf
        dest: /etc/riak/riak.conf
    - service:
        name: riak
        state: reloaded
--------------------------------------------------------------------------------
/bootstrap/playbooks/tasks/cloud_archive.yaml:
--------------------------------------------------------------------------------
---

- shell: sudo apt-get update
- shell: sudo apt-get -y upgrade
- shell: sudo apt-get -y install python-software-properties software-properties-common
- shell: sudo add-apt-repository -y cloud-archive:juno
- shell: sudo apt-get update
- shell: sudo apt-get update --fix-missing
--------------------------------------------------------------------------------
/resources/cinder_api_puppet/actions/remove.pp:
--------------------------------------------------------------------------------
class {'cinder::api':
  enabled => false,
  package_ensure => 'absent',
  keystone_password => 'not important as removed',
}

include cinder::params

package { 'cinder':
  ensure => 'absent',
  name => $::cinder::params::package_name,
}
--------------------------------------------------------------------------------
/resources/dnsmasq/meta.yaml:
--------------------------------------------------------------------------------
id: dnsmasq
handler: ansible
version: 1.0.0

actions:
  exclude_mac_pxe: exclude_mac_pxe.yaml
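  # each key under actions: maps an action name to the playbook in actions/
  # that implements it; exclude_mac_pxe above is a custom action alongside
  # the standard run action below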
exclude_mac_pxe.yaml 7 | run: run.yaml 8 | 9 | input: 10 | ip: 11 | schema: str! 12 | value: 13 | 14 | exclude_mac_pxe: 15 | schema: str! 16 | value: 17 | 18 | tags: [resources=dnsmasq] 19 | -------------------------------------------------------------------------------- /resources/node_network_puppet/test.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from solar.core.log import log 4 | 5 | 6 | def test(resource): 7 | log.debug('Testing node_network_puppet') 8 | # requests.get( 9 | # 'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value) 10 | # TODO(bogdando) figure out how to test this 11 | # ) 12 | -------------------------------------------------------------------------------- /resources/solar_bootstrap/meta.yaml: -------------------------------------------------------------------------------- 1 | handler: ansible 2 | id: 'solar_bootstrap' 3 | input: 4 | ip: 5 | schema: str! 6 | value: 7 | # ssh_key: 8 | # schema: str! 9 | # value: 10 | # ssh_user: 11 | # schema: str! 12 | # value: 13 | 14 | master_ip: 15 | schema: str! 16 | value: 17 | tags: [] 18 | version: 1.0.0 19 | -------------------------------------------------------------------------------- /.vagrantplugins: -------------------------------------------------------------------------------- 1 | required_plugins = %w(vagrant-vbguest) 2 | 3 | requires_restart = false 4 | 5 | required_plugins.each do |plugin| 6 | unless Vagrant.has_plugin? plugin 7 | system "vagrant plugin install #{plugin}" 8 | requires_restart = true 9 | end 10 | end 11 | 12 | if requires_restart 13 | exec "vagrant #{ARGV.join' '}" 14 | end 15 | -------------------------------------------------------------------------------- /resources/ansible_remote/meta.yaml: -------------------------------------------------------------------------------- 1 | id: ansible_sample 2 | handler: ansible_playbook 3 | version: 0.0.1 4 | input: 5 | ip: 6 | type: str! 7 | value: 8 | # ssh_user: 9 | # type: str! 10 | # value: 11 | # ssh_key: 12 | # type: str! 13 | # value: 14 | default1: 15 | type: str! 16 | value: meta 17 | -------------------------------------------------------------------------------- /resources/mariadb_user/actions/remove.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: mariadb user 5 | mysql_user: 6 | name: {{user_name}} 7 | state: absent 8 | login_user: root 9 | login_password: {{login_password}} 10 | login_port: {{login_port}} 11 | login_host: {{db_host}} 12 | -------------------------------------------------------------------------------- /resources/openrc_file/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{ host }}] 2 | sudo: yes 3 | vars: 4 | tenant: {{tenant}} 5 | user_name: {{user_name}} 6 | password: {{password}} 7 | keystone_host: {{keystone_host}} 8 | keystone_port: {{keystone_port}} 9 | tasks: 10 | - template: src={{templates_dir}}/openrc.template dest=/root/openrc 11 | -------------------------------------------------------------------------------- /resources/transports/meta.yaml: -------------------------------------------------------------------------------- 1 | id: transports 2 | input: 3 | transports: 4 | schema: [{user: str, password: str, port: int!, key: str, name: str!, trackers: [str]}] 5 | value: [] 6 | transports_id: 7 | schema: str! 
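# note: the $uuid value below is, by all appearances, expanded by Solar into a generated unique id (the ro_node resource uses the same convention for location_id)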
8 | value: $uuid 9 | reverse: True 10 | location_id: 11 | schema: str 12 | value: 13 | reverse: True 14 | -------------------------------------------------------------------------------- /resources/keystone_tenant/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: install python-keystoneclient 5 | shell: apt-get install -y python-keystoneclient 6 | - name: keystone tenant 7 | keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} tenant={{tenant_name}} state=present 8 | -------------------------------------------------------------------------------- /bootstrap/playbooks/files/pxelinux.cfg: -------------------------------------------------------------------------------- 1 | default vesamenu.c32 2 | menu title Live CD Choices 3 | prompt 0 4 | timeout 3 5 | menu autoboot 6 | 7 | label ubuntu 8 | menu label Ubuntu 9 | kernel /ubuntu/linux 10 | append initrd=/ubuntu/initramfs.img verbose fetch=http://{{http_ip}}:{{http_port}}/ubuntu/root.squashfs ip=dhcp boot=live 11 | ipappend 2 12 | -------------------------------------------------------------------------------- /bootstrap/packer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | sudo: yes 5 | tasks: 6 | - shell: wget 'https://dl.bintray.com/mitchellh/packer/packer_0.8.2_linux_amd64.zip' -O /tmp/packer-0.8.2.zip 7 | args: 8 | creates: /tmp/packer-0.8.2.zip 9 | - unarchive: 10 | src: /tmp/packer-0.8.2.zip 11 | dest: /usr/local/bin 12 | copy: no 13 | -------------------------------------------------------------------------------- /bootstrap/playbooks/files/dnsmasq_pxe.conf: -------------------------------------------------------------------------------- 1 | # Specify interface for dhcp server 2 | interface={{dhcp_interface}} 3 | bind-interfaces 4 | 5 | # Specify IP addresses range 6 | dhcp-range={{dhcp_range_start}},{{dhcp_range_end}},12h 7 | 8 | # Net boot file name 9 | dhcp-boot=net:!nopxe,pxelinux.0 10 | 11 | # Configure tftp 12 | enable-tftp 13 | tftp-root={{tftp_root}} 14 | -------------------------------------------------------------------------------- /solar/computable_inputs/helpers/lua_helpers.lua: -------------------------------------------------------------------------------- 1 | function make_arr(data) 2 | local t = {} 3 | for orig_value in python.iter(data) do 4 | if t[orig_value["resource"]] == nil then 5 | t[orig_value["resource"]] = {} 6 | end 7 | t[orig_value["resource"]][orig_value['other_input']] = orig_value['value'] 8 | end 9 | return t 10 | end 11 | -------------------------------------------------------------------------------- /utils/docker_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # should be executed from directory with required Dockerfile 5 | name_w_tags=$1 6 | 7 | if [[ -z "$name_w_tags" ]]; then 8 | name_w_tags='solarproject/solar-celery:latest' 9 | fi 10 | 11 | echo "Building image with name $name_w_tags" 12 | docker build -t "$name_w_tags" .
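# push the freshly built image to the registry; assumes you are already logged in (docker login)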
13 | docker push "$name_w_tags" 14 | 15 | -------------------------------------------------------------------------------- /resources/rabbitmq_service/actions/run.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $port = "${resource['input']['port']}" 4 | $management_port = "${resource['input']['management_port']}" 5 | 6 | class { '::rabbitmq': 7 | service_manage => true, 8 | port => $port, 9 | management_port => $management_port, 10 | delete_guest_user => true, 11 | } 12 | -------------------------------------------------------------------------------- /resources/container_networks/meta.yaml: -------------------------------------------------------------------------------- 1 | id: container_networks 2 | handler: ansible_playbook 3 | version: 1.0.0 4 | actions: 5 | input: 6 | ip: 7 | schema: str! 8 | value: 9 | # ssh_key: 10 | # schema: str! 11 | # value: 12 | # ssh_user: 13 | # schema: str! 14 | # value: 15 | networks: 16 | schema: {} 17 | value: 18 | -------------------------------------------------------------------------------- /resources/hosts_file/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | {% for val in hosts %} 5 | - name: Create hosts entries for {{val['name']}} => {{val['ip']}} 6 | lineinfile: 7 | dest: /etc/hosts 8 | regexp: ".*{{val['name']}}$" 9 | line: "{{val['ip']}} {{val['name']}}" 10 | state: present 11 | {% endfor %} 12 | -------------------------------------------------------------------------------- /bootstrap/playbooks/build-main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Main build script 4 | hosts: all 5 | sudo: yes 6 | tasks: 7 | - include: tasks/base.yaml 8 | - include: tasks/puppet.yaml 9 | - include: tasks/docker.yaml 10 | #- include: celery.yaml tags=['master'] celery_dir=/var/run/celery 11 | - include: tasks/cloud_archive.yaml 12 | #- include: tasks/mos.yaml 13 | -------------------------------------------------------------------------------- /resources/hosts_file/actions/update.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | {% for val in hosts %} 5 | - name: Create hosts entries for {{val['name']}} => {{val['ip']}} 6 | lineinfile: 7 | dest: /etc/hosts 8 | regexp: ".*{{val['name']}}$" 9 | line: "{{val['ip']}} {{val['name']}}" 10 | state: present 11 | {% endfor %} 12 | -------------------------------------------------------------------------------- /resources/keystone_role/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: install python-keystoneclient 5 | shell: apt-get install -y python-keystoneclient 6 | - name: keystone role 7 | keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} user={{user_name}} tenant={{tenant_name}} role={{role_name}} state=present 8 | -------------------------------------------------------------------------------- /resources/transport_torrent/meta.yaml: -------------------------------------------------------------------------------- 1 | id: transport_torrent 2 | handler: ansible 3 | input: 4 | trackers: 5 | schema: [str!] 6 | value: [] 7 | name: 8 | schema: str!
9 | value: torrent 10 | location_id: 11 | schema: str 12 | value: 13 | reverse: True 14 | is_own: False 15 | transports_id: 16 | schema: str 17 | value: 18 | is_emit: False 19 | -------------------------------------------------------------------------------- /examples/openstack/README.md: -------------------------------------------------------------------------------- 1 | Deploys a simple two-node OpenStack env. 2 | 3 | You need to run it from the main solar directory. To prepare resources run: 4 | 5 | `python examples/openstack/openstack.py create_all` 6 | 7 | Then to start deployment: 8 | 9 | ``` 10 | solar changes stage 11 | solar changes process 12 | solar orch run-once last 13 | ``` 14 | 15 | To see the progress: 16 | 17 | `solar orch report` 18 | -------------------------------------------------------------------------------- /resources/apt_repo/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - template: 5 | src: {{templates_dir}}/source 6 | dest: /etc/apt/sources.list.d/{{name}}.list 7 | - template: 8 | src: {{templates_dir}}/preferences 9 | dest: /etc/apt/preferences.d/{{name}}.pref 10 | - shell: apt-get update 11 | when: {{validate_integrity}} 12 | -------------------------------------------------------------------------------- /resources/cinder_volume_puppet/README.md: -------------------------------------------------------------------------------- 1 | # Cinder Volume resource for puppet handler 2 | 3 | Setup and configure the cinder volume service. 4 | Configure the iSCSI volume backend for cinder volume nodes, if specified 5 | (default: true). 6 | 7 | source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/volume/iscsi.pp 8 | source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/volume.pp -------------------------------------------------------------------------------- /resources/managed_apt/meta.yaml: -------------------------------------------------------------------------------- 1 | # This resource will clean up apt repos that it does not manage (see ensure_other_removed) 2 | id: apt_repo_manager 3 | handler: ansible 4 | version: 1.0.0 5 | input: 6 | ip: 7 | schema: str! 8 | value: 9 | repos: 10 | schema: [str!] 11 | value: 12 | names: 13 | schema: [str!] 14 | value: 15 | ensure_other_removed: 16 | schema: bool 17 | value: true 18 | -------------------------------------------------------------------------------- /resources/openrc_file/templates/openrc.template: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | export LC_ALL=C 3 | export OS_NO_CACHE="true" 4 | export OS_TENANT_NAME={{tenant}} 5 | export OS_USERNAME={{user_name}} 6 | export OS_PASSWORD={{password}} 7 | export OS_AUTH_URL=http://{{keystone_host}}:{{keystone_port}}/v2.0 8 | export OS_AUTH_STRATEGY=keystone 9 | export OS_REGION_NAME='RegionOne' 10 | export OS_VOLUME_API_VERSION='2' -------------------------------------------------------------------------------- /resources/riak_join_single/meta.yaml: -------------------------------------------------------------------------------- 1 | id: riak_join_single 2 | handler: ansible 3 | version: 1.0.0 4 | actions: 5 | join: actions/join.yaml 6 | input: 7 | join_to: 8 | schema: str! 9 | value: 10 | ip: 11 | schema: str! 12 | value: 13 | # ssh_key: 14 | # schema: str! 15 | # value: 16 | # ssh_user: 17 | # schema: str!
18 | # value: 19 | -------------------------------------------------------------------------------- /solar/test/orch_fixtures/sequential.yaml: -------------------------------------------------------------------------------- 1 | name: seq 2 | tasks: 3 | - uid: s1 4 | parameters: 5 | type: sleep 6 | args: [2] 7 | target: 1 8 | - uid: s2 9 | 10 | parameters: 11 | type: sleep 12 | args: [2] 13 | target: 1 14 | - uid: s3 15 | parameters: 16 | type: sleep 17 | args: [2] 18 | target: 1 19 | -------------------------------------------------------------------------------- /resources/ceph_keys/meta.yaml: -------------------------------------------------------------------------------- 1 | id: ceph_keys 2 | handler: shell 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | target_directory: 9 | schema: str! 10 | value: /var/lib/astute/ 11 | key_name: 12 | schema: str! 13 | value: ceph 14 | path: 15 | schema: str! 16 | value: /var/lib/astute/ceph/ 17 | tags: [] 18 | -------------------------------------------------------------------------------- /resources/node_network_puppet/README.md: -------------------------------------------------------------------------------- 1 | # Node network resource for puppet handler 2 | 3 | Setup and configure L23 networking for a node. 4 | Leverages the powerful network_scheme structures to 5 | create all required networking entities like interfaces, 6 | bridges, bonds - both linux and ovs based. 7 | Defaults are given for Debian OS family. 8 | 9 | source https://github.com/xenolog/l23network 10 | -------------------------------------------------------------------------------- /bootstrap/playbooks/files/ubuntu-ansible.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # TODO: maybe this is better: 4 | # http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-apt-ubuntu 5 | 6 | sudo apt-get remove -f python-pip 7 | sudo apt-get update 8 | sudo apt-get install -y python-setuptools python-dev autoconf g++ 9 | sudo easy_install pip 10 | sudo pip install -U pip 11 | sudo pip install ansible 12 | -------------------------------------------------------------------------------- /examples/librarian/README: -------------------------------------------------------------------------------- 1 | Usage 2 | ===== 3 | 4 | Run it from /vagrant dir: 5 | 6 | ``` 7 | solar resource clear_all 8 | solar resource create nodes templates/nodes.yaml '{"count": 1}' 9 | solar resource create librarian_example examples/librarian/librarian.yaml '{"node": "node1"}' 10 | 11 | solar changes stage 12 | solar changes process 13 | solar orch run-once 14 | solar orch report -w 100 15 | ``` 16 | -------------------------------------------------------------------------------- /resources/cinder_volume_puppet/test.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from solar.core.log import log 4 | 5 | 6 | def test(resource): 7 | log.debug('Testing cinder_volume_puppet') 8 | # requests.get( 9 | # 'http://%s:%s' % (resource.args['ip'], resource.args['port']) 10 | # TODO(bogdando) figure out how to test this 11 | # http://docs.openstack.org/developer/nova/devref/volume.html 12 | # ) 13 | -------------------------------------------------------------------------------- /resources/cinder_scheduler_puppet/test.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from solar.core.log import log 4 | 5 | 6 | def 
test(resource): 7 | log.debug('Testing cinder_scheduler_puppet') 8 | # requests.get( 9 | # 'http://%s:%s' % (resource.args['ip'], resource.args['port']) 10 | # TODO(bogdando) figure out how to test this 11 | # http://docs.openstack.org/developer/nova/devref/scheduler.html 12 | # ) 13 | -------------------------------------------------------------------------------- /resources/keystone_user/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: install python-keystoneclient 5 | shell: apt-get install -y python-keystoneclient 6 | - name: keystone user 7 | keystone_user: endpoint=http://{{ keystone_host }}:{{ keystone_port }}/v2.0/ token={{ admin_token }} user={{ user_name }} password={{ user_password }} tenant={{ tenant_name }} state=present 8 | -------------------------------------------------------------------------------- /resources/librarian/meta.yaml: -------------------------------------------------------------------------------- 1 | id: librarian 2 | handler: ansible 3 | version: 0.0.1 4 | actions: 5 | run: run.yaml 6 | update: run.yaml 7 | remove: remove.yaml 8 | input: 9 | modules: 10 | schema: [{}] 11 | value: [] 12 | modules_path: 13 | schema: str! 14 | value: /etc/puppet/modules 15 | forge: 16 | schema: str! 17 | value: https://forgeapi.puppetlabs.com 18 | -------------------------------------------------------------------------------- /resources/mariadb_db/actions/remove.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: mariadb db 5 | mysql_db: 6 | name: {{db_name}} 7 | state: absent 8 | login_user: root 9 | login_password: {{login_password}} 10 | login_port: {{login_port}} 11 | login_host: {{db_host}} 12 | collation: {{collation}} 13 | encoding: {{encoding}} 14 | -------------------------------------------------------------------------------- /resources/mariadb_db/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: mariadb db 5 | mysql_db: 6 | name: {{ db_name }} 7 | state: present 8 | login_user: root 9 | login_password: {{ login_password }} 10 | login_port: {{ login_port }} 11 | login_host: {{db_host}} 12 | collation: {{collation}} 13 | encoding: {{encoding}} 14 | -------------------------------------------------------------------------------- /resources/neutron_agents_l3_puppet/actions/remove.pp: -------------------------------------------------------------------------------- 1 | class { 'neutron::agents::l3': 2 | package_ensure => 'absent', 3 | enabled => false, 4 | } 5 | 6 | include neutron::params 7 | 8 | package { 'neutron': 9 | ensure => 'absent', 10 | name => $::neutron::params::package_name, 11 | } 12 | 13 | # Remove external class dependency 14 | Service <| title == 'neutron-l3' |> { 15 | require => undef 16 | } -------------------------------------------------------------------------------- /resources/rabbitmq_vhost/meta.yaml: -------------------------------------------------------------------------------- 1 | id: rabbitmq_vhost 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | vhost_name: 6 | schema: str! 7 | value: openstack 8 | ip: 9 | schema: str! 10 | value: 11 | # ssh_key: 12 | # schema: str! 13 | # value: 14 | # ssh_user: 15 | # schema: str!
16 | # value: 17 | 18 | tags: [resources/rabbitmq, resource/rabbitmq_vhost] 19 | -------------------------------------------------------------------------------- /resources/remote_file/meta.yaml: -------------------------------------------------------------------------------- 1 | id: remote_file 2 | handler: shell 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | remote: 9 | schema: {} 10 | value: 11 | remote_ip: 12 | schema: str! 13 | value: 14 | remote_path: 15 | schema: str! 16 | value: 17 | dest: 18 | schema: str! 19 | value: 20 | tags: [] 21 | -------------------------------------------------------------------------------- /resources/rabbitmq_user/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - rabbitmq_user: user={{user_name}} 5 | password={{password}} 6 | vhost={{vhost_name}} 7 | configure_priv=.* 8 | read_priv=.* 9 | write_priv=.* 10 | tags={{tags}} 11 | state=present 12 | -------------------------------------------------------------------------------- /resources/neutron_agents_dhcp_puppet/actions/remove.pp: -------------------------------------------------------------------------------- 1 | class { 'neutron::agents::dhcp': 2 | package_ensure => 'absent', 3 | enabled => false, 4 | } 5 | 6 | include neutron::params 7 | 8 | package { 'neutron': 9 | ensure => 'absent', 10 | name => $::neutron::params::package_name, 11 | } 12 | 13 | # Remove external class dependency 14 | Service <| title == 'neutron-dhcp-service' |> { 15 | require => undef 16 | } -------------------------------------------------------------------------------- /resources/neutron_agents_metadata_puppet/actions/remove.pp: -------------------------------------------------------------------------------- 1 | class { 'neutron::agents::metadata': 2 | package_ensure => 'absent', 3 | enabled => false, 4 | } 5 | 6 | include neutron::params 7 | 8 | package { 'neutron': 9 | ensure => 'absent', 10 | name => $::neutron::params::package_name, 11 | } 12 | 13 | # Remove external class dependency 14 | Service <| title == 'neutron-metadata' |> { 15 | require => undef 16 | } -------------------------------------------------------------------------------- /examples/provisioning/provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 6 | 7 | # Remove generated pxe exclude files 8 | sudo rm -f /etc/dnsmasq.d/no_pxe_*.conf 9 | sudo service dnsmasq restart 10 | 11 | solar resource clear_all 12 | python "${DIR}"/provision.py 13 | 14 | solar changes stage 15 | solar changes process 16 | solar orch run-once last 17 | watch --color -n1 'solar orch report last' 18 | -------------------------------------------------------------------------------- /examples/bootstrap/README.md: -------------------------------------------------------------------------------- 1 | # Demo of the `solar_bootstrap` Resource 2 | 3 | You need to instantiate Vagrant with a slave node which is unprovisioned 4 | (i.e. started from the `trusty64` Vagrant box). 5 | 6 | You can start the boxes from the `Vagrantfile` in master directory and 7 | `vagrant-settings.yml` from this directory. 8 | 9 | Running 10 | ```bash 11 | python example-bootstrap.py deploy 12 | ``` 13 | will deploy full Solar env to node `solar-dev2`. 
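For reference, the whole flow looks roughly like this (a sketch; `solar-dev` as the master box name is an assumption, `solar-dev2` comes from this README):

```bash
vagrant up solar-dev solar-dev2      # bring up the master and the unprovisioned slave
python example-bootstrap.py deploy   # stage and run the bootstrap deployment
solar orch report last               # watch progress, as in the other examples
```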
14 | -------------------------------------------------------------------------------- /resources/mariadb_user/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: mariadb user 5 | mysql_user: 6 | name: {{ user_name }} 7 | password: {{ user_password }} 8 | priv: {{ db_name }}.*:ALL 9 | host: '%' 10 | state: present 11 | login_user: root 12 | login_password: {{ login_password }} 13 | login_port: {{ login_port }} 14 | login_host: {{db_host}} 15 | -------------------------------------------------------------------------------- /resources/transport_ssh/meta.yaml: -------------------------------------------------------------------------------- 1 | id: transport_ssh 2 | input: 3 | ssh_key: 4 | schema: str! 5 | value: 6 | ssh_user: 7 | schema: str! 8 | value: 9 | ssh_port: 10 | schema: int! 11 | value: 22 12 | name: 13 | schema: str! 14 | value: ssh 15 | location_id: 16 | schema: str 17 | value: 18 | reverse: True 19 | is_own: False 20 | transports_id: 21 | schema: str 22 | value: 23 | is_emit: False 24 | -------------------------------------------------------------------------------- /resources/ceph_keys/actions/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BASE_PATH={{ target_directory }} 4 | KEY_NAME={{ key_name }} 5 | 6 | function generate_ssh_keys { 7 | local dir_path=$BASE_PATH$KEY_NAME/ 8 | local key_path=$dir_path$KEY_NAME 9 | mkdir -p $dir_path 10 | if [ ! -f $key_path ]; then 11 | ssh-keygen -b 2048 -t rsa -N '' -f $key_path 2>&1 12 | else 13 | echo "Key $key_path already exists" 14 | fi 15 | } 16 | 17 | generate_ssh_keys 18 | -------------------------------------------------------------------------------- /resources/librarian/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | # XXX: check apt package http://packages.ubuntu.com/trusty/ruby/librarian-puppet 5 | - shell: gem install librarian-puppet --no-ri --no-rdoc 6 | - file: path={{modules_path}} state=directory 7 | - template: 8 | src={{templates_dir}}/Puppetfile 9 | dest={{modules_path}}/../Puppetfile 10 | - shell: librarian-puppet install chdir={{modules_path}} 11 | -------------------------------------------------------------------------------- /resources/nova_api_puppet/actions/remove.pp: -------------------------------------------------------------------------------- 1 | class { 'nova::api': 2 | ensure_package => 'absent', 3 | enabled => false, 4 | admin_password => 'not important as removed' 5 | } 6 | 7 | include nova::params 8 | 9 | exec { 'post-nova_config': 10 | command => '/bin/echo "Nova config has changed"', 11 | refreshonly => true, 12 | } 13 | 14 | package { 'nova-common': 15 | name => $nova::params::common_package_name, 16 | ensure => 'absent', 17 | } 18 | -------------------------------------------------------------------------------- /resources/ro_node/meta.yaml: -------------------------------------------------------------------------------- 1 | id: ro_node 2 | handler: none 3 | version: 1.0.0 4 | actions: 5 | input: 6 | ip: 7 | schema: str! 8 | value: 9 | # ssh_key: 10 | # schema: str! 11 | # value: 12 | # ssh_user: 13 | # schema: str! 14 | # value: 15 | name: 16 | schema: str 17 | value: a node 18 | location_id: 19 | schema: str!
20 | value: $uuid 21 | reverse: True 22 | 23 | tags: [resources=node] 24 | -------------------------------------------------------------------------------- /resources/vxlan_mesh/meta.yaml: -------------------------------------------------------------------------------- 1 | id: vxlan_mesh 2 | handler: ansible_playbook 3 | version: 1.0.0 4 | actions: 5 | input: 6 | ip: 7 | schema: str! 8 | value: 9 | # ssh_key: 10 | # schema: str! 11 | # value: 12 | # ssh_user: 13 | # schema: str! 14 | # value: 15 | parent: 16 | schema: str! 17 | value: 18 | master: 19 | schema: str! 20 | value: 21 | id: 22 | schema: int! 23 | value: 24 | -------------------------------------------------------------------------------- /resources/rabbitmq_config/meta.yaml: -------------------------------------------------------------------------------- 1 | id: rabbitmq_config 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | config_dir: 6 | schema: str! 7 | value: 8 | admin_name: 9 | schema: str! 10 | value: 11 | admin_password: 12 | schema: str! 13 | value: 14 | ip: 15 | schema: str! 16 | value: 17 | # ssh_key: 18 | # schema: str! 19 | # value: 20 | # ssh_user: 21 | # schema: str! 22 | # value: 23 | -------------------------------------------------------------------------------- /resources/fuel_library/actions/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p {{temp_directory}} 4 | 5 | pushd {{temp_directory}} 6 | if [ ! -d fuel-library ] 7 | then 8 | git clone -b {{ git['branch'] }} {{ git['repository'] }} 9 | else 10 | pushd ./fuel-library 11 | git pull 12 | popd 13 | fi 14 | pushd ./fuel-library/deployment 15 | ./update_modules.sh 16 | popd 17 | 18 | mkdir -p {{puppet_modules}} 19 | cp -r ./fuel-library/deployment/puppet/* {{puppet_modules}} 20 | popd 21 | -------------------------------------------------------------------------------- /bootstrap/playbooks/files/update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | if [[ $UPDATE =~ true || $UPDATE =~ 1 || $UPDATE =~ yes ]]; then 4 | echo "==> Updating list of repositories" 5 | # apt-get update does not actually perform updates, it just downloads and indexes the list of packages 6 | apt-get -y update 7 | apt-get -y upgrade 8 | echo "==> Performing dist-upgrade (all packages and kernel)" 9 | apt-get -y dist-upgrade --force-yes 10 | reboot 11 | sleep 160 12 | fi 13 | -------------------------------------------------------------------------------- /resources/ssh_key/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: '*' 2 | sudo: yes 3 | gather_facts: false 4 | # these are default variables; they will be overwritten by the resource's values 5 | vars: 6 | keys_dir: /vagrant/.ssh 7 | private_key: /vagrant/.ssh/id_rsa 8 | passphrase: '' 9 | tasks: 10 | - shell: mkdir -p {{keys_dir}} 11 | - stat: path={{private_key}} 12 | register: key 13 | - shell: ssh-keygen -t rsa -f {{private_key}} -N "" 14 | when: key.stat.exists == False 15 | -------------------------------------------------------------------------------- /solar/test/orch_fixtures/README.md: -------------------------------------------------------------------------------- 1 | # Orchestration fixtures 2 | These fixtures will later be used for functional tests 3 | 4 | * Create a plan from a fixture 5 | ``` 6 | solar o create solar/solar/test/orch_fixtures/simple.yaml 7 | simple:ebd342cb-b770-4795-9f4c-04cb41c81169 8 | ``` 9 | 10 | * Run
this plan 11 | ``` 12 | solar o run-once simple:ebd342cb-b770-4795-9f4c-04cb41c81169 13 | ``` 14 | 15 | * Report progress 16 | ``` 17 | solar o report simple:ebd342cb-b770-4795-9f4c-04cb41c81169 18 | ``` -------------------------------------------------------------------------------- /resources/mariadb_user/actions/update.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: mariadb user 5 | mysql_user: 6 | name: {{ user_name }} 7 | password: {{ user_password }} 8 | priv: {{ db_name }}.*:ALL 9 | host: '%' 10 | state: present 11 | update_password: always 12 | login_user: root 13 | login_password: {{ login_password }} 14 | login_port: {{ login_port }} 15 | login_host: {{db_host}} 16 | -------------------------------------------------------------------------------- /resources/not_provisioned_node/templates/cloud-init-templates/meta-data_centos.jinja2: -------------------------------------------------------------------------------- 1 | # instance-id will be autogenerated 2 | # instance-id: iid-abcdefg 3 | #network-interfaces: | 4 | # auto {{ common.admin_iface_name|default("eth0") }} 5 | # iface {{ common.admin_iface_name|default("eth0") }} inet static 6 | # address {{ common.admin_ip }} 7 | # # network 192.168.1.0 8 | # netmask {{ common.admin_mask }} 9 | # # broadcast 192.168.1.255 10 | # # gateway 192.168.1.254 11 | hostname: {{ common.hostname }} 12 | -------------------------------------------------------------------------------- /resources/not_provisioned_node/templates/cloud-init-templates/meta-data_ubuntu.jinja2: -------------------------------------------------------------------------------- 1 | # instance-id will be autogenerated 2 | # instance-id: iid-abcdefg 3 | #network-interfaces: | 4 | # auto {{ common.admin_iface_name|default("eth0") }} 5 | # iface {{ common.admin_iface_name|default("eth0") }} inet static 6 | # address {{ common.admin_ip }} 7 | # # network 192.168.1.0 8 | # netmask {{ common.admin_mask }} 9 | # # broadcast 192.168.1.255 10 | # # gateway 192.168.1.254 11 | hostname: {{ common.hostname }} 12 | -------------------------------------------------------------------------------- /resources/vxlan_mesh/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: '*' 2 | sudo: yes 3 | vars: 4 | id: 42 5 | group: 239.1.10.2 6 | parent: eth1 7 | master: br-test0 8 | tasks: 9 | - name: add vxlan mesh 10 | shell: ip l add vxlan{{id}} type vxlan id {{id}} 11 | group {{group}} dev {{parent}} 12 | ignore_errors: true 13 | - name: set vxlan master 14 | shell: ip l set vxlan{{id}} master {{master}} 15 | - name: set vxlan tunnel up 16 | shell: ip l set vxlan{{id}} up 17 | -------------------------------------------------------------------------------- /config.yaml: -------------------------------------------------------------------------------- 1 | clients-data-file: /tmp/connections.yaml 2 | 3 | tmp: /tmp 4 | 5 | examples-dir: /vagrant/examples 6 | 7 | extensions-dir: /vagrant/solar/solar/extensions 8 | 9 | file-system-db: 10 | storage-path: /tmp/storage 11 | 12 | template-dir: /vagrant/templates 13 | 14 | resources-directory: /tmp/git 15 | resources-files-mask: /vagrant/resources/*/*.yaml 16 | resources-compiled-file: /vagrant/resources_compiled.py 17 | node_resource_template: /vagrant/resources/ro_node/ 18 | 19 | state: /tmp/state/ 20 | 21 | -------------------------------------------------------------------------------- 
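A note on the config file above: judging from `.travis.yml` (later in this listing), the active config and individual settings can be overridden through `SOLAR_`-prefixed environment variables; a minimal sketch, assuming that convention:

```bash
export SOLAR_CONFIG=/vagrant/.config   # point Solar at an alternate config file
export SOLAR_SOLAR_DB_HOST=localhost   # per-setting override, as used in .travis.yml
solar resource clear_all               # subsequent solar commands pick up the overrides
```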
/examples/cli/example.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | function deploy { 5 | # this command will clean the db 6 | solar resource clear_all 7 | 8 | solar resource create nodes templates/nodes.yaml '{"count": 1}' 9 | solar resource create mariadb1 /vagrant/resources/mariadb_service image=mariadb port=3306 10 | solar connect node1 mariadb1 11 | 12 | solar changes stage 13 | solar changes process 14 | solar orch run-once last 15 | solar orch report last 16 | } 17 | 18 | deploy 19 | -------------------------------------------------------------------------------- /resources/not_provisioned_node/actions/provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 5 | 6 | # TODO should be a way to render configs, in order to do this 7 | # we should have scripts dir variable passed from above 8 | sed -i "s||${DIR}|" "${DIR}"/templates/agent.config 9 | 10 | provision --log-file /tmp/fa_provision.log -d --data_driver nailgun_simple --input_data_file "${DIR}"/templates/provisioning.json --config-file "${DIR}"/templates/agent.config 11 | -------------------------------------------------------------------------------- /resources/solar_bootstrap/actions/run.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # TODO: this shouldn't be outside of the resource directory 4 | - hosts: all 5 | sudo: yes 6 | tasks: 7 | - script: /vagrant/bootstrap/playbooks/files/ubuntu-ansible.sh 8 | #- include: celery.yaml tags=['master'] celery_dir=/var/run/celery 9 | - include: /vagrant/bootstrap/playbooks/build-main.yaml 10 | - include: /vagrant/bootstrap/playbooks/custom-configs.yaml master_ip={{ master_ip }} 11 | - include: /vagrant/bootstrap/playbooks/celery.yaml tags=slave 12 | -------------------------------------------------------------------------------- /resources/fuel_library/meta.yaml: -------------------------------------------------------------------------------- 1 | id: fuel_library 2 | handler: shell 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | git: 9 | schema: {repository: str!, branch: str!} 10 | value: {repository: 'https://github.com/stackforge/fuel-library', 11 | branch: 'stable/7.0'} 12 | temp_directory: 13 | schema: str! 14 | value: /tmp/solar 15 | puppet_modules: 16 | schema: str! 17 | value: /etc/fuel/modules 18 | tags: [] 19 | -------------------------------------------------------------------------------- /resources/nova_compute_puppet/actions/remove.pp: -------------------------------------------------------------------------------- 1 | class { 'nova::compute': 2 | ensure_package => 'absent', 3 | enabled => false, 4 | } 5 | 6 | include nova::params 7 | 8 | exec { 'post-nova_config': 9 | command => '/bin/echo "Nova config has changed"', 10 | refreshonly => true, 11 | } 12 | 13 | exec { 'networking-refresh': 14 | command => '/sbin/ifdown -a ; /sbin/ifup -a', 15 | } 16 | 17 | package { 'nova-common': 18 | name => $nova::params::common_package_name, 19 | ensure => 'absent', 20 | } -------------------------------------------------------------------------------- /resources/apt_repo/meta.yaml: -------------------------------------------------------------------------------- 1 | id: apt_repo 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | repo: 9 | schema: str!
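# repo is presumably the full apt source line rendered into templates/source, e.g. "deb http://archive.ubuntu.com/ubuntu trusty main"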
10 | value: 11 | name: 12 | schema: str! 13 | value: 14 | package: 15 | schema: str 16 | value: '*' 17 | pin: 18 | schema: str 19 | value: 20 | pin_priority: 21 | schema: int 22 | value: 23 | validate_integrity: 24 | schema: bool 25 | value: true 26 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | six>=1.9.0 2 | ply 3 | click==4.0 4 | jinja2==2.7.3 5 | networkx>=1.10 6 | PyYAML 7 | jsonschema==2.4.0 8 | requests 9 | dictdiffer==0.4.0 10 | enum34==1.0.4 11 | redis==2.10.3 12 | fakeredis 13 | inflection 14 | Fabric==1.10.2 15 | tabulate==0.7.5 16 | ansible 17 | celery 18 | mock 19 | multipledispatch==0.4.8 20 | pbr 21 | pydot 22 | bunch 23 | # if you want to use riak backend then 24 | riak 25 | # if you want to use sql backend then 26 | # peewee 27 | 28 | 29 | # if you want to use lua computable inputs 30 | # lupa 31 | -------------------------------------------------------------------------------- /resources/ssh_key/meta.yaml: -------------------------------------------------------------------------------- 1 | id: ssh_key 2 | handler: ansible_playbook 3 | version: 1.0.0 4 | actions: 5 | input: 6 | ip: 7 | schema: str! 8 | value: 9 | # ssh_key: 10 | # schema: str! 11 | # value: 12 | # ssh_user: 13 | # schema: str! 14 | # value: 15 | keys_dir: 16 | schema: str! 17 | value: 18 | private_key: 19 | schema: str! 20 | value: 21 | public_key: 22 | schema: str! 23 | value: 24 | passphrase: 25 | schema: str 26 | value: 27 | -------------------------------------------------------------------------------- /resources/keystone_user/test.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from solar.core.log import log 4 | from solar.core import validation 5 | 6 | 7 | def test(resource): 8 | log.debug('Testing keystone_user %s', resource.args['user_name']) 9 | 10 | args = resource.args 11 | 12 | token, _ = validation.validate_token( 13 | keystone_host=args['keystone_host'], 14 | keystone_port=args['keystone_port'], 15 | user=args['user_name'], 16 | tenant=args['tenant_name'], 17 | password=args['user_password'], 18 | ) 19 | -------------------------------------------------------------------------------- /resources/haproxy_service_config/meta.yaml: -------------------------------------------------------------------------------- 1 | id: haproxy_service_config 2 | handler: none 3 | version: 1.0.0 4 | input: 5 | name: 6 | schema: str! 7 | value: general_haproxy 8 | backends: 9 | schema: [{server: str!, port: int!}] 10 | listen_port: 11 | schema: int! 12 | value: 9999 13 | protocol: 14 | schema: str! 
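# note: instances of this resource are collected by haproxy_config, which renders them all into the single haproxy config file (see resources/haproxy_config/README.md)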
15 | value: http 16 | # ports: 17 | # schema: [int] 18 | # value: [] 19 | # servers: 20 | # schema: [str] 21 | # value: [] 22 | 23 | tags: [resources=haproxy] 24 | -------------------------------------------------------------------------------- /resources/keystone_service_endpoint/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{ host }}] 2 | sudo: yes 3 | tasks: 4 | - name: keystone service and endpoint 5 | keystone_service: 6 | token: {{admin_token}} 7 | name: {{endpoint_name}} 8 | type: {{type}} 9 | description: {{description}} 10 | publicurl: {{publicurl}} 11 | internalurl: {{internalurl}} 12 | adminurl: {{adminurl}} 13 | region: "RegionOne" 14 | state: present 15 | endpoint: http://{{keystone_host}}:{{keystone_admin_port}}/v2.0/ 16 | -------------------------------------------------------------------------------- /resources/volume_group/meta.yaml: -------------------------------------------------------------------------------- 1 | id: volume_group 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | volume_name: 6 | schema: str! 7 | value: 8 | path: 9 | schema: str! 10 | value: 11 | 12 | # not used, for now all VGs are file based 13 | type: 14 | schema: str! 15 | value: 'file' 16 | 17 | ip: 18 | schema: str! 19 | value: 20 | # ssh_key: 21 | # schema: str! 22 | # value: 23 | # ssh_user: 24 | # schema: str! 25 | # value: 26 | 27 | tags: [resource/volume_group] 28 | -------------------------------------------------------------------------------- /examples/librarian/librarian.yaml: -------------------------------------------------------------------------------- 1 | id: librarian_examples 2 | 3 | resources: 4 | - id: rabbitmq_service1 5 | from: resources/rabbitmq_service 6 | location: {{node}} 7 | values: 8 | management_port: 15672 9 | port: 5672 10 | 11 | - id: librarian 12 | location: {{node}} 13 | from: resources/librarian 14 | values: 15 | modules: 16 | - rabbitmq_service1::module::NO_EVENTS 17 | 18 | events: 19 | - type: depends_on 20 | parent_action: librarian.run 21 | state: success 22 | depend_action: rabbitmq_service1.run 23 | -------------------------------------------------------------------------------- /resources/cinder_scheduler_puppet/actions/run.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $scheduler_driver = $resource['input']['scheduler_driver'] 4 | $package_ensure = $resource['input']['package_ensure'] 5 | 6 | include cinder::params 7 | 8 | package { 'cinder': 9 | ensure => $package_ensure, 10 | name => $::cinder::params::package_name, 11 | } -> 12 | 13 | class {'cinder::scheduler': 14 | scheduler_driver => $scheduler_driver, 15 | package_ensure => $package_ensure, 16 | enabled => true, 17 | manage_service => true, 18 | } 19 | -------------------------------------------------------------------------------- /resources/keystone_service/README.md: -------------------------------------------------------------------------------- 1 | # `keystone_service` resource 2 | 3 | This resource sets up a Docker container with Keystone code. It requires 4 | config to be provided by the `keystone_config` resource (mounted under 5 | `/etc/keystone`). 6 | 7 | Basically, the philosophy behind containers in Solar is to have stateless 8 | containers with service code and mount stateful resources with config, 9 | volumes, etc. to that container. 
Upgrading the code then simply means 10 | replacing the stateless container with a new one and remounting the state to 11 | that new container. 12 | -------------------------------------------------------------------------------- /templates/keystone_api.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_api_{{idx}} 2 | 3 | resources: 4 | - id: keystone_service_{{idx}} 5 | from: resources/keystone_puppet 6 | values: 7 | admin_token: '{{admin_token}}' 8 | db_host: '{{db_host}}' 9 | db_name: '{{db_name}}' 10 | db_user: '{{db_user}}' 11 | db_password: '{{db_password}}' 12 | 13 | admin_port: {{admin_port}} 14 | port: {{port}} 15 | ip: '{{ip}}' 16 | ssh_user: '{{ssh_user}}' 17 | ssh_key: '{{ssh_key}}' 18 | 19 | 20 | tags: ['resources/keystone', 'resource/keystone_api'] 21 | -------------------------------------------------------------------------------- /resources/volume_group/actions/remove.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{ host }}] 2 | sudo: yes 3 | tasks: 4 | - name: remove VG 5 | lvg: vg={{name}} state=absent force=yes 6 | - name: find loop device 7 | shell: losetup -a|grep "{{path}}"|awk -F':' '{print $1}' 8 | register: loop_device 9 | - name: if loop device exists, delete it 10 | command: sudo losetup -d {% raw %}{{item}}{% endraw %} 11 | when: loop_device|success 12 | with_items: loop_device.stdout_lines 13 | - name: remove file 14 | file: path={{path}} state=absent 15 | 16 | 17 | -------------------------------------------------------------------------------- /resources/haproxy_config/meta.yaml: -------------------------------------------------------------------------------- 1 | id: haproxy_config 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | config_dir: 9 | schema: {src: str!, dst: str!} 10 | value: {src: /etc/solar/haproxy, dst: /etc/haproxy} 11 | config: 12 | schema: [{backends: [{server: str!, port: int!}], listen_port: int!, protocol: str!, name: str!}] 13 | value: [{}] 14 | # ssh_user: 15 | # schema: str! 16 | # value: 17 | # ssh_key: 18 | # schema: str! 19 | # value: 20 | 21 | tags: [resources=haproxy] 22 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 2.7 3 | sudo: false 4 | env: 5 | - PIP_ACCEL_CACHE=$HOME/.pip-accel-cache SOLAR_CONFIG=$TRAVIS_BUILD_DIR/.config SOLAR_SOLAR_DB_HOST=localhost 6 | cache: 7 | directories: 8 | - $HOME/.pip-accel-cache 9 | install: 10 | - pip install pip-accel 11 | - pip-accel install coveralls 12 | - pip-accel install -r test-requirements.txt 13 | script: 14 | - tox -e pep8 && py.test --cov=solar -s solar 15 | services: 16 | - riak 17 | after_success: 18 | coveralls 19 | addons: 20 | apt: 21 | packages: 22 | - libluajit-5.1-dev 23 | -------------------------------------------------------------------------------- /resources/haproxy_config/README.md: -------------------------------------------------------------------------------- 1 | # `haproxy_config` resource 2 | 3 | This resource represents configuration for the `haproxy_service` resource. 4 | Each service represented by Haproxy is connected to this resource via a 5 | `haproxy_service_config` resource. This is because Haproxy has no
This is because in Haproxy there is no 6 | support for something like `/etc/haproxy/conf.d` directory where you put 7 | each config in a separate file, but instead you must collect all configuration 8 | in one file. 9 | 10 | So this resource renders this file from data provided by collecting individual 11 | `haproxy_service_config` data. 12 | -------------------------------------------------------------------------------- /resources/openrc_file/meta.yaml: -------------------------------------------------------------------------------- 1 | id: nova_config 2 | handler: ansible 3 | version: 1.0.0 4 | 5 | input: 6 | keystone_host: 7 | schema: str! 8 | value: 9 | keystone_port: 10 | schema: int! 11 | value: 12 | tenant: 13 | schema: str! 14 | value: 15 | user_name: 16 | schema: str! 17 | value: 18 | password: 19 | schema: str! 20 | value: 21 | ip: 22 | schema: str! 23 | value: 24 | # ssh_key: 25 | # schema: str! 26 | # value: 27 | # ssh_user: 28 | # schema: str! 29 | # value: 30 | -------------------------------------------------------------------------------- /solar/test/orch_fixtures/two_path.yaml: -------------------------------------------------------------------------------- 1 | name: two_path 2 | tasks: 3 | - uid: a 4 | parameters: 5 | type: echo 6 | args: [a] 7 | - uid: b 8 | parameters: 9 | type: echo 10 | args: [b] 11 | after: [a] 12 | 13 | 14 | - uid: c 15 | parameters: 16 | type: echo 17 | args: [c] 18 | - uid: d 19 | parameters: 20 | type: echo 21 | args: [d] 22 | after: [c] 23 | 24 | - uid: e 25 | parameters: 26 | type: echo 27 | args: [e] 28 | after: [b,d] 29 | -------------------------------------------------------------------------------- /resources/transport_solar_agent/meta.yaml: -------------------------------------------------------------------------------- 1 | id: transport_solar_agent 2 | handler: ansible 3 | input: 4 | solar_agent_user: 5 | schema: str! 6 | value: 7 | solar_agent_password: 8 | schema: str! 9 | value: 10 | # solar_agent_transport_class: 11 | # schema: str! 12 | # value: 13 | solar_agent_port: 14 | schema: int! 15 | value: 5555 16 | name: 17 | schema: str! 
18 | value: solar_agent 19 | location_id: 20 | schema: str 21 | value: 22 | reverse: True 23 | is_own: False 24 | transports_id: 25 | schema: str 26 | is_emit: False 27 | -------------------------------------------------------------------------------- /bootstrap/playbooks/tasks/puppet.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Puppet 4 | - shell: sudo wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb -O /root/puppetlabs-release-trusty.deb 5 | - shell: sudo dpkg -i /root/puppetlabs-release-trusty.deb 6 | - shell: sudo apt-get update 7 | 8 | - apt: name=puppet state=present 9 | - template: src=files/hiera.yaml dest=/etc/puppet/hiera.yaml 10 | - file: path=/etc/puppet/hieradata state=directory 11 | # Make paths puppet 4 compatible 12 | - file: path=/etc/puppetlabs/code/ state=directory 13 | - file: src=/etc/puppet/hiera.yaml dest=/etc/puppetlabs/code/hiera.yaml state=link 14 | -------------------------------------------------------------------------------- /examples/openstack/rabbitmq_user.yaml: -------------------------------------------------------------------------------- 1 | id: primary_controller 2 | 3 | resources: 4 | - id: rabbit_user 5 | from: resources/rabbitmq_user 6 | location: {{node}} 7 | values: 8 | user_name: {{user_name}} 9 | password: {{password}} 10 | vhost_name: {{vhost_res}}::vhost_name 11 | 12 | updates: 13 | - id: {{for}} 14 | values: 15 | {{for_user}}: rabbit_user::user_name 16 | {{for_password}}: rabbit_user::password 17 | 18 | events: 19 | - type: depends_on 20 | parent_action: rabbit_user.run 21 | state: success 22 | depend_action: {{for}}.update 23 | -------------------------------------------------------------------------------- /resources/keystone_tenant/meta.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_tenant 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | keystone_host: 6 | schema: str! 7 | value: 8 | keystone_port: 9 | schema: int! 10 | value: 11 | admin_token: 12 | schema: str! 13 | value: 14 | tenant_name: 15 | schema: str! 16 | value: admin 17 | ip: 18 | schema: str! 19 | value: 20 | # ssh_key: 21 | # schema: str! 22 | # value: 23 | # ssh_user: 24 | # schema: str! 25 | # value: 26 | 27 | tags: [resource/keystone_tenant, resources/keystone] 28 | -------------------------------------------------------------------------------- /resources/rabbitmq_service/meta.yaml: -------------------------------------------------------------------------------- 1 | handler: puppet 2 | id: 'rabbitmq' 3 | input: 4 | ip: 5 | schema: str! 6 | value: 7 | # ssh_key: 8 | # schema: str! 9 | # value: 10 | # ssh_user: 11 | # schema: str! 12 | # value: 13 | 14 | port: 15 | schema: int! 16 | value: 5672 17 | management_port: 18 | schema: int! 
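# passed through to the ::rabbitmq puppet class in actions/run.pp (port of the management plugin)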
19 | value: 15672 20 | module: 21 | schema: {name: str!, type: str, url: str, ref: str} 22 | value: {name: 'rabbitmq', type: 'git', url: 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git', ref: '5.1.0'} 23 | 24 | tags: [] 25 | version: 1.0.0 26 | -------------------------------------------------------------------------------- /bootstrap/playbooks/tasks/mos.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - shell: apt-get update 4 | - shell: apt-get -y upgrade 5 | - apt_repository: repo='deb http://fuel-repository.mirantis.com/fwm/6.1/ubuntu mos6.1 main' validate_certs=no 6 | - shell: echo 'APT::Get::AllowUnauthenticated "true";' > /etc/apt/apt.conf.d/99mos61 7 | - shell: echo 'Package: *' > /etc/apt/preferences.d/mos.pref 8 | - shell: echo 'Pin: release o=Mirantis,a=mos6.1,n=mos6.1,l=mos6.1' >> /etc/apt/preferences.d/mos.pref 9 | - shell: echo 'Pin-Priority: 1050' >> /etc/apt/preferences.d/mos.pref 10 | - shell: apt-get update 11 | - shell: apt-get update --fix-missing 12 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | WORKDIR / 4 | 5 | ADD bootstrap/playbooks/celery.yaml /celery.yaml 6 | ADD resources /resources 7 | ADD templates /templates 8 | ADD run.sh /run.sh 9 | 10 | RUN apt-get update 11 | RUN apt-get install -y python python-dev python-distribute python-pip \ 12 | libyaml-dev vim libffi-dev libssl-dev git 13 | RUN pip install ansible 14 | 15 | RUN pip install git+https://github.com/Mirantis/solar.git 16 | RUN pip install git+https://github.com/Mirantis/solar-agent.git 17 | 18 | RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --tags install 19 | 20 | CMD ["/run.sh"] 21 | -------------------------------------------------------------------------------- /resources/librarian/templates/Puppetfile: -------------------------------------------------------------------------------- 1 | forge "{{forge}}" 2 | 3 | {%- for module in modules %} 4 | 5 | {% if 'type' not in module or module.type == 'forge' -%} 6 | mod '{{module.name}}' 7 | {%- if 'version' in module -%} 8 | , '{{module.version}}' 9 | {%- endif -%} 10 | {%- endif -%} 11 | 12 | {%- if 'type' in module and module.type == 'git' -%} 13 | mod '{{module.name}}' 14 | {%- if 'url' in module -%} 15 | , 16 | :git => '{{module.url}}' 17 | {%- endif -%} 18 | {%- if 'ref' in module -%} 19 | , 20 | :ref => '{{module.ref}}' 21 | {%- endif -%} 22 | {%- endif -%} 23 | 24 | {%- endfor -%} 25 | -------------------------------------------------------------------------------- /resources/rabbitmq_user/meta.yaml: -------------------------------------------------------------------------------- 1 | id: rabbitmq_user 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | user_name: 6 | schema: str! 7 | value: openstack 8 | password: 9 | schema: str! 10 | value: openstack_password 11 | vhost_name: 12 | schema: str! 13 | value: 14 | tags: 15 | schema: str 16 | value: 'management' 17 | ip: 18 | schema: str! 19 | value: 20 | # ssh_key: 21 | # schema: str! 22 | # value: 23 | # ssh_user: 24 | # schema: str! 
25 | # value: 26 | 27 | tags: [resources/rabbitmq, resource/rabbitmq_user] 28 | -------------------------------------------------------------------------------- /resources/keystone_service/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - name: keystone container 5 | docker: 6 | command: /bin/bash -c "keystone-manage db_sync && /usr/bin/keystone-all" 7 | name: {{ resource_name }} 8 | image: {{ image }} 9 | state: running 10 | expose: 11 | - 5000 12 | - 35357 13 | ports: 14 | - {{ port }}:5000 15 | - {{ admin_port }}:35357 16 | volumes: 17 | - {{ config_dir }}:/etc/keystone 18 | - name: wait for keystone 19 | wait_for: host={{ip}} port={{port}} timeout=20 20 | -------------------------------------------------------------------------------- /resources/transport_solar_agent/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{ host }}] 2 | sudo: yes 3 | tasks: 4 | - shell: pip install git+git://github.com/Mirantis/solar-agent.git 5 | - shell: start-stop-daemon --stop --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" 6 | ignore_errors: True 7 | - shell: start-stop-daemon -b --start --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" 8 | -------------------------------------------------------------------------------- /resources/transport_solar_agent/actions/update.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{ host }}] 2 | sudo: yes 3 | tasks: 4 | - shell: pip install git+git://github.com/Mirantis/solar-agent.git 5 | - shell: start-stop-daemon --stop --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" 6 | ignore_errors: True 7 | - shell: start-stop-daemon -b --start --make-pidfile --pidfile /tmp/solar_agent.pid --startas /bin/bash -- -c "exec /usr/local/bin/solar_agent run --port {{solar_agent_port}} --base tcp > /tmp/solar_agent.log 2>&1" 8 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. Solar documentation master file, created by 2 | sphinx-quickstart on Thu Nov 26 12:41:37 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Solar's documentation! 7 | ================================= 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | glossary 15 | resource 16 | orchestration 17 | transports 18 | handler_ansible 19 | examples 20 | deployment_plan 21 | tutorials/index 22 | 23 | 24 | Indices and tables 25 | ================== 26 | 27 | * :ref:`search` 28 | 29 | -------------------------------------------------------------------------------- /resources/keystone_service/meta.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_service 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | image: 6 | schema: str! 7 | value: kollaglue/centos-rdo-j-keystone 8 | config_dir: 9 | schema: str! 
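# host-side config directory (filled by the keystone_config resource); the run action mounts it into the container at /etc/keystone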
10 | value: /etc/solar/keystone 11 | port: 12 | schema: int! 13 | value: 5000 14 | admin_port: 15 | schema: int! 16 | value: 35357 17 | ip: 18 | schema: str! 19 | value: 20 | # ssh_key: 21 | # schema: str! 22 | # value: 23 | # ssh_user: 24 | # schema: str! 25 | # value: 26 | 27 | tags: [resource/keystone_service, resources/keystone] 28 | -------------------------------------------------------------------------------- /resources/keystone_service_endpoint/actions/remove.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{ host }}] 2 | sudo: yes 3 | vars: 4 | ip: {{ip}} 5 | port: {{port}} 6 | admin_port: {{admin_port}} 7 | tasks: 8 | - name: remove keystone service and endpoint 9 | keystone_service: 10 | token: {{admin_token}} 11 | name: {{name}} 12 | type: {{type}} 13 | description: {{description}} 14 | publicurl: {{publicurl}} 15 | internalurl: {{internalurl}} 16 | adminurl: {{adminurl}} 17 | region: "RegionOne" 18 | state: absent 19 | endpoint: http://{{keystone_host}}:{{keystone_admin_port}}/v2.0/ 20 | 21 | -------------------------------------------------------------------------------- /resources/nova_conductor_puppet/README.md: -------------------------------------------------------------------------------- 1 | # Nova conductor resource for puppet handler 2 | 3 | Set up and configure the Nova conductor service. 4 | Note, it [should not](http://docs.openstack.org/juno/config-reference/content/section_conductor.html) be deployed on compute nodes. 5 | 6 | # Parameters 7 | 8 | source https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/conductor.pp 9 | 10 | ``ensure_package`` 11 | (optional) The state of the nova conductor package 12 | Defaults to 'present' 13 | 14 | ``workers`` 15 | (optional) Number of workers for the OpenStack Conductor service 16 | Defaults to undef (i.e. the parameter will not be present) -------------------------------------------------------------------------------- /resources/cinder_scheduler_puppet/actions/update.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $scheduler_driver = $resource['input']['scheduler_driver'] 4 | $package_ensure = $resource['input']['package_ensure'] 5 | 6 | include cinder::params 7 | 8 | package { 'cinder': 9 | ensure => $package_ensure, 10 | name => $::cinder::params::package_name, 11 | } -> 12 | 13 | class {'cinder::scheduler': 14 | scheduler_driver => $scheduler_driver, 15 | package_ensure => $package_ensure, 16 | enabled => true, 17 | manage_service => true, 18 | } 19 | 20 | notify { "restart cinder scheduler": 21 | notify => Service["cinder-scheduler"], 22 | } 23 | -------------------------------------------------------------------------------- /bootstrap/playbooks/solar.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | sudo: yes 5 | tasks: 6 | # upgrade pbr first, old version throws strange errors 7 | - shell: pip install pbr -U 8 | # Setup development env for solar 9 | - shell: pip install -e .
chdir=/vagrant 10 | - shell: pip install git+git://github.com/Mirantis/solar-agent.git 11 | 12 | - hosts: all 13 | tasks: 14 | - lineinfile: 15 | dest: /home/vagrant/.bashrc 16 | line: eval "$(_SOLAR_COMPLETE=source solar)" 17 | state: present 18 | - lineinfile: 19 | dest: /home/vagrant/.bashrc 20 | line: export PYTHONWARNINGS="ignore" 21 | state: present 22 | -------------------------------------------------------------------------------- /resources/nova_generic_service_puppet/actions/remove.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $service_title = $resource['input']['title'] 4 | $package_name = $resource['input']['package_name'] 5 | $service_name = $resource['input']['service_name'] 6 | 7 | exec { 'post-nova_config': 8 | command => '/bin/echo "Nova config has changed"', 9 | } 10 | 11 | nova::generic_service { $service_title: 12 | ensure_package => 'absent', 13 | enabled => false, 14 | package_name => $package_name, 15 | service_name => $service_name, 16 | } 17 | 18 | include nova::params 19 | 20 | package { 'nova-common': 21 | name => $nova::params::common_package_name, 22 | ensure => 'absent', 23 | } -------------------------------------------------------------------------------- /resources/mariadb_service/meta.yaml: -------------------------------------------------------------------------------- 1 | id: mariadb_service 2 | handler: ansible 3 | version: 1.0.0 4 | actions: 5 | run: run.yaml 6 | update: run.yaml 7 | input: 8 | image: 9 | schema: str! 10 | value: mariadb 11 | root_user: 12 | schema: str! 13 | value: root 14 | root_password: 15 | schema: str! 16 | value: mariadb 17 | port: 18 | schema: int! 19 | value: 3306 20 | ip: 21 | schema: str! 22 | value: 23 | # ssh_key: 24 | # schema: str! 25 | # value: 26 | # ssh_user: 27 | # schema: str!
28 | # value: 29 | 30 | tags: [resource/mariadb_service, resources/mariadb] 31 | -------------------------------------------------------------------------------- /templates/glance.yaml: -------------------------------------------------------------------------------- 1 | id: glance_{{idx}} 2 | 3 | resources: 4 | - id: glance_base_{{ idx }} 5 | from: templates/glance_db.yaml 6 | values: 7 | idx: '{{ idx }}' 8 | 9 | db_name: '{{ db_name }}' 10 | db_user: '{{ db_user }}' 11 | db_password: '{{ db_password }}' 12 | db_host: '{{ db_host }}' 13 | db_login_port: '{{ db_port }}' 14 | db_login_user: '{{ db_login_user }}' 15 | db_login_password: '{{ db_login_password }}' 16 | 17 | ip: '{{ ip }}' 18 | ssh_user: '{{ ssh_user }}' 19 | ssh_key: '{{ ssh_key }}' 20 | 21 | 22 | tags: ['resources/glance', 'resource/glance_api', 'resource/glance_registry'] 23 | 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # python 2 | *.egg-info 3 | .venv 4 | *.pyc 5 | 6 | # vagrant 7 | .vagrant 8 | 9 | tmp/ 10 | 11 | #vim 12 | *.swp 13 | 14 | .eggs 15 | build 16 | docs/_build 17 | 18 | state/ 19 | clients.json 20 | rs/ 21 | 22 | solar.log 23 | x-venv/ 24 | .tmp/ 25 | modules/ 26 | 27 | celery*.pid 28 | celery*.log 29 | 30 | *.dot 31 | *.png 32 | *.svg 33 | resources_compiled.py 34 | 35 | # bootstrap 36 | bootstrap/packer_cache 37 | bootstrap/trusty64 38 | bootstrap/solar-master.box 39 | vagrant-settings.yaml 40 | 41 | .solar_cli_uids 42 | 43 | .ssh/ 44 | .cache 45 | 46 | .tox 47 | 48 | .coverage 49 | 50 | 51 | # pytest cache 52 | solar/.cache 53 | .config.override 54 | 55 | .testrepository/ 56 | -------------------------------------------------------------------------------- /resources/docker_container/meta.yaml: -------------------------------------------------------------------------------- 1 | id: container 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | image: 9 | schema: str! 10 | value: 11 | ports: 12 | schema: [int] 13 | value: [] 14 | host_binds: 15 | schema: [{value: {src: str, dst: str, mode: str}}] 16 | value: [] 17 | volume_binds: 18 | schema: [{src: str, dst: str, mode: str}] 19 | value: [] 20 | env: 21 | schema: {} 22 | value: {} 23 | # ssh_user: 24 | # schema: str! 25 | # value: [] 26 | # ssh_key: 27 | # schema: str! 28 | # value: [] 29 | 30 | tags: [resource/container] 31 | -------------------------------------------------------------------------------- /resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_centos.jinja2: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | resize_rootfs: false 3 | growpart: 4 | mode: false 5 | disable_ec2_metadata: true 6 | disable_root: false 7 | 8 | # password: RANDOM 9 | # chpasswd: { expire: True } 10 | 11 | ssh_pwauth: false 12 | ssh_authorized_keys: 13 | {% for key in common.ssh_auth_keys %} 14 | - {{ key }} 15 | {% endfor %} 16 | 17 | # set the locale to a given locale 18 | # default: en_US.UTF-8 19 | locale: en_US.UTF-8 20 | 21 | timezone: {{ common.timezone }} 22 | 23 | hostname: {{ common.hostname }} 24 | fqdn: {{ common.fqdn }} 25 | 26 | final_message: "YAY! 
The system is finally up, after $UPTIME seconds" 27 | -------------------------------------------------------------------------------- /resources/not_provisioned_node/templates/cloud-init-templates/cloud_config_ubuntu.jinja2: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | resize_rootfs: false 3 | growpart: 4 | mode: false 5 | disable_ec2_metadata: true 6 | disable_root: false 7 | user: root 8 | password: r00tme 9 | chpasswd: { expire: false } 10 | ssh_pwauth: false 11 | ssh_authorized_keys: 12 | {% for key in common.ssh_auth_keys %} 13 | - {{ key }} 14 | {% endfor %} 15 | 16 | # set the locale to a given locale 17 | # default: en_US.UTF-8 18 | locale: en_US.UTF-8 19 | 20 | timezone: {{ common.timezone }} 21 | 22 | hostname: {{ common.hostname }} 23 | fqdn: {{ common.fqdn }} 24 | 25 | final_message: "YAY! The system is finally up, after $UPTIME seconds" 26 | -------------------------------------------------------------------------------- /resources/glance_registry_service/meta.yaml: -------------------------------------------------------------------------------- 1 | id: container 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | image: 9 | schema: str! 10 | value: cgenie/centos-rdo-glance-registry 11 | ports: 12 | schema: [{value: [{value: int}]}] 13 | value: [] 14 | host_binds: 15 | schema: [{value: {src: str, dst: str, mode: str}}] 16 | value: [] 17 | volume_binds: 18 | schema: [{src: str, dst: str, mode: str}] 19 | value: [] 20 | # ssh_user: 21 | # schema: str! 22 | # value: [] 23 | # ssh_key: 24 | # schema: str! 25 | # value: [] 26 | 27 | tags: [resource/container] 28 | -------------------------------------------------------------------------------- /resources/not_provisioned_node/meta.yaml: -------------------------------------------------------------------------------- 1 | id: not_provisioned_node 2 | handler: shell 3 | version: 1.0.0 4 | 5 | actions: 6 | provision: provision.sh 7 | run: run.sh 8 | reboot: reboot.sh 9 | 10 | input: 11 | ip: 12 | schema: str! 13 | value: 14 | master_key: 15 | schema: str! 16 | value: 17 | admin_mac: 18 | schema: str! 19 | value: 20 | repos: 21 | schema: list! 22 | value: [] 23 | name: 24 | schema: str 25 | value: a node 26 | location_id: 27 | schema: str! 28 | value: $uuid 29 | reverse: True 30 | partitioning: 31 | schema: dict! 32 | value: 33 | 34 | tags: [resources=node] 35 | -------------------------------------------------------------------------------- /resources/nova_conductor_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | id: nova_conductor 2 | handler: puppet 3 | puppet_module: nova 4 | version: 1.0.0 5 | input: 6 | ensure_package: 7 | schema: str 8 | value: 'present' 9 | workers: 10 | schema: int 11 | value: 1 12 | 13 | git: 14 | schema: {repository: str!, branch: str!} 15 | value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} 16 | 17 | ip: 18 | schema: str! 19 | value: 20 | # ssh_key: 21 | # schema: str! 22 | # value: 23 | # ssh_user: 24 | # schema: str! 25 | # value: 26 | 27 | tags: [resource/nova_conductor_service, resources/nova_conductor, resources/nova] 28 | -------------------------------------------------------------------------------- /solar/events/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | from .api import * 16 | -------------------------------------------------------------------------------- /resources/riak_node/actions/join.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - shell: riak-admin cluster join {{join_to}} 5 | ignore_errors: true 6 | register: join_output 7 | # the tasks below are a hacky workaround for "this node is already a member of a cluster" 8 | # errors; solar for now lacks the logic needed to avoid them 9 | - shell: /bin/true 10 | when: join_output|failed and join_output.stdout.find("This node is already a member of a cluster") != -1 11 | - shell: /bin/false 12 | when: join_output|failed and join_output.stdout.find("This node is already a member of a cluster") == -1 13 | - shell: /bin/true 14 | when: join_output|success 15 | 16 | -------------------------------------------------------------------------------- /resources/glance_api_service/test.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from solar.core.log import log 4 | from solar.core import validation 5 | 6 | 7 | def test(resource): 8 | log.debug('Testing glance_service') 9 | 10 | args = resource.args 11 | 12 | token, _ = validation.validate_token( 13 | keystone_host=args['keystone_host'], 14 | keystone_port=args['keystone_port'], 15 | user='glance_admin', 16 | tenant='services', 17 | password=args['keystone_password'], 18 | ) 19 | 20 | images = requests.get( 21 | 'http://%s:%s/v1/images' % (resource.args['ip'], 9393), 22 | headers={'X-Auth-Token': token} 23 | ) 24 | assert images.json() == {'images': []} 25 | -------------------------------------------------------------------------------- /resources/glance_puppet/test.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from solar.core.log import log 4 | from solar.core import validation 5 | 6 | 7 | def test(resource): 8 | log.debug('Testing glance_puppet') 9 | requests.get( 10 | 'http://%s:%s' % (resource.args['ip'], resource.args['bind_port']) 11 | ) 12 | #TODO(bogdando) test packages installed and filesystem store datadir created 13 | 14 | args = resource.args 15 | 16 | token, _ = validation.validate_token( 17 | keystone_host=args['keystone_host'], 18 | keystone_port=args['keystone_port'], 19 | user=args['keystone_user'], 20 | tenant=args['keystone_tenant'], 21 | password=args['keystone_password'], 22 | ) 23 | -------------------------------------------------------------------------------- /resources/glance_config/templates/glance-registry.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | bind_host = 0.0.0.0 3 | bind_port = 9191 4 | log_file = /var/log/glance/registry.log 5 | backlog = 4096 6 | api_limit_max = 1000 7 | limit_param_default = 25 8 | 9 | [database] 10 | backend = mysql 11 | connection =
mysql://{{ mysql_user }}:{{ mysql_password }}@{{ mysql_ip }}/{{ mysql_db }} 12 | 13 | [keystone_authtoken] 14 | auth_uri = http://{{ keystone_ip }}:{{ keystone_port }}/v2.0 15 | identity_uri = http://{{ keystone_ip }}:{{ keystone_admin_port }} 16 | admin_tenant_name = {{ keystone_admin_tenant }} 17 | admin_user = {{ keystone_admin_user }} 18 | admin_password = {{ keystone_admin_password }} 19 | 20 | [paste_deploy] 21 | flavor=keystone 22 | [profiler] 23 | -------------------------------------------------------------------------------- /resources/keystone_role/meta.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_role 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | keystone_host: 6 | schema: str! 7 | value: 8 | keystone_port: 9 | schema: int! 10 | value: 11 | admin_token: 12 | schema: str! 13 | value: 14 | user_name: 15 | schema: str! 16 | value: admin 17 | tenant_name: 18 | schema: str! 19 | value: 20 | role_name: 21 | schema: str! 22 | value: admin 23 | ip: 24 | schema: str! 25 | value: 26 | # ssh_key: 27 | # schema: str! 28 | # value: 29 | # ssh_user: 30 | # schema: str! 31 | # value: 32 | 33 | tags: [resource/keystone_role, resources/keystone] 34 | -------------------------------------------------------------------------------- /resources/lxc_host/meta.yaml: -------------------------------------------------------------------------------- 1 | id: lxc_host 2 | handler: ansible_playbook 3 | version: 1.0.0 4 | actions: 5 | input: 6 | ip: 7 | schema: str! 8 | value: 9 | # ssh_key: 10 | # schema: str! 11 | # value: 12 | # ssh_user: 13 | # schema: str! 14 | # value: 15 | provides: 16 | schema: str 17 | value: infra 18 | roles: 19 | schema: [{value: str}] 20 | value: 21 | - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/lxc_hosts 22 | - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/pip_install 23 | - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/apt_package_pinning 24 | -------------------------------------------------------------------------------- /resources/cinder_scheduler_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | id: cinder_scheduler_puppet 2 | handler: puppet 3 | puppet_module: cinder 4 | version: 1.0.0 5 | input: 6 | scheduler_driver: 7 | schema: str 8 | value: 9 | package_ensure: 10 | schema: str 11 | value: 'present' 12 | 13 | git: 14 | schema: {repository: str!, branch: str!} 15 | value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} 16 | 17 | ip: 18 | schema: str! 19 | value: 20 | # ssh_key: 21 | # schema: str! 22 | # value: 23 | # ssh_user: 24 | # schema: str! 25 | # value: 26 | 27 | tags: [resource/cinder_scheduler_service, resources/cinder_scheduler, resources/cinder] 28 | -------------------------------------------------------------------------------- /resources/keystone_user/meta.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_user 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | keystone_host: 6 | schema: str! 7 | value: 8 | keystone_port: 9 | schema: int! 10 | value: 11 | admin_token: 12 | schema: str! 13 | value: 14 | user_name: 15 | schema: str! 16 | value: admin 17 | user_password: 18 | schema: str! 19 | value: admin 20 | tenant_name: 21 | schema: str! 22 | value: 23 | ip: 24 | schema: str! 25 | value: 26 | # ssh_key: 27 | # schema: str! 
28 | # value: 29 | # ssh_user: 30 | # schema: str! 31 | # value: 32 | 33 | tags: [resource/keystone_user, resources/keystone] 34 | -------------------------------------------------------------------------------- /resources/mariadb_service/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - file: path=/var/lib/docker/data/{{resource_name}} state=directory 5 | - name: mariadb container 6 | docker: 7 | name: {{ resource_name }} 8 | image: {{ image }} 9 | state: reloaded 10 | ports: 11 | - {{ port }}:3306 12 | env: 13 | MYSQL_ROOT_PASSWORD: {{ root_password }} 14 | volumes: 15 | - /var/lib/docker/data/{{resource_name}}:/var/lib/mysql 16 | 17 | - shell: docker exec -t {{ resource_name }} mysql -p{{ root_password }} -uroot -e "SELECT 1" 18 | register: result 19 | until: result.rc == 0 20 | retries: 30 21 | delay: 0.5 22 | 23 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = solar 3 | version = 0.0.1 4 | author = Mirantis Inc. 5 | author-email = product@mirantis.com 6 | summary = Deployment tool 7 | description-file = README.md 8 | license = Apache-2 9 | home-page = http://mirantis.com 10 | classifier = 11 | Development Status :: 4 - Beta 12 | License :: OSI Approved :: Apache Software License 13 | Programming Language :: Python 14 | Programming Language :: Python :: 2.6 15 | Programming Language :: Python :: 2.7 16 | Topic :: System :: Software Distribution 17 | keywords = 18 | deployment 19 | 20 | [build_sphinx] 21 | all_files = 1 22 | build-dir = doc/build 23 | source-dir = doc/source 24 | 25 | [entry_points] 26 | console_scripts = 27 | solar = solar.cli.main:run 28 | -------------------------------------------------------------------------------- /resources/nova_conductor_puppet/actions/run.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $ensure_package = $resource['input']['ensure_package'] 4 | $workers = $resource['input']['workers'] 5 | 6 | exec { 'post-nova_config': 7 | command => '/bin/echo "Nova config has changed"', 8 | } 9 | 10 | include nova::params 11 | 12 | package { 'nova-common': 13 | name => $nova::params::common_package_name, 14 | ensure => $ensure_package, 15 | } 16 | 17 | class { 'nova::conductor': 18 | enabled => true, 19 | manage_service => true, 20 | ensure_package => $ensure_package, 21 | workers => $workers, 22 | } 23 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 10 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 11 | # License for the specific language governing permissions and limitations 12 | # under the License.
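# pbr builds the package from setup.cfg at install time: the [metadata] and
# [entry_points] sections shown above supply the name, version and the `solar`
# console script, which is why setup() below needs no arguments beyond setup_requires.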
13 | 14 | from setuptools import setup 15 | 16 | 17 | setup( 18 | setup_requires=['pbr'], 19 | pbr=True, 20 | ) 21 | -------------------------------------------------------------------------------- /solar/test/orch_fixtures/sleeping_beauty.yaml: -------------------------------------------------------------------------------- 1 | name: sleeping_beauty 2 | tasks: 3 | - uid: fairy1 4 | parameters: 5 | type: sleep 6 | args: [10] 7 | before: [princess] 8 | - uid: fairy2 9 | parameters: 10 | type: sleep 11 | args: [10] 12 | before: [princess] 13 | - uid: fairy3 14 | parameters: 15 | type: sleep 16 | args: [10] 17 | before: [princess] 18 | - uid: fairy4 19 | parameters: 20 | type: sleep 21 | args: [10] 22 | before: [princess] 23 | - uid: fairy5 24 | parameters: 25 | type: sleep 26 | args: [10] 27 | before: [princess] 28 | - uid: princess 29 | parameters: 30 | type: sleep 31 | args: [10] 32 | -------------------------------------------------------------------------------- /examples/torrent/README.md: -------------------------------------------------------------------------------- 1 | An example of using the torrent transport with solar. Torrent is used to distribute task data. After fetching is finished, the torrent client forks and continues seeding. 2 | 3 | 4 | The example contains a single node with a single host mapping + transports. 5 | 6 | Execute: 7 | ``` 8 | python examples/torrent/example.py 9 | solar changes stage 10 | solar changes process 11 | solar orch run-once last 12 | ``` 13 | 14 | Wait for finish: 15 | 16 | ``` 17 | solar orch report last -w 100 18 | ``` 19 | 20 | After this you should see a new entry in the `/etc/hosts` file. 21 | 22 | 23 | * All created torrents are in `/vagrant/torrents`; this directory doesn't need to be shared 24 | * Initial seeding is done using the torrent file 25 | * Downloading and then seeding is always done with magnet links 26 | -------------------------------------------------------------------------------- /resources/container_networks/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: '*' 2 | sudo: yes 3 | gather_facts: false 4 | # these are default variables; they will be overwritten by the resource ones 5 | vars: 6 | networks: 7 | mgmt: 8 | address: 172.18.10.6 9 | bridge: br-test0 10 | bridge_address: 172.18.10.252/24 11 | interface: eth1 12 | netmask: 255.255.255.0 13 | type: veth 14 | tasks: 15 | - shell: ip l add {{item.value.bridge}} type bridge 16 | with_dict: networks 17 | ignore_errors: true 18 | - shell: ip l set {{item.value.bridge}} up 19 | with_dict: networks 20 | - shell: ip a add dev {{item.value.bridge}} {{item.value.bridge_address}} 21 | with_dict: networks 22 | ignore_errors: true 23 | -------------------------------------------------------------------------------- /solar/core/resource/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License.
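# the import below re-exports the public resource API at package level, so
# callers can simply write `from solar.core.resource import Resource, load`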
14 | 15 | from .resource import Resource, load, load_all, validate_resources, load_by_tags, load_updated, RESOURCE_STATE 16 | -------------------------------------------------------------------------------- /examples/lxc/README.md: -------------------------------------------------------------------------------- 1 | Bootstrapping lxc containers using solar and roles from os-ansible-deployment 2 | 3 | First, run: 4 | 5 | `python examples/lxc/example-lxc.py deploy` 6 | 7 | It will do several things: 8 | 9 | * Prepare ~10 containers on solar-dev1 10 | * Add a linux bridge on solar-dev and solar-dev1 with uid br-int53 11 | * Set up a vxlan tunnel for solar-dev and solar-dev1 12 | * Generate an ssh key and inject it into the containers 13 | 14 | Later these containers can be used as regular nodes in solar. 15 | Check the rabbitmq example at the end of the file. 16 | 17 | To deploy everything, use the usual solar commands. 18 | ``` 19 | solar changes stage -d 20 | solar changes process 21 | solar orch run-once last 22 | watch -n 1 solar orch report last 23 | ``` 24 | 25 | Wait until all actions have state `SUCCESS` 26 | -------------------------------------------------------------------------------- /resources/nova_generic_service_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | id: nova_generic_service 2 | handler: puppet 3 | puppet_module: nova 4 | version: 1.0.0 5 | input: 6 | title: 7 | schema: str! 8 | value: 9 | package_name: 10 | schema: str! 11 | value: 12 | service_name: 13 | schema: str! 14 | value: 15 | ensure_package: 16 | schema: str 17 | value: 'present' 18 | 19 | git: 20 | schema: {repository: str!, branch: str!} 21 | value: {repository: 'https://github.com/openstack/puppet-nova', branch: '5.1.0'} 22 | 23 | ip: 24 | schema: str! 25 | value: 26 | # ssh_key: 27 | # schema: str! 28 | # value: 29 | # ssh_user: 30 | # schema: str! 31 | # value: 32 | 33 | tags: [resource/nova_generic_service, resources/nova] 34 | -------------------------------------------------------------------------------- /examples/library_ceph/README.md: -------------------------------------------------------------------------------- 1 | This example will do the following things: 2 | 3 | - fetch fuel-library from github 4 | - use ./update_modules.sh to fetch librarian dependencies 5 | - generate ceph keys on solar-dev1 6 | - install ceph-mon on solar-dev1 (INPROGRESS) 7 | - install ceph-osd on solar-dev2 (TODO) 8 | - implement a removal mechanism for ceph-mon/ceph-osd (TODO) 9 | 10 | 11 | To use it: 12 | 13 | ``` 14 | python examples/library_ceph/ceph.py 15 | solar ch stage && solar ch process 16 | solar or run-once last -w 120 17 | ``` 18 | 19 | If it fails, you can run a particular resource action with a lot of
21 | 22 | ``` 23 | solar res action run ceph_mon1 24 | ``` 25 | 26 | To add repositories use 27 | 28 | ``` 29 | solar resource create apt1 templates/mos_repos.yaml node=node1 index=1 30 | ``` 31 | -------------------------------------------------------------------------------- /resources/nova_generic_service_puppet/actions/run.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $service_title = $resource['input']['title'] 4 | $package_name = $resource['input']['package_name'] 5 | $service_name = $resource['input']['service_name'] 6 | $ensure_package = $resource['input']['ensure_package'] 7 | 8 | exec { 'post-nova_config': 9 | command => '/bin/echo "Nova config has changed"', 10 | } 11 | 12 | include nova::params 13 | 14 | package { 'nova-common': 15 | name => $nova::params::common_package_name, 16 | ensure => $ensure_package, 17 | } 18 | 19 | nova::generic_service { $service_title: 20 | enabled => true, 21 | manage_service => true, 22 | package_name => $package_name, 23 | service_name => $service_name, 24 | ensure_package => $ensure_package, 25 | } -------------------------------------------------------------------------------- /resources/mariadb_user/meta.yaml: -------------------------------------------------------------------------------- 1 | id: mariadb_user 2 | handler: ansible 3 | version: 1.0.0 4 | actions: 5 | run: run.yaml 6 | update: update.yaml 7 | remove: remove.yaml 8 | input: 9 | user_password: 10 | schema: str! 11 | value: 12 | user_name: 13 | schema: str! 14 | value: 15 | 16 | db_name: 17 | schema: str! 18 | value: 19 | db_host: 20 | schema: str! 21 | value: 22 | 23 | login_password: 24 | schema: str! 25 | value: 26 | login_port: 27 | schema: int! 28 | value: 29 | login_user: 30 | schema: str! 31 | value: 32 | 33 | ip: 34 | schema: str! 35 | value: 36 | # ssh_key: 37 | # schema: str! 38 | # value: 39 | # ssh_user: 40 | # schema: str! 41 | # value: 42 | -------------------------------------------------------------------------------- /resources/ceph_mon/meta.yaml: -------------------------------------------------------------------------------- 1 | id: ceph_mon 2 | handler: puppetv2 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | public_vip: 9 | schema: str! 10 | value: 11 | management_vip: 12 | schema: str! 13 | value: 14 | use_syslog: 15 | schema: bool 16 | value: true 17 | keystone: 18 | schema: {'admin_token': 'str'} 19 | value: {} 20 | ceph_monitor_nodes: 21 | schema: [] 22 | value: [] 23 | ceph_primary_monitor_node: 24 | schema: [] 25 | value: [] 26 | storage: 27 | schema: {} 28 | value: {} 29 | network_scheme: 30 | schema: {} 31 | value: {} 32 | role: 33 | schema: str! 34 | value: 35 | puppet_modules: 36 | schema: str! 37 | value: 38 | tags: [] 39 | -------------------------------------------------------------------------------- /resources/keystone_config/meta.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_config 2 | handler: ansible 3 | version: 1.0.0 4 | 5 | input: 6 | config_dir: 7 | schema: str! 8 | value: /etc/solar/keystone 9 | admin_token: 10 | schema: str! 11 | value: admin 12 | db_password: 13 | schema: str! 14 | value: password 15 | db_user: 16 | schema: str! 17 | value: keystone 18 | db_host: 19 | schema: str! 20 | value: 21 | db_port: 22 | schema: int! 23 | value: 24 | db_name: 25 | schema: str! 26 | value: keystone 27 | ip: 28 | schema: str! 29 | value: 30 | # ssh_key: 31 | # schema: str! 
32 | # value: 33 | # ssh_user: 34 | # schema: str! 35 | # value: 36 | 37 | tags: [resource/keystone_config, resources/keystone] 38 | -------------------------------------------------------------------------------- /resources/mariadb_db/meta.yaml: -------------------------------------------------------------------------------- 1 | id: mariadb_db 2 | handler: ansible 3 | version: 1.0.0 4 | actions: 5 | run: run.yaml 6 | remove: remove.yaml 7 | update: run.yaml 8 | input: 9 | db_name: 10 | schema: str! 11 | value: 12 | db_host: 13 | schema: str! 14 | value: 15 | 16 | login_user: 17 | schema: str! 18 | value: 19 | login_password: 20 | schema: str! 21 | value: 22 | login_port: 23 | schema: int! 24 | value: 25 | collation: 26 | schema: str 27 | value: 'utf8_general_ci' 28 | encoding: 29 | schema: str 30 | value: 'utf8' 31 | 32 | ip: 33 | schema: str! 34 | value: 35 | # ssh_key: 36 | # schema: str! 37 | # value: 38 | # ssh_user: 39 | # schema: str! 40 | # value: 41 | -------------------------------------------------------------------------------- /templates/nodes_with_transports.yaml: -------------------------------------------------------------------------------- 1 | id: simple_multinode_with_transports 2 | resources: 3 | {% for i in range(count|int) %} 4 | - id: ssh_transport{{i}} 5 | from: resources/transport_ssh 6 | values: 7 | ssh_user: 'vagrant' 8 | ssh_key: '/vagrant/.vagrant/machines/solar-dev{{i + 1}}/virtualbox/private_key' 9 | - id: transports{{i}} 10 | from: resources/transports 11 | values: 12 | transports:key: ssh_transport{{i}}::ssh_key 13 | transports:user: ssh_transport{{i}}::ssh_user 14 | transports:port: ssh_transport{{i}}::ssh_port 15 | transports:name: ssh_transport{{i}}::name 16 | - id: node{{i}} 17 | from: resources/ro_node 18 | values: 19 | ip: '10.0.0.{{i + 3}}' 20 | transports_id: transports{{i}}::transports_id 21 | name: node{{i}} 22 | {% endfor %} 23 | -------------------------------------------------------------------------------- /resources/keystone_config/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | vars: 4 | admin_token: {{admin_token}} 5 | keystone_host: {{ ip }} 6 | keystone_port: {{ port }} 7 | db_user: {{db_user}} 8 | db_password: {{db_password}} 9 | db_host: {{db_host}} 10 | db_name: {{db_name}} 11 | tasks: 12 | - file: path={{config_dir}} state=directory 13 | - template: src={{templates_dir}}/keystone.conf dest={{config_dir}}/keystone.conf 14 | - template: src={{templates_dir}}/default_catalog.templates dest={{config_dir}}/default_catalog.templates 15 | - template: src={{templates_dir}}/logging.conf dest={{config_dir}}/logging.conf 16 | - template: src={{templates_dir}}/policy.json dest={{config_dir}}/policy.json 17 | - template: src={{templates_dir}}/exports dest={{ config_dir }}/keystone-exports 18 | -------------------------------------------------------------------------------- /resources/lxc_container/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: '*' 2 | sudo: yes 3 | gather_facts: false 4 | # these are default variables; they will be overwritten by the resource ones 5 | vars: 6 | ansible_ssh_host: 10.0.0.3 7 | physical_host: 10.0.0.3 8 | container_name: test3 9 | inventory_hostname: test3 10 | properties: 11 | container_release: trusty 12 | container_networks: 13 | mgmt: 14 | address: 172.18.10.6 15 | bridge: br-test0 16 | bridge_address: 172.18.10.252/24 17 | interface: eth1 18 | netmask:
255.255.255.0 19 | type: veth 20 | pub_key: '' 21 | pre_tasks: 22 | - set_fact: 23 | lxc_container_ssh_key: "{{ lookup('file', pub_key) }}" 24 | roles: 25 | - { role: "lxc_container_create", tags: [ "lxc-container-create" ] } 26 | -------------------------------------------------------------------------------- /templates/glance_registry.yaml: -------------------------------------------------------------------------------- 1 | id: glance_register_{{idx}} 2 | 3 | resources: 4 | - id: glance_config_{{idx}} 5 | from: resources/glance_config 6 | values: 7 | keystone_admin_port: '{{keystone_admin_port}}' 8 | keystone_ip: '{{keystone_ip}}' 9 | mysql_password: '{{mysql_password}}' 10 | mysql_user: '{{mysql_user}}' 11 | mysql_db: '{{mysql_db}}' 12 | mysql_ip: '{{mysql_ip}}' 13 | ip: '{{ip}}' 14 | ssh_user: '{{ssh_user}}' 15 | ssh_key: '{{ssh_key}}' 16 | 17 | 18 | - id: glance_registry_{{idx}} 19 | from: resources/glance_registry_service 20 | values: 21 | ip: 'glance_config_{{idx}}::ip' 22 | ssh_user: 'glance_config_{{idx}}::ssh_user' 23 | ssh_key: 'glance_config_{{idx}}::ssh_key' 24 | 25 | tags: ['resources/glance', 'resource/glance_registry'] 26 | 27 | -------------------------------------------------------------------------------- /resources/volume_group/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{ host }}] 2 | sudo: yes 3 | tasks: 4 | - name: install dependencies 5 | apt: name=lvm2 state=present 6 | - name: prepare file 7 | command: truncate -s 10G {{path}} creates={{path}} 8 | - name: check if loop for file is already created 9 | shell: losetup -a|grep {{path}} 10 | register: loop_created 11 | ignore_errors: True 12 | - name: if loop is not created, create it 13 | command: losetup -f {{path}} 14 | when: loop_created|failed 15 | - name: find loop device 16 | shell: losetup -a|grep '{{path}}'|awk -F':' '{print $1}' 17 | register: loop_device 18 | - name: create Volume Group on loop device 19 | lvg: vg={{volume_name}} pvs={% raw %}{{item}}{% endraw %} state=present 20 | with_items: loop_device.stdout_lines 21 | -------------------------------------------------------------------------------- /resources/keystone_puppet/actions/update.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $ip = $resource['input']['ip'] 4 | $admin_token = $resource['input']['admin_token'] 5 | $db_user = $resource['input']['db_user'] 6 | $db_host = $resource['input']['db_host'] 7 | $db_password = $resource['input']['db_password'] 8 | $db_name = $resource['input']['db_name'] 9 | $db_port = $resource['input']['db_port'] 10 | $admin_port = $resource['input']['admin_port'] 11 | $port = $resource['input']['port'] 12 | 13 | class {'keystone': 14 | package_ensure => 'present', 15 | verbose => true, 16 | catalog_type => 'sql', 17 | admin_token => $admin_token, 18 | database_connection => "mysql://$db_user:$db_password@$db_host:$db_port/$db_name", 19 | public_port => "$port", 20 | admin_port => "$admin_port", 21 | } 22 | -------------------------------------------------------------------------------- /templates/glance_base.yaml: -------------------------------------------------------------------------------- 1 | id: glance_base 2 | 3 | resources: 4 | - id: glance_db 5 | from: resources/mariadb_db 6 | values: 7 | db_name: {{db_name}} 8 | login_user: '{{login_user}}' 9 | login_password: '{{login_password}}' 10 | login_port: '{{login_port}}' 11 | ip: '{{ip}}' 12 | ssh_user:
'{{ssh_user}}' 13 | ssh_key: '{{ssh_key}}' 14 | 15 | - id: glance_db_user 16 | from: resources/mariadb_user 17 | values: 18 | user_password: '{{user_password}}' 19 | user_name: '{{user_name}}' 20 | db_name: 'glance_db::db_name' 21 | login_user: 'glance_db::login_user' 22 | login_password: 'glance_db::login_password' 23 | login_port: 'glance_db::login_port' 24 | ip: 'glance_db::ip' 25 | ssh_user: 'glance_db::ssh_user' 26 | ssh_key: 'glance_db::ssh_key' 27 | -------------------------------------------------------------------------------- /bootstrap/playbooks/celery.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Solar Celery config 4 | hosts: all 5 | sudo: yes 6 | vars: 7 | celery_dir: /var/run/celery 8 | tasks: 9 | - shell: mkdir -p {{ celery_dir }} 10 | tags: [install] 11 | - shell: pip install celery 12 | tags: [install] 13 | - shell: hostname 14 | register: hostname 15 | - shell: celery multi stopwait 2 -A solar.orchestration.runner 16 | chdir={{ celery_dir }} 17 | tags: [stop] 18 | - shell: celery multi start 2 -A solar.orchestration.runner -P:2 prefork -c:1 1 -c:2 3 -Q:1 scheduler,system_log -Q:2 celery,{{ hostname.stdout }} 19 | chdir={{ celery_dir }} 20 | tags: [master] 21 | - shell: celery multi start 1 -A solar.orchestration.runner -Q:1 {{ hostname.stdout }} 22 | chdir={{ celery_dir }} 23 | tags: [slave] 24 | -------------------------------------------------------------------------------- /resources/cinder_volume_puppet/actions/update.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $package_ensure = $resource['input']['package_ensure'] 4 | $use_iscsi_backend = $resource['input']['use_iscsi_backend'] 5 | 6 | $iscsi_ip_address = $resource['input']['iscsi_ip_address'] 7 | $volume_driver = $resource['input']['volume_driver'] 8 | $volume_group = $resource['input']['volume_group'] 9 | $iscsi_helper = $resource['input']['iscsi_helper'] 10 | 11 | include cinder::params 12 | 13 | package { 'cinder': 14 | ensure => $package_ensure, 15 | name => $::cinder::params::package_name, 16 | } -> 17 | 18 | class {'cinder::volume': 19 | package_ensure => $package_ensure, 20 | enabled => true, 21 | manage_service => true, 22 | } 23 | 24 | notify { "restart cinder volume": 25 | notify => Service["cinder-volume"], 26 | } 27 | -------------------------------------------------------------------------------- /resources/nova_conductor_puppet/actions/update.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $ensure_package = $resource['input']['ensure_package'] 4 | $workers = $resource['input']['workers'] 5 | 6 | exec { 'post-nova_config': 7 | command => '/bin/echo "Nova config has changed"', 8 | } 9 | 10 | include nova::params 11 | 12 | package { 'nova-common': 13 | name => $nova::params::common_package_name, 14 | ensure => $ensure_package, 15 | } 16 | 17 | class { 'nova::conductor': 18 | enabled => true, 19 | manage_service => true, 20 | ensure_package => $ensure_package, 21 | workers => $workers, 22 | } 23 | 24 | notify { "restart nova conductor": 25 | notify => Service["nova-conductor"], 26 | } 27 | -------------------------------------------------------------------------------- /solar/system_log/consts.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc.
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | from enum import Enum 16 | 17 | CHANGES = Enum( 18 | 'Changes', 19 | 'run remove update' 20 | ) 21 | 22 | 23 | STATES = Enum('States', 'error inprogress pending success') 24 | -------------------------------------------------------------------------------- /resources/rabbitmq_config/templates/rabbitmq.conf: -------------------------------------------------------------------------------- 1 | [ 2 | {rabbit, [ 3 | {cluster_partition_handling, autoheal}, 4 | {default_permissions, [<<".*">>, <<".*">>, <<".*">>]}, 5 | {default_vhost, <<"/">>}, 6 | {log_levels, [connection,info,error]}, 7 | {tcp_listen_options, [ 8 | binary, 9 | {packet, raw}, 10 | {reuseaddr, true}, 11 | {backlog, 128}, 12 | {nodelay, true}, 13 | {exit_on_close, false}, 14 | {keepalive, true} 15 | ]}, 16 | {default_user, <<"{{default_user}}">>}, 17 | {default_pass, <<"{{default_password}}">>} 18 | ]}, 19 | {kernel, [ 20 | {inet_default_connect_options, [{nodelay,true}]}, 21 | {inet_dist_listen_max, 41055}, 22 | {inet_dist_listen_min, 41055} 23 | ]} 24 | , 25 | {rabbitmq_management, [ 26 | {listener, [ 27 | {port, 15672} 28 | ]} 29 | ]} 30 | ]. 31 | -------------------------------------------------------------------------------- /solar/test/orch_fixtures/test_errors.yaml: -------------------------------------------------------------------------------- 1 | 2 | name: errors 3 | tasks: 4 | - uid: compute1 5 | parameters: 6 | type: echo 7 | args: [compute1] 8 | before: [compute_ready] 9 | - uid: compute2 10 | parameters: 11 | type: echo 12 | args: [compute2] 13 | before: [compute_ready] 14 | - uid: compute3 15 | parameters: 16 | type: echo 17 | args: [compute3] 18 | before: [compute_ready] 19 | - uid: compute4 20 | parameters: 21 | type: error 22 | args: [compute4] 23 | before: [compute_ready] 24 | - uid: compute5 25 | parameters: 26 | type: error 27 | args: [compute5] 28 | before: [compute_ready] 29 | 30 | - uid: compute_ready 31 | parameters: 32 | type: fault_tolerance 33 | args: [80] 34 | 35 | -------------------------------------------------------------------------------- /solar/test/orch_fixtures/upd_test_errors.yaml: -------------------------------------------------------------------------------- 1 | 2 | name: errors 3 | tasks: 4 | - uid: compute1 5 | parameters: 6 | type: echo 7 | args: [compute1] 8 | before: [compute_ready] 9 | - uid: compute2 10 | parameters: 11 | type: echo 12 | args: [compute2] 13 | before: [compute_ready] 14 | - uid: compute3 15 | parameters: 16 | type: echo 17 | args: [compute3] 18 | before: [compute_ready] 19 | - uid: compute4 20 | parameters: 21 | type: echo 22 | args: [compute4] 23 | before: [compute_ready] 24 | - uid: compute5 25 | parameters: 26 | type: error 27 | args: [compute5] 28 | before: [compute_ready] 29 | 30 | - uid: compute_ready 31 | parameters: 32 | type: fault_tolerance 33 | args: [80] 34 | 35 | -------------------------------------------------------------------------------- 
/resources/riak_node/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | # the tasks below are mostly for tests 5 | - shell: killall -u riak 6 | ignore_errors: yes 7 | # remove the above when not testing 8 | 9 | # we install the ubuntu repo here, 10 | # NOT recommended in production 11 | - shell: curl -s https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | sudo bash 12 | 13 | - apt: 14 | name: riak 15 | state: present 16 | - service: 17 | name: riak 18 | state: stopped 19 | - file: path=/etc/riak/riak.conf state=touch 20 | - template: 21 | src: {{templates_dir}}/riak.conf 22 | dest: /etc/riak/riak.conf 23 | - shell: rm -fr /var/lib/riak/kv_vnode/* 24 | - shell: rm -fr /var/lib/riak/ring/* 25 | 26 | - service: 27 | name: riak 28 | state: reloaded 29 | -------------------------------------------------------------------------------- /templates/glance_db.yaml: -------------------------------------------------------------------------------- 1 | id: glance_db_{{ idx }} 2 | 3 | resources: 4 | - id: glance_db_db_{{ idx }} 5 | from: resources/mariadb_db 6 | values: 7 | db_name: '{{ db_name }}' 8 | login_user: '{{ db_login_user }}' 9 | login_password: '{{ db_login_password }}' 10 | login_port: '{{ db_login_port }}' 11 | 12 | ip: '{{ ip }}' 13 | ssh_user: '{{ ssh_user }}' 14 | ssh_key: '{{ ssh_key }}' 15 | 16 | - id: glance_db_user 17 | from: resources/mariadb_user 18 | values: 19 | user_password: '{{ db_password }}' 20 | user_name: '{{ db_user }}' 21 | 22 | db_name: '{{ db_name }}' 23 | 24 | login_user: '{{ db_login_user }}' 25 | login_password: '{{ db_login_password }}' 26 | login_port: '{{ db_login_port }}' 27 | 28 | ip: '{{ ip }}' 29 | ssh_user: '{{ ssh_user }}' 30 | ssh_key: '{{ ssh_key }}' 31 | -------------------------------------------------------------------------------- /resources/riak_node/meta.yaml: -------------------------------------------------------------------------------- 1 | id: riak_node 2 | handler: ansible 3 | version: 1.0.0 4 | actions: 5 | commit: commit.yaml 6 | run: run.yaml 7 | join: join.yaml 8 | input: 9 | ip: 10 | schema: str! 11 | value: 12 | riak_self_name: 13 | schema: str! 14 | value: 15 | riak_hostname: 16 | schema: str! 17 | value: 18 | riak_name: 19 | schema: str! 20 | value: null 21 | computable: 22 | lang: jinja2 23 | type: full 24 | func: "{{riak_self_name}}@{{riak_hostname}}" 25 | riak_port_http: 26 | schema: int! 27 | value: 18098 28 | riak_port_pb: 29 | schema: int! 30 | value: 18087 31 | riak_port_solr: 32 | schema: int! 33 | value: 8985 34 | join_to: 35 | schema: str 36 | value: 37 | storage_backend: 38 | schema: str!
39 | value: bitcask 40 | -------------------------------------------------------------------------------- /resources/nova_generic_service_puppet/actions/update.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $service_title = $resource['input']['title'] 4 | $package_name = $resource['input']['package_name'] 5 | $service_name = $resource['input']['service_name'] 6 | $ensure_package = $resource['input']['ensure_package'] 7 | 8 | exec { 'post-nova_config': 9 | command => '/bin/echo "Nova config has changed"', 10 | } 11 | 12 | include nova::params 13 | 14 | package { 'nova-common': 15 | name => $nova::params::common_package_name, 16 | ensure => $ensure_package, 17 | } 18 | 19 | nova::generic_service { $service_title: 20 | enabled => true, 21 | manage_service => true, 22 | package_name => $package_name, 23 | service_name => $service_name, 24 | ensure_package => $ensure_package, 25 | } 26 | 27 | notify { "restart generic service": 28 | notify => Service["nova-${service_title}"], 29 | } 30 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | solar-celery: 2 | image: solarproject/solar-celery 3 | # the path inside the container should be exactly the same as outside 4 | # because solar uses absolute paths to find resource action files 5 | volumes: 6 | - /vagrant/.vagrant:/vagrant/.vagrant 7 | - /vagrant:/solar 8 | - /vagrant/templates:/vagrant/templates 9 | - /vagrant/resources:/vagrant/resources 10 | - /vagrant/library:/vagrant/library 11 | environment: 12 | - REDIS_HOST=redis 13 | - REDIS_PORT=6379 14 | - RIAK_HOST=riak 15 | - RIAK_PORT=8087 16 | # links are not used for configuration because we can rely on non-container 17 | # based datastores 18 | links: 19 | - riak 20 | - redis 21 | 22 | riak: 23 | image: tutum/riak 24 | ports: 25 | - 8087:8087 26 | - 8098:8098 27 | redis: 28 | image: tutum/redis 29 | ports: 30 | - 6379:6379 31 | environment: 32 | - REDIS_PASS=**None** 33 | -------------------------------------------------------------------------------- /resources/haproxy_config/actions/run.yaml: -------------------------------------------------------------------------------- 1 | # TODO 2 | - hosts: [{{host}}] 3 | sudo: yes 4 | vars: 5 | config_dir: {src: {{ config_dir['src'] }}, dst: {{ config_dir['dst'] }}} 6 | haproxy_ip: {{ ip }} 7 | haproxy_services: 8 | {% for single in config %} 9 | - name: {{ single['name'] }} 10 | listen_port: {{ single['listen_port'] }} 11 | protocol: {{ single['protocol'] }} 12 | servers: 13 | {% for backend in single['backends'] %} 14 | - name: {{ backend['server'] }}_{{ backend['port'] }} 15 | ip: {{ backend['server'] }} 16 | port: {{ backend['port'] }} 17 | {% endfor %} 18 | {% endfor %} 19 | tasks: 20 | - file: path={{ config_dir['src'] }}/ state=directory 21 | - file: path={{ config_dir['src'] }}/haproxy.cfg state=touch 22 | - template: src={{templates_dir}}/haproxy.cfg dest=/etc/haproxy/haproxy.cfg 23 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 1.6 3 | skipsdist = True 4 | envlist = pep8,py27 5 | 6 | [testenv] 7 | usedevelop = True 8 | install_command = pip install -U {opts} {packages} 9 | setenv = VIRTUAL_ENV={envdir} 10 | deps = -r{toxinidir}/test-requirements.txt 11 | commands = ostestr
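# note: with `commands = ostestr` and no {posargs}, arguments given after
# `tox -epy27 --` are not forwarded; `commands = ostestr {posargs}` would let
# you pass a test filter, e.g. `tox -epy27 -- --regex test_celery`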
12 | 13 | [testenv:pep8] 14 | deps = hacking==0.10.2 15 | usedevelop = False 16 | commands = 17 | flake8 {posargs:solar} 18 | 19 | [testenv:venv] 20 | deps = -r{toxinidir}/requirements.txt 21 | sphinx 22 | commands = {posargs:} 23 | 24 | [testenv:cover] 25 | commands = 26 | coverage erase 27 | python setup.py testr --coverage \ 28 | --testr-args='--concurrency=1 {posargs}' 29 | 30 | [testenv:devenv] 31 | envdir = devenv 32 | usedevelop = True 33 | 34 | [flake8] 35 | ignore = H101,H236,E731 36 | exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,__init__.py,docs 37 | show-pep8 = True 38 | show-source = True 39 | count = True 40 | -------------------------------------------------------------------------------- /resources/haproxy_config/actions/update.yaml: -------------------------------------------------------------------------------- 1 | # TODO 2 | - hosts: [{{host}}] 3 | sudo: yes 4 | vars: 5 | config_dir: {src: {{ config_dir['src'] }}, dst: {{ config_dir['dst'] }}} 6 | haproxy_ip: {{ ip }} 7 | haproxy_services: 8 | {% for single in config %} 9 | - name: {{ single['name'] }} 10 | listen_port: {{ single['listen_port'] }} 11 | protocol: {{ single['protocol'] }} 12 | servers: 13 | {% for backend in single['backends'] %} 14 | - name: {{ backend['server'] }}_{{ backend['port'] }} 15 | ip: {{ backend['server'] }} 16 | port: {{ backend['port'] }} 17 | {% endfor %} 18 | {% endfor %} 19 | tasks: 20 | - file: path={{ config_dir['src'] }}/ state=directory 21 | - file: path={{ config_dir['src'] }}/haproxy.cfg state=touch 22 | - template: src={{templates_dir}}/haproxy.cfg dest=/etc/haproxy/haproxy.cfg 23 | -------------------------------------------------------------------------------- /templates/keystone_base.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_base 2 | 3 | resources: 4 | - id: keystone_db 5 | from: resources/mariadb_db 6 | values: 7 | db_name: '{{db_name}}' 8 | db_host: '{{db_host}}' 9 | login_user: '{{login_user}}' 10 | login_password: '{{login_password}}' 11 | login_port: '{{login_port}}' 12 | ip: '{{ip}}' 13 | ssh_user: '{{ssh_user}}' 14 | ssh_key: '{{ssh_key}}' 15 | 16 | - id: keystone_db_user 17 | from: resources/mariadb_user 18 | values: 19 | user_password: '{{user_password}}' 20 | user_name: '{{user_name}}' 21 | db_name: 'keystone_db::db_name' 22 | db_host: '{{db_host}}' 23 | login_user: 'keystone_db::login_user' 24 | login_password: 'keystone_db::login_password' 25 | login_port: 'keystone_db::login_port' 26 | ip: 'keystone_db::ip' 27 | ssh_user: 'keystone_db::ssh_user' 28 | ssh_key: 'keystone_db::ssh_key' 29 | -------------------------------------------------------------------------------- /resources/data_container/actions/run.yaml: -------------------------------------------------------------------------------- 1 | - hosts: [{{host}}] 2 | sudo: yes 3 | tasks: 4 | - docker: 5 | name: {{ resource_name }} 6 | image: {{ image }} 7 | state: running 8 | net: host 9 | {% if ports.value %} 10 | ports: 11 | {% for port in ports.value %} 12 | - {{ port['value'] }}:{{ port['value'] }} 13 | {% endfor %} 14 | {% endif %} 15 | {% if host_binds.value %} 16 | volumes: 17 | # TODO: host_binds might need more work 18 | # Currently it's not that trivial to pass custom src: dst here 19 | # (when a config variable is passed here from other resource) 20 | # so we mount it to the same directory as on host 21 | {% for bind in host_binds.value %} 22 | - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 
'ro') }} 23 | {% endfor %} 24 | {% endif %} 25 | -------------------------------------------------------------------------------- /templates/nodes.yaml: -------------------------------------------------------------------------------- 1 | id: simple_riak_with_transports 2 | resources: 3 | {% for i in range(count|int) %} 4 | {% set j = i +1 %} 5 | - id: ssh_transport{{j}} 6 | from: resources/transport_ssh 7 | values: 8 | ssh_user: 'vagrant' 9 | ssh_key: '/vagrant/.vagrant/machines/solar-dev{{j}}/virtualbox/private_key' 10 | - id: transports{{j}} 11 | from: resources/transports 12 | values: 13 | transports:key: ssh_transport{{j}}::ssh_key 14 | transports:user: ssh_transport{{j}}::ssh_user 15 | transports:port: ssh_transport{{j}}::ssh_port 16 | transports:name: ssh_transport{{j}}::name 17 | - id: node{{j}} 18 | from: resources/ro_node 19 | values: 20 | name: node{{j}} 21 | ip: '10.0.0.{{i + 3}}' 22 | transports_id: transports{{j}}::transports_id 23 | - id: hosts_file{{j}} 24 | from: resources/hosts_file 25 | location: node{{j}} 26 | tags: ['location=node{{j}}'] 27 | {% endfor %} 28 | -------------------------------------------------------------------------------- /examples/compiled-resources/README.md: -------------------------------------------------------------------------------- 1 | # Example script that uses the "compiled resources" functionality 2 | 3 | To run this code, first compile the resources with 4 | 5 | ```bash 6 | solar resource compile_all 7 | ``` 8 | 9 | Please note that you no longer have to write 10 | 11 | ```python 12 | node1 = resource.create('node1', 'resources/ro_node/', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key', 'ssh_user': 'vagrant'}) 13 | ``` 14 | 15 | but instead you can do: 16 | ```python 17 | import resources_compiled 18 | 19 | node1 = resources_compiled.RoNodeResource('node1', None, {}) 20 | node1.ip = '10.0.0.3' 21 | node1.ssh_key = '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key' 22 | node1.ssh_user = 'vagrant' 23 | ``` 24 | 25 | Resources are no longer a collection of dicts with inputs that are hard to 26 | trace; they are full Python classes for which you can use your IDE's 27 | autocompletion and other functionality. 28 | -------------------------------------------------------------------------------- /vagrant-settings.yaml_defaults: -------------------------------------------------------------------------------- 1 | # copy it to vagrant-settings.yaml, then Vagrantfile 2 | # will use the values from this file 3 | 4 | slaves_count: 2 5 | slaves_ram: 1024 6 | master_image: solar-project/solar-master 7 | slaves_image: solar-project/solar-master 8 | master_ram: 1024 9 | master_cpus: 1 10 | master_ips: 11 | - 10.0.0.2 12 | - 10.1.0.2 13 | - 10.2.0.2 14 | slaves_cpus: 1 15 | slaves_ips: 16 | - 10.0.0. 17 | - 10.1.0. 18 | - 10.2.0. 19 | 20 | # if you have virtualbox 5.x then enable it 21 | # it will speed things up a lot 22 | # paravirtprovider: kvm 23 | 24 | # By default the Virtualbox shared folder is used, which is very slow 25 | # Uncomment the following option to change it.
26 | # Possible options are: rsync, nfs 27 | # sync_type: nfs 28 | 29 | # Use the vagrant image in order to perform provisioning 30 | preprovisioned: true 31 | 32 | # Use pxe bootstrap in order to bootstrap nodes; 33 | # it should be used when nodes are to be provisioned 34 | # by solar 35 | # preprovisioned: false 36 | -------------------------------------------------------------------------------- /resources/glance_api_service/meta.yaml: -------------------------------------------------------------------------------- 1 | id: container 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | image: 9 | schema: str! 10 | value: 11 | ports: 12 | schema: [{value: [{value: int}]}] 13 | value: [] 14 | host_binds: 15 | schema: [{value: {src: str, dst: str, mode: str}}] 16 | value: [] 17 | volume_binds: 18 | schema: [{src: str, dst: str, mode: str}] 19 | value: [] 20 | # ssh_user: 21 | # schema: str! 22 | # value: [] 23 | # ssh_key: 24 | # schema: str! 25 | # value: [] 26 | 27 | db_password: 28 | schema: str! 29 | value: 30 | keystone_admin_token: 31 | schema: str! 32 | value: 33 | keystone_password: 34 | schema: str! 35 | value: 36 | keystone_host: 37 | schema: str! 38 | value: 39 | keystone_port: 40 | schema: int! 41 | value: 42 | 43 | tags: [resource/container] 44 | -------------------------------------------------------------------------------- /doc/libvirt.md: -------------------------------------------------------------------------------- 1 | # Using Vagrant with libvirt 2 | 3 | First, install the libvirt plugin: 4 | 5 | ```bash 6 | vagrant plugin install vagrant-libvirt 7 | ``` 8 | 9 | If you do not already have the vagrant box for VirtualBox, install it: 10 | 11 | ```bash 12 | vagrant box add solar-project/solar-master 13 | ``` 14 | 15 | To use this box with libvirt you need to convert it using the `vagrant-mutate` plugin: 16 | 17 | ```bash 18 | vagrant plugin install vagrant-mutate 19 | vagrant mutate solar-project/solar-master libvirt 20 | ``` 21 | 22 | You can also change `sync_type` in your custom `vagrant-settings.yaml` file 23 | copied from the `vagrant-settings.yaml_defaults`. 24 | 25 | # Use solar 26 | 27 | ``` bash 28 | vagrant up --provider libvirt 29 | ``` 30 | 31 | (TODO automation required) After that, copy (or create, if missing) the ssh 32 | private keys for nodes to the `.vagrant/machines/solar-dev*/virtualbox` dirs. 33 | And make sure the public keys are listed in the `authorized_keys` files for the 34 | `solar-dev*` nodes. 35 | -------------------------------------------------------------------------------- /bootstrap/playbooks/tasks/docker.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - shell: docker --version 4 | ignore_errors: true 5 | register: docker_version 6 | # This script is completely broken, it has so many sleeps...
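# Note on the pattern here: the `docker --version` task above only probes for an existing install and registers the result; # the installer task below is guarded by `when: docker_version | failed`, so it runs only on hosts where the probe failed.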
7 | - shell: curl -sSL https://get.docker.com/ | sudo sh 8 | when: docker_version | failed 9 | 10 | # Here's a raw paste of what the above script really does for Ubuntu 11 | #- shell: apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 12 | #- shell: mkdir -p /etc/apt/sources.list.d 13 | #- shell: echo deb https://apt.dockerproject.org/repo ubuntu-trusty main > /etc/apt/sources.list.d/docker.list 14 | # args: 15 | # creates: /etc/apt/sources.list.d/docker.list 16 | #- shell: apt-get update 17 | #- shell: apt-get install -y -q docker-engine 18 | 19 | # install docker compose 20 | - shell: curl -L https://github.com/docker/compose/releases/download/1.5.1/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose 21 | - shell: chmod +x /usr/local/bin/docker-compose 22 | -------------------------------------------------------------------------------- /resources/cinder_volume_puppet/actions/run.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $package_ensure = $resource['input']['package_ensure'] 4 | $use_iscsi_backend = $resource['input']['use_iscsi_backend'] 5 | 6 | $iscsi_ip_address = $resource['input']['iscsi_ip_address'] 7 | $volume_driver = $resource['input']['volume_driver'] 8 | $volume_group = $resource['input']['volume_group'] 9 | $iscsi_helper = $resource['input']['iscsi_helper'] 10 | 11 | include cinder::params 12 | 13 | package { 'cinder': 14 | ensure => $package_ensure, 15 | name => $::cinder::params::package_name, 16 | } -> 17 | 18 | class {'cinder::volume': 19 | package_ensure => $package_ensure, 20 | enabled => true, 21 | manage_service => true, 22 | } 23 | 24 | if $use_iscsi_backend { 25 | class {'cinder::volume::iscsi': 26 | iscsi_ip_address => $iscsi_ip_address, 27 | volume_driver => $volume_driver, 28 | volume_group => $volume_group, 29 | iscsi_helper => $iscsi_helper, 30 | } 31 | } -------------------------------------------------------------------------------- /solar/core/handlers/python.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright 2015 Mirantis, Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 
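# # What follows is the whole handler: TempFileHandler renders the resource's # action template into a temporary file, and action() then executes that file # with the local python interpreter via fabric's local(); no remote transport # is involved.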
15 | 16 | from fabric import api as fabric_api 17 | 18 | from solar.core.handlers.base import TempFileHandler 19 | 20 | 21 | class Python(TempFileHandler): 22 | 23 | def action(self, resource, action_name): 24 | action_file = self._compile_action_file(resource, action_name) 25 | fabric_api.local('python {}'.format(action_file)) 26 | -------------------------------------------------------------------------------- /resources/cinder_glance_puppet/actions/run.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $glance_api_version = $resource['input']['glance_api_version'] 4 | $glance_num_retries = $resource['input']['glance_num_retries'] 5 | $glance_api_insecure = $resource['input']['glance_api_insecure'] 6 | $glance_api_ssl_compression = $resource['input']['glance_api_ssl_compression'] 7 | $glance_request_timeout = $resource['input']['glance_request_timeout'] 8 | $glance_api_servers_host = $resource['input']['glance_api_servers_host'] 9 | $glance_api_servers_port = $resource['input']['glance_api_servers_port'] 10 | 11 | class {'cinder::glance': 12 | glance_api_servers => "${glance_api_servers_host}:${glance_api_servers_port}", 13 | glance_api_version => $glance_api_version, 14 | glance_num_retries => $glance_num_retries, 15 | glance_api_insecure => $glance_api_insecure, 16 | glance_api_ssl_compression => $glance_api_ssl_compression, 17 | glance_request_timeout => $glance_request_timeout, 18 | } 19 | -------------------------------------------------------------------------------- /resources/cinder_volume_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | id: cinder_volume_puppet 2 | handler: puppet 3 | puppet_module: cinder 4 | version: 1.0.0 5 | input: 6 | package_ensure: 7 | schema: str 8 | value: 'present' 9 | iscsi_ip_address: 10 | schema: str 11 | value: '127.0.0.1' 12 | volume_driver: 13 | schema: str 14 | value: 'cinder.volume.drivers.lvm.LVMISCSIDriver' 15 | volume_group: 16 | schema: str 17 | value: 'cinder-volumes' 18 | iscsi_helper: 19 | schema: str 20 | value: 'tgtadm' 21 | 22 | use_iscsi_backend: 23 | schema: bool 24 | value: true 25 | 26 | git: 27 | schema: {repository: str!, branch: str!} 28 | value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} 29 | 30 | ip: 31 | schema: str! 32 | value: 33 | # ssh_key: 34 | # schema: str! 35 | # value: 36 | # ssh_user: 37 | # schema: str! 38 | # value: 39 | 40 | tags: [resource/cinder_volume_service, resources/cinder_volume, resources/cinder] 41 | -------------------------------------------------------------------------------- /solar/orchestration/runner.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
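# # This module builds the single shared Celery app: the system_log and # orchestration task modules attach to it via `include`, and the same Redis # database (db 1) serves as both broker and result backend. A worker could # then be started with something like `celery worker -A # solar.orchestration.runner` (an illustrative invocation; check the # project's own scripts for the exact command).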
14 | 15 | from celery import Celery 16 | 17 | from solar.config import C 18 | 19 | _url = 'redis://{}:{}/1'.format(C.redis.host, C.redis.port) 20 | 21 | app = Celery( 22 | include=['solar.system_log.tasks', 'solar.orchestration.tasks'], 23 | backend=_url, 24 | broker=_url) 25 | app.conf.update(CELERY_ACCEPT_CONTENT=['json']) 26 | app.conf.update(CELERY_TASK_SERIALIZER='json') 27 | -------------------------------------------------------------------------------- /examples/hosts_file/hosts.py: -------------------------------------------------------------------------------- 1 | import click 2 | import sys 3 | import time 4 | 5 | from solar.core import signals 6 | from solar.core.resource import virtual_resource as vr 7 | from solar.dblayer.model import ModelMeta 8 | 9 | 10 | def run(): 11 | ModelMeta.remove_all() 12 | 13 | resources = vr.create('nodes', 'templates/nodes_with_transports.yaml', {'count': 2}) 14 | nodes = [x for x in resources if x.name.startswith('node')] 15 | node1, node2 = nodes 16 | 17 | hosts1 = vr.create('hosts_file1', 'resources/hosts_file', {})[0] 18 | hosts2 = vr.create('hosts_file2', 'resources/hosts_file', {})[0] 19 | node1.connect(hosts1, { 20 | 'name': 'hosts:name', 21 | 'ip': 'hosts:ip', 22 | }) 23 | 24 | node2.connect(hosts1, { 25 | 'name': 'hosts:name', 26 | 'ip': 'hosts:ip', 27 | }) 28 | 29 | node1.connect(hosts2, { 30 | 'name': 'hosts:name', 31 | 'ip': 'hosts:ip', 32 | }) 33 | 34 | node2.connect(hosts2, { 35 | 'name': 'hosts:name', 36 | 'ip': 'hosts:ip', 37 | }) 38 | 39 | 40 | run() 41 | -------------------------------------------------------------------------------- /resources/keystone_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_puppet 2 | handler: puppet 3 | puppet_module: keystone 4 | version: 1.0.0 5 | input: 6 | admin_token: 7 | schema: str! 8 | value: admin_token 9 | db_user: 10 | schema: str! 11 | value: 12 | db_password: 13 | schema: str! 14 | value: 15 | db_name: 16 | schema: str! 17 | value: 18 | db_host: 19 | schema: str! 20 | value: 21 | db_port: 22 | schema: int! 23 | value: 24 | 25 | admin_port: 26 | schema: int! 27 | value: 35357 28 | port: 29 | schema: int! 30 | value: 5000 31 | 32 | module: 33 | schema: {name: str!, type: str, url: str, ref: str} 34 | value: {name: 'keystone', type: 'git', url: 'https://github.com/openstack/puppet-keystone', ref: '5.1.0'} 35 | 36 | ip: 37 | schema: str! 38 | value: 39 | # ssh_key: 40 | # schema: str! 41 | # value: 42 | # ssh_user: 43 | # schema: str! 
44 | # value: 45 | 46 | tags: [resource/keystone_service, resources/keystone] 47 | -------------------------------------------------------------------------------- /resources/keystone_puppet/actions/run.pp: -------------------------------------------------------------------------------- 1 | $resource = hiera($::resource_name) 2 | 3 | $ip = $resource['input']['ip'] 4 | $admin_token = $resource['input']['admin_token'] 5 | $db_user = $resource['input']['db_user'] 6 | $db_host = $resource['input']['db_host'] 7 | $db_password = $resource['input']['db_password'] 8 | $db_name = $resource['input']['db_name'] 9 | $db_port = $resource['input']['db_port'] 10 | $admin_port = $resource['input']['admin_port'] 11 | $port = $resource['input']['port'] 12 | 13 | class {'keystone': 14 | package_ensure => 'present', 15 | verbose => true, 16 | catalog_type => 'sql', 17 | admin_token => $admin_token, 18 | database_connection => "mysql://$db_user:$db_password@$db_host:$db_port/$db_name", 19 | public_port => "$port", 20 | admin_port => "$admin_port", 21 | token_driver => 'keystone.token.persistence.backends.sql.Token' 22 | } 23 | 24 | #file { '/etc/keystone/keystone-exports': 25 | # owner => 'root', 26 | # group => 'root', 27 | # content => template('keystone/exports.erb') 28 | #} 29 | -------------------------------------------------------------------------------- /solar/errors.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
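# # The classes below form a small exception hierarchy: everything subclasses # SolarError, so callers can catch SolarError alone to handle any # solar-specific failure.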
14 | 15 | 16 | class SolarError(Exception): 17 | pass 18 | 19 | 20 | class CannotFindID(SolarError): 21 | pass 22 | 23 | 24 | class CannotFindExtension(SolarError): 25 | pass 26 | 27 | 28 | class ValidationError(SolarError): 29 | pass 30 | 31 | 32 | class LexError(SolarError): 33 | pass 34 | 35 | 36 | class ParseError(SolarError): 37 | pass 38 | 39 | 40 | class ExecutionTimeout(SolarError): 41 | pass 42 | -------------------------------------------------------------------------------- /resources/glance_api_service/actions/run.yaml: -------------------------------------------------------------------------------- 1 | 2 | - hosts: [{{host}}] 3 | sudo: yes 4 | tasks: 5 | - docker: 6 | command: /bin/bash -c "glance-manage db_sync && /usr/bin/glance-api" 7 | #command: /usr/bin/glance-api 8 | name: {{ resource_name }} 9 | image: {{ image }} 10 | state: running 11 | expose: 12 | - 9393 13 | ports: 14 | - {{ ports.value[0]['value'][0]['value'] }}:9393 15 | {% if host_binds.value %} 16 | volumes: 17 | # TODO: host_binds might need more work 18 | # Currently it's not that trivial to pass custom src: dst here 19 | # (when a config variable is passed here from other resource) 20 | # so we mount it to the same directory as on host 21 | {% for bind in host_binds.value %} 22 | - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} 23 | {% endfor %} 24 | {% endif %} 25 | 26 | - name: wait for glance api 27 | wait_for: host={{ ip }} port={{ ports.value[0]['value'][0]['value'] }} timeout=20 28 | -------------------------------------------------------------------------------- /resources/cinder_glance_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | id: cinder_glance_puppet 2 | handler: puppet 3 | puppet_module: cinder 4 | version: 1.0.0 5 | input: 6 | glance_api_version: 7 | schema: int 8 | value: 2 9 | glance_num_retries: 10 | schema: int 11 | value: 0 12 | glance_api_insecure: 13 | schema: bool 14 | value: false 15 | glance_api_ssl_compression: 16 | schema: bool 17 | value: false 18 | glance_request_timeout: 19 | schema: str 20 | value: 21 | 22 | git: 23 | schema: {repository: str!, branch: str!} 24 | value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'} 25 | 26 | ip: 27 | schema: str! 28 | value: 29 | # ssh_key: 30 | # schema: str! 31 | # value: 32 | # ssh_user: 33 | # schema: str! 34 | # value: 35 | 36 | glance_api_servers_port: 37 | schema: int 38 | value: 9292 39 | glance_api_servers_host: 40 | schema: 'str' 41 | value: 'localhost' 42 | 43 | tags: [resource/cinder_glance_service, resources/cinder_glance, resources/cinder] 44 | -------------------------------------------------------------------------------- /examples/riak/README.md: -------------------------------------------------------------------------------- 1 | Example of a 3-node riak cluster. 2 | 3 | First, run: 4 | 5 | `python examples/riak/riaks.py deploy` 6 | 7 | It will prepare the riak nodes, etc.
8 | 9 | Then you can continue with standard solar things: 10 | 11 | ``` 12 | solar changes stage -d 13 | solar changes process 14 | solar orch run-once last 15 | watch -n 1 solar orch report last 16 | ``` 17 | 18 | Wait until all actions have state `SUCCESS`. 19 | After that you can add HAProxy on each node: 20 | 21 | `python examples/riak/riaks.py add_haproxies` 22 | 23 | Then run the standard solar workflow again: 24 | 25 | ``` 26 | solar changes stage -d 27 | solar changes process 28 | solar orch run-once last 29 | watch -n 1 solar orch report last 30 | ``` 31 | 32 | 33 | Wait until all actions have state `SUCCESS`. 34 | After that you have a basic 3-node riak cluster running. 35 | 36 | You can also modify the riak HTTP port with: 37 | 38 | `solar resource update riak_service1 riak_port_http=18100` 39 | 40 | And then run the standard workflow again: 41 | 42 | ``` 43 | solar changes stage -d 44 | solar changes process 45 | solar orch run-once last 46 | watch -n 1 solar orch report last 47 | ``` 48 | -------------------------------------------------------------------------------- /doc/removal.md: -------------------------------------------------------------------------------- 1 | # Problems to solve with removal operation 2 | 3 | 1. It is tricky to figure out what to do with data that will be left when 4 | you remove a resource that is a parent for other resources. 5 | 6 | The basic example is a node resource. 7 | If hosts_file1 is subscribed to node properties and we just remove the 8 | node, hosts_file1 will be left with corrupted data. 9 | Validation is not a solution, because we cannot expect the user to remove 10 | each resource one by one. 11 | 12 | log task=hosts_file1.run uid=c1545041-a5c5-400e-8c46-ad52d871e6c3 13 | ++ ip: None 14 | ++ ssh_user: None 15 | ++ hosts: [{u'ip': None, u'name': u'riak_server1.solar'}] 16 | ++ ssh_key: None 17 | 18 | Proposed solution: 19 | 20 | Add `solar res remove node1 -r` where *r* stands for recursive. 21 | During this operation we will find all children of the specified resource, and 22 | stage them for removal as well. 23 | 24 | 2. If so, we need to be able to determine what to do with a child resource 25 | on removal.
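A hypothetical sketch of what such a dependency could look like, reusing the events syntax from templates/haproxy.yaml (no remove-triggered event type is implemented yet): - type: react_on parent_action: 'hosts_file1.remove' state: 'success' depend_action: 'node1.remove'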
26 | Basically this seems like another type of event: 27 | hosts1.remove -> success -> node1.remove 28 | And 29 | hosts2.update -> success -> node2.remove 30 | -------------------------------------------------------------------------------- /resources/glance_config/templates/glance-api.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | default_store = file 3 | bind_host = 0.0.0.0 4 | bind_port = {{ api_port }} 5 | log_file = /var/log/glance/api.log 6 | backlog = 4096 7 | registry_host = {{ ip }} 8 | registry_port = {{ registry_port }} 9 | registry_client_protocol = http 10 | 11 | delayed_delete = False 12 | scrub_time = 43200 13 | scrubber_datadir = /var/lib/glance/scrubber 14 | image_cache_dir = /var/lib/glance/image-cache/ 15 | 16 | [database] 17 | connection = mysql://{{ mysql_user }}:{{ mysql_password }}@{{ mysql_ip }}/{{ mysql_db }} 18 | backend = mysql 19 | 20 | [keystone_authtoken] 21 | auth_uri = http://{{ keystone_ip }}:{{ keystone_port }}/v2.0 22 | identity_uri = http://{{ keystone_ip }}:{{ keystone_admin_port }} 23 | admin_tenant_name = {{ keystone_admin_tenant }} 24 | admin_user = {{ keystone_admin_user }} 25 | admin_password = {{ keystone_admin_password }} 26 | revocation_cache_time = 10 27 | 28 | [paste_deploy] 29 | flavor=keystone+cachemanagement 30 | 31 | [glance_store] 32 | filesystem_store_datadir = /var/lib/glance/images/ 33 | sheepdog_store_address = localhost 34 | sheepdog_store_port = 7000 35 | sheepdog_store_chunk_size = 64 36 | -------------------------------------------------------------------------------- /solar/system_log/tasks.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
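# # The two tasks below are the celery callbacks for the system log: # error_logitem marks a staged log item as failed, while commit_logitem moves # it into the committed history. Both receive a task_uuid and use only the # segment after the last colon as the log item id.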
14 | 15 | from solar.orchestration.runner import app 16 | from solar.system_log.operations import move_to_commited 17 | from solar.system_log.operations import set_error 18 | 19 | __all__ = ['error_logitem', 'commit_logitem'] 20 | 21 | 22 | @app.task(name='error_logitem') 23 | def error_logitem(task_uuid): 24 | return set_error(task_uuid.rsplit(':', 1)[-1]) 25 | 26 | 27 | @app.task(name='commit_logitem') 28 | def commit_logitem(task_uuid): 29 | return move_to_commited(task_uuid.rsplit(':', 1)[-1]) 30 | -------------------------------------------------------------------------------- /bootstrap/playbooks/files/minimize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | echo "==> Installed packages before cleanup" 4 | dpkg --get-selections | grep -v deinstall 5 | 6 | # Remove some packages to get a minimal install 7 | echo "==> Removing all linux kernels except the current one" 8 | dpkg --list | awk '{ print $2 }' | grep 'linux-image-3.*-generic' | grep -v $(uname -r) | xargs apt-get -y purge 9 | echo "==> Removing linux source" 10 | dpkg --list | awk '{ print $2 }' | grep linux-source | xargs apt-get -y purge 11 | echo "==> Removing documentation" 12 | dpkg --list | awk '{ print $2 }' | grep -- '-doc$' | xargs apt-get -y purge 13 | echo "==> Removing obsolete networking components" 14 | apt-get -y purge ppp pppconfig pppoeconf 15 | echo "==> Removing other oddities" 16 | apt-get -y purge popularity-contest installation-report landscape-common wireless-tools wpasupplicant ubuntu-serverguide 17 | 18 | # Clean up the apt cache 19 | apt-get -y autoremove --purge 20 | apt-get -y autoclean 21 | apt-get -y clean 22 | 23 | echo "==> Removing man pages" 24 | rm -rf /usr/share/man/* 25 | echo "==> Removing anything in /usr/src" 26 | rm -rf /usr/src/* 27 | echo "==> Removing any docs" 28 | rm -rf /usr/share/doc/* 29 | -------------------------------------------------------------------------------- /resources/docker_container/actions/run.yaml: -------------------------------------------------------------------------------- 1 | 2 | - hosts: [{{host}}] 3 | sudo: yes 4 | tasks: 5 | - docker: 6 | name: {{ resource_name }} 7 | image: {{ image }} 8 | state: running 9 | net: host 10 | {% if ports %} 11 | ports: 12 | {% for port in ports %} 13 | - {{ port }}:{{ port }} 14 | {% endfor %} 15 | expose: 16 | {% for port in ports %} 17 | - {{ port }} 18 | {% endfor %} 19 | {% endif %} 20 | 21 | {% if host_binds.value %} 22 | volumes: 23 | # TODO: host_binds might need more work 24 | # Currently it's not that trivial to pass custom src: dst here 25 | # (when a config variable is passed here from other resource) 26 | # so we mount it to the same directory as on host 27 | {% for bind in host_binds.value %} 28 | - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} 29 | {% endfor %} 30 | {% endif %} 31 | 32 | {% if env %} 33 | env: 34 | {% for key, value in env.iteritems() %} 35 | {{ key }}: {{ value }} 36 | {% endfor %} 37 | {% endif %} 38 | -------------------------------------------------------------------------------- /resources/docker_container/actions/update.yaml: -------------------------------------------------------------------------------- 1 | 2 | - hosts: [{{host}}] 3 | sudo: yes 4 | tasks: 5 | - docker: 6 | name: {{ resource_name }} 7 | image: {{ image }} 8 | state: reloaded 9 | net: host 10 | {% if ports %} 11 | ports: 12 | {% for port in ports %} 13 | - {{ port }}:{{ port }} 14 | {% endfor %} 15 |
expose: 16 | {% for port in ports %} 17 | - {{ port }} 18 | {% endfor %} 19 | {% endif %} 20 | 21 | {% if host_binds.value %} 22 | volumes: 23 | # TODO: host_binds might need more work 24 | # Currently it's not that trivial to pass custom src: dst here 25 | # (when a config variable is passed here from other resource) 26 | # so we mount it to the same directory as on host 27 | {% for bind in host_binds.value %} 28 | - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} 29 | {% endfor %} 30 | {% endif %} 31 | 32 | {% if env %} 33 | env: 34 | {% for key, value in env.iteritems() %} 35 | {{ key }}: {{ value }} 36 | {% endfor %} 37 | {% endif %} 38 | -------------------------------------------------------------------------------- /solar/dblayer/standalone_session_wrapper.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | """ 16 | Starts a single session and ends it with `atexit`. 17 | Can be used from cli / examples; 18 | shouldn't be used from long-running processes (workers etc). 19 | 20 | """ 21 | 22 | 23 | def create_all(): 24 | 25 | import sys 26 | if sys.executable.startswith(('python', )): 27 | # auto add session to only standalone python runs 28 | return 29 | 30 | from solar.dblayer.model import ModelMeta 31 | 32 | import atexit 33 | 34 | ModelMeta.session_start() 35 | 36 | atexit.register(ModelMeta.session_end) 37 | -------------------------------------------------------------------------------- /resources/glance_registry_service/actions/run.yaml: -------------------------------------------------------------------------------- 1 | 2 | - hosts: [{{host}}] 3 | sudo: yes 4 | tasks: 5 | - docker: 6 | #command: /bin/bash -c "glance-manage db_sync && /usr/bin/glance-registry" 7 | command: /usr/bin/glance-registry 8 | name: {{ resource_name }} 9 | image: {{ image }} 10 | state: running 11 | net: host 12 | {% if ports.value %} 13 | ports: 14 | {% for port in ports.value %} 15 | {% for p in port['value'] %} 16 | - {{ p['value'] }}:{{ p['value'] }} 17 | {% endfor %} 18 | {% endfor %} 19 | {% endif %} 20 | {% if host_binds.value %} 21 | volumes: 22 | # TODO: host_binds might need more work 23 | # Currently it's not that trivial to pass custom src: dst here 24 | # (when a config variable is passed here from other resource) 25 | # so we mount it to the same directory as on host 26 | {% for bind in host_binds.value %} 27 | - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }} 28 | {% endfor %} 29 | {% endif %} 30 | 31 | - name: wait for glance registry 32 | wait_for: host={{ ip }} port=9191 timeout=20 33 | -------------------------------------------------------------------------------- /solar/computable_inputs/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc.
2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | from enum import Enum 16 | import os 17 | 18 | ComputablePassedTypes = Enum('ComputablePassedTypes', 'values full') 19 | 20 | HELPERS_PATH = os.path.normpath( 21 | os.path.join(os.path.realpath(__file__), '..', 'helpers')) 22 | 23 | 24 | class ComputableInputProcessor(object): 25 | 26 | def __init__(self): 27 | pass 28 | 29 | def process(self, resource_name, computable_type, funct, data): 30 | if funct is None or funct == 'noop': 31 | return data 32 | return self.run(resource_name, computable_type, funct, data) 33 | -------------------------------------------------------------------------------- /solar/test/test_celery_executor.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import networkx as nx 16 | 17 | from mock import patch 18 | from pytest import fixture 19 | from solar.orchestration import executor 20 | 21 | 22 | @fixture 23 | def dg(): 24 | ex = nx.DiGraph() 25 | ex.add_node('t1', args=['t'], status='PENDING', type='echo') 26 | ex.graph['uid'] = 'some_string' 27 | return ex 28 | 29 | 30 | @patch.object(executor, 'app') 31 | def test_celery_executor(mapp, dg): 32 | """Just check that it doesn't fail for now.""" 33 | assert executor.celery_executor(dg, ['t1']) 34 | assert dg.node['t1']['status'] == 'INPROGRESS' 35 | -------------------------------------------------------------------------------- /run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2015 Mirantis, Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License.
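# # The script below creates (or reuses) a python2 virtualenv, installs the test # requirements through pip-accel, and then runs py.test with coverage over the # solar package, pointing SOLAR_CONFIG and CONFIG_FILE at the jenkins config.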
15 | 16 | set -e 17 | 18 | 19 | VENV=x-venv 20 | WORKSPACE=${WORKSPACE:-"/vagrant"} 21 | CONFIG_FILE=$WORKSPACE/jenkins-config.yaml 22 | 23 | # Setup a proper path, I call my virtualenv dir "$VENV" and 24 | # I've got the virtualenv command installed in /usr/local/bin 25 | PATH=$WORKSPACE/venv/bin:/usr/local/bin:$PATH 26 | if [ ! -d "$VENV" ]; then 27 | virtualenv -p python2 $VENV 28 | fi 29 | 30 | . $VENV/bin/activate 31 | 32 | pip install pip-accel 33 | pip-accel install -r test-requirements.txt 34 | 35 | 36 | SOLAR_CONFIG=../.config CONFIG_FILE=$CONFIG_FILE py.test --cov=solar -s solar 37 | -------------------------------------------------------------------------------- /resources/glance_config/meta.yaml: -------------------------------------------------------------------------------- 1 | id: glance_config 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | ip: 6 | schema: str! 7 | value: 8 | # ssh_user: 9 | # schema: str! 10 | # value: [] 11 | # ssh_key: 12 | # schema: str! 13 | # value: [] 14 | 15 | config_dir: 16 | schema: {src: str!, dst: str!, mode: str} 17 | value: {src: /etc/solar/glance, dst: /etc/glance, mode: rw} 18 | api_port: 19 | schema: int! 20 | value: 9292 21 | registry_port: 22 | schema: int! 23 | value: 9191 24 | keystone_ip: 25 | schema: str! 26 | value: 27 | keystone_port: 28 | schema: int! 29 | value: 5000 30 | keystone_admin_user: 31 | schema: str! 32 | value: glance_admin 33 | keystone_admin_password: 34 | schema: str! 35 | value: password1234 36 | keystone_admin_port: 37 | schema: int! 38 | value: 39 | keystone_admin_tenant: 40 | schema: str! 41 | value: service_admins 42 | mysql_ip: 43 | schema: str! 44 | value: 45 | mysql_db: 46 | schema: str! 47 | value: 48 | mysql_user: 49 | schema: str! 50 | value: 51 | mysql_password: 52 | schema: str! 53 | value: 54 | -------------------------------------------------------------------------------- /solar/core/handlers/naive_sync.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright 2015 Mirantis, Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | 16 | from solar.core.handlers.base import BaseHandler 17 | 18 | 19 | class NaiveSync(BaseHandler): 20 | 21 | def action(self, resource, action_name): 22 | # it is inconsistent with handlers because action_name 23 | # is a totally useless piece of info here 24 | 25 | args = resource.args 26 | # this src seems unintuitive to me; without context it is impossible 27 | # to understand where src comes from 28 | for item in args['sources']: 29 | self.transport_sync.copy(resource, item['src'], item['dst']) 30 | self.transport_sync.sync_all() 31 | -------------------------------------------------------------------------------- /bootstrap/vagrant_plugins/noop.rb: -------------------------------------------------------------------------------- 1 | # Noop Vagrant plugins are used in case Vagrant does not 2 | # have access to VMs (e.g.
there is no information about ip), 3 | # so it just runs VMs and does not try to perform additional 4 | # actions using SSH. 5 | 6 | class NoopCommunicator < Vagrant.plugin("2", :communicator) 7 | 8 | def ready? 9 | true 10 | end 11 | 12 | def wait_for_ready(timeout) 13 | true 14 | end 15 | 16 | end 17 | 18 | 19 | class NoopGuest < Vagrant.plugin("2", :guest) 20 | 21 | def self.change_host_name(*args) 22 | true 23 | end 24 | 25 | def self.configure_networks(*args) 26 | true 27 | end 28 | 29 | def self.mount_virtualbox_shared_folder(*args) 30 | true 31 | end 32 | 33 | end 34 | 35 | 36 | class NoopCommunicatorPlugin < Vagrant.plugin("2") 37 | 38 | name 'Noop communicator/guest' 39 | description 'Noop communicator/guest' 40 | 41 | communicator('noop') do 42 | NoopCommunicator 43 | end 44 | 45 | guest 'noop_guest' do 46 | NoopGuest 47 | end 48 | 49 | guest_capability 'noop_guest', 'change_host_name' do 50 | NoopGuest 51 | end 52 | 53 | guest_capability 'noop_guest', 'configure_networks' do 54 | NoopGuest 55 | end 56 | 57 | guest_capability 'noop_guest', 'mount_virtualbox_shared_folder' do 58 | NoopGuest 59 | end 60 | 61 | end 62 | -------------------------------------------------------------------------------- /resources/lxc_container/meta.yaml: -------------------------------------------------------------------------------- 1 | id: lxc_container 2 | handler: ansible_playbook 3 | version: 1.0.0 4 | actions: 5 | input: 6 | ip: 7 | schema: str! 8 | value: 9 | # ssh_key: 10 | # schema: str! 11 | # value: 12 | # ssh_user: 13 | # schema: str! 14 | # value: 15 | ansible_ssh_host: 16 | schema: str! 17 | value: 18 | user: 19 | schema: str! 20 | value: 21 | user_key: 22 | schema: str! 23 | value: 24 | mgmt_ip: 25 | schema: str! 26 | value: 27 | physical_host: 28 | schema: str! 29 | value: 30 | container_address: 31 | schema: str! 32 | value: 33 | container_name: 34 | schema: str! 35 | value: 36 | inventory_hostname: 37 | schema: str! 38 | value: 39 | container_networks: 40 | schema: {} 41 | value: 42 | properties: 43 | schema: {} 44 | value: 45 | pub_key: 46 | schema: str! 47 | value: 48 | requires: 49 | schema: str 50 | value: 51 | roles: 52 | schema: [{value: str}] 53 | value: 54 | - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/lxc_container_create 55 | - https://github.com/stackforge/os-ansible-deployment/trunk/playbooks/roles/lxc_container_destroy 56 | -------------------------------------------------------------------------------- /bootstrap/README.md: -------------------------------------------------------------------------------- 1 | # Solar image building 2 | 3 | To build for VirtualBox, install Packer (https://www.packer.io/): 4 | ``` 5 | cp vagrant-settings.yaml_defaults vagrant-settings.yaml 6 | sed -i 's/master_image:.*$/master_image: solar-master/g' ./vagrant-settings.yaml 7 | sed -i 's/slaves_image:.*$/slaves_image: solar-master/g' ./vagrant-settings.yaml 8 | cd bootstrap 9 | packer build -only=virtualbox-iso solar-master.json 10 | mv solar-master-virtualbox.box ../solar-master.box 11 | cd .. 12 | vagrant box add solar-master solar-master.box --provider virtualbox 13 | vagrant up --provider virtualbox 14 | ``` 15 | 16 | To build for libvirt, replace the following commands: 17 | ``` 18 | packer build -only=qemu solar-master.json 19 | mv solar-master-libvirt.box ../solar-master.box 20 | cd ..
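# the qemu builder emits a libvirt-format box; the add/up steps below mirror the VirtualBox flow above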
21 | vagrant box add solar-master solar-master.box --provider libvirt 22 | vagrant up --provider libvirt 23 | 24 | ``` 25 | 26 | If Vagrant throws an error about `vboxsf`, try this: 27 | ``` 28 | vagrant plugin install vagrant-vbguest 29 | ``` 30 | (see https://github.com/shiguredo/packer-templates/issues/16). 31 | 32 | If you're rebuilding the same box, make sure Vagrant reimports it: 33 | ``` 34 | vagrant box remove solar-master 35 | ``` 36 | 37 | Note that you can also set the `PACKER_LOG=debug` and/or `VAGRANT_LOG=debug` 38 | shell environment variables to get more information. 39 | -------------------------------------------------------------------------------- /resources/keystone_config/templates/logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root,access 3 | 4 | [handlers] 5 | keys=production,file,access_file,devel 6 | 7 | [formatters] 8 | keys=minimal,normal,debug 9 | 10 | 11 | ########### 12 | # Loggers # 13 | ########### 14 | 15 | [logger_root] 16 | level=WARNING 17 | handlers=file 18 | 19 | [logger_access] 20 | level=INFO 21 | qualname=access 22 | handlers=access_file 23 | 24 | 25 | ################ 26 | # Log Handlers # 27 | ################ 28 | 29 | [handler_production] 30 | class=handlers.SysLogHandler 31 | level=ERROR 32 | formatter=normal 33 | args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) 34 | 35 | [handler_file] 36 | class=handlers.WatchedFileHandler 37 | level=WARNING 38 | formatter=normal 39 | args=('error.log',) 40 | 41 | [handler_access_file] 42 | class=handlers.WatchedFileHandler 43 | level=INFO 44 | formatter=minimal 45 | args=('access.log',) 46 | 47 | [handler_devel] 48 | class=StreamHandler 49 | level=NOTSET 50 | formatter=debug 51 | args=(sys.stdout,) 52 | 53 | 54 | ################## 55 | # Log Formatters # 56 | ################## 57 | 58 | [formatter_minimal] 59 | format=%(message)s 60 | 61 | [formatter_normal] 62 | format=(%(name)s): %(asctime)s %(levelname)s %(message)s 63 | 64 | [formatter_debug] 65 | format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s 66 | -------------------------------------------------------------------------------- /resources/glance_config/templates/policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "context_is_admin": "role:admin", 3 | "default": "", 4 | 5 | "add_image": "", 6 | "delete_image": "", 7 | "get_image": "", 8 | "get_images": "", 9 | "modify_image": "", 10 | "publicize_image": "role:admin", 11 | "copy_from": "", 12 | 13 | "download_image": "", 14 | "upload_image": "", 15 | 16 | "delete_image_location": "", 17 | "get_image_location": "", 18 | "set_image_location": "", 19 | 20 | "add_member": "", 21 | "delete_member": "", 22 | "get_member": "", 23 | "get_members": "", 24 | "modify_member": "", 25 | 26 | "manage_image_cache": "role:admin", 27 | 28 | "get_task": "", 29 | "get_tasks": "", 30 | "add_task": "", 31 | "modify_task": "", 32 | 33 | "get_metadef_namespace": "", 34 | "get_metadef_namespaces":"", 35 | "modify_metadef_namespace":"", 36 | "add_metadef_namespace":"", 37 | 38 | "get_metadef_object":"", 39 | "get_metadef_objects":"", 40 | "modify_metadef_object":"", 41 | "add_metadef_object":"", 42 | 43 | "list_metadef_resource_types":"", 44 | "get_metadef_resource_type":"", 45 | "add_metadef_resource_type_association":"", 46 | 47 | "get_metadef_property":"", 48 | "get_metadef_properties":"", 49 | "modify_metadef_property":"", 50 |
"add_metadef_property":"" 51 | 52 | } 53 | -------------------------------------------------------------------------------- /resources/glance_config/templates/schema-image.json: -------------------------------------------------------------------------------- 1 | { 2 | "kernel_id": { 3 | "type": "string", 4 | "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", 5 | "description": "ID of image stored in Glance that should be used as the kernel when booting an AMI-style image." 6 | }, 7 | "ramdisk_id": { 8 | "type": "string", 9 | "pattern": "^([0-9a-fA-F]){8}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){4}-([0-9a-fA-F]){12}$", 10 | "description": "ID of image stored in Glance that should be used as the ramdisk when booting an AMI-style image." 11 | }, 12 | "instance_uuid": { 13 | "type": "string", 14 | "description": "ID of instance used to create this image." 15 | }, 16 | "architecture": { 17 | "description": "Operating system architecture as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", 18 | "type": "string" 19 | }, 20 | "os_distro": { 21 | "description": "Common name of operating system distribution as specified in http://docs.openstack.org/trunk/openstack-compute/admin/content/adding-images.html", 22 | "type": "string" 23 | }, 24 | "os_version": { 25 | "description": "Operating system version as specified by the distributor", 26 | "type": "string" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /templates/haproxy.yaml: -------------------------------------------------------------------------------- 1 | id: haproxy 2 | 3 | resources: 4 | - id: haproxy_config{{index}} 5 | from: resources/haproxy_config 6 | location: {{node}} 7 | values: 8 | ip: '{{node}}::ip' 9 | config:protocol: 10 | {% for config in service_configs %} 11 | - {{config}}::protocol 12 | {% endfor %} 13 | config:listen_port: 14 | {% for config in service_configs %} 15 | - {{config}}::listen_port 16 | {% endfor %} 17 | config:name: 18 | {% for config in service_configs %} 19 | - {{config}}::name 20 | {% endfor %} 21 | config:backends: 22 | {% for config in service_configs %} 23 | - {{config}}::backends 24 | {% endfor %} 25 | 26 | - id: haproxy_service{{index}} 27 | location: {{node}} 28 | from: resources/haproxy_service 29 | values: 30 | ip: '{{node}}::ip' 31 | 32 | events: 33 | - type: depends_on 34 | parent_action: 'haproxy_service{{index}}.run' 35 | state: 'success' 36 | depend_action: 'haproxy_config{{index}}.run' 37 | 38 | - type: react_on 39 | parent_action: 'haproxy_config{{index}}.run' 40 | state: 'success' 41 | depend_action: 'haproxy_service{{index}}.apply_config' 42 | 43 | - type: react_on 44 | parent_action: 'haproxy_config{{index}}.update' 45 | state: 'success' 46 | depend_action: 'haproxy_service{{index}}.apply_config' 47 | -------------------------------------------------------------------------------- /resources/glance_config/templates/glance-registry-paste.ini: -------------------------------------------------------------------------------- 1 | # Use this pipeline for no auth - DEFAULT 2 | [pipeline:glance-registry] 3 | #pipeline = osprofiler unauthenticated-context registryapp 4 | pipeline = authtoke context registryapp 5 | 6 | # Use this pipeline for keystone auth 7 | [pipeline:glance-registry-keystone] 8 | pipeline = osprofiler authtoken context registryapp 9 | 10 | # Use this pipeline for authZ only. 
This means that the registry will treat a 11 | # user as authenticated without making requests to keystone to reauthenticate 12 | # the user. 13 | [pipeline:glance-registry-trusted-auth] 14 | pipeline = osprofiler context registryapp 15 | 16 | [app:registryapp] 17 | paste.app_factory = glance.registry.api:API.factory 18 | 19 | [filter:context] 20 | paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory 21 | 22 | [filter:unauthenticated-context] 23 | paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory 24 | 25 | [filter:authtoken] 26 | paste.filter_factory = keystonemiddleware.auth_token:filter_factory 27 | identity_uri = http://{{ keystone_ip }}:{{ keystone_admin_port }} 28 | admin_user = {{ keystone_admin_user }} 29 | admin_tenant_name = {{ keystone_admin_tenant }} 30 | admin_password = {{ keystone_admin_password }} 31 | 32 | [filter:osprofiler] 33 | paste.filter_factory = osprofiler.web:WsgiMiddleware.factory 34 | hmac_keys = SECRET_KEY 35 | enabled = yes 36 | -------------------------------------------------------------------------------- /solar/core/log.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import logging 16 | import sys 17 | 18 | 19 | log = logging.getLogger('solar') 20 | 21 | 22 | def setup_logger(): 23 | handler = logging.FileHandler('solar.log') 24 | handler.setLevel(logging.DEBUG) 25 | formatter = logging.Formatter( 26 | '%(asctime)s %(levelname)s %(funcName)s' 27 | ' (%(filename)s::%(lineno)s)::%(message)s') 28 | handler.setFormatter(formatter) 29 | log.addHandler(handler) 30 | 31 | print_formatter = logging.Formatter( 32 | '%(levelname)s (%(filename)s::%(lineno)s)::%(message)s') 33 | print_handler = logging.StreamHandler(stream=sys.stdout) 34 | print_handler.setFormatter(print_formatter) 35 | log.addHandler(print_handler) 36 | 37 | log.setLevel(logging.DEBUG) 38 | 39 | setup_logger() 40 | -------------------------------------------------------------------------------- /resources/keystone_service_endpoint/meta.yaml: -------------------------------------------------------------------------------- 1 | id: keystone_service_endpoint 2 | handler: ansible 3 | version: 1.0.0 4 | input: 5 | keystone_host: 6 | schema: str! 7 | value: 8 | keystone_admin_port: 9 | schema: int! 10 | value: 11 | admin_token: 12 | schema: str! 13 | value: 14 | 15 | endpoint_name: 16 | schema: str! 17 | value: 18 | type: 19 | schema: str! 20 | value: 21 | description: 22 | schema: str! 23 | value: 24 | public_ip: 25 | schema: str! 26 | value: 27 | public_port: 28 | schema: int! 29 | value: 30 | publicurl: 31 | schema: str! 32 | value: http://{{public_ip}}:{{public_port}}/v2.0 33 | internal_ip: 34 | schema: str! 35 | value: 36 | internal_port: 37 | schema: int! 38 | value: 39 | internalurl: 40 | schema: str! 
41 | value: http://{{internal_ip}}:{{internal_port}}/v2.0 42 | admin_ip: 43 | schema: str! 44 | value: 45 | admin_port: 46 | schema: int! 47 | value: 48 | adminurl: 49 | schema: str! 50 | value: http://{{admin_ip}}:{{admin_port}}/v2.0 51 | 52 | ip: 53 | schema: str! 54 | value: 55 | # ssh_key: 56 | # schema: str! 57 | # value: 58 | # ssh_user: 59 | # schema: str! 60 | # value: 61 | 62 | tags: [resource/keystone_service_endpoint, resources/keystone] 63 | -------------------------------------------------------------------------------- /solar/core/actions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Copyright 2015 Mirantis, Inc. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | 16 | import handlers 17 | 18 | from solar.core.transports.bat import BatRunTransport 19 | from solar.core.transports.bat import BatSyncTransport 20 | 21 | _default_transports = { 22 | # 'sync': RsyncSyncTransport, 23 | # 'sync': SSHSyncTransport, 24 | # 'run': SSHRunTransport, 25 | # 'run': SolarAgentRunTransport, 26 | # 'sync': SolarAgentSyncTransport 27 | 'run': BatRunTransport, 28 | 'sync': BatSyncTransport 29 | } 30 | 31 | 32 | def resource_action(resource, action): 33 | handler = resource.db_obj.handler or 'none' 34 | with handlers.get(handler)([resource], _default_transports) as h: 35 | return h.action(resource, action) 36 | 37 | 38 | def tag_action(tag, action): 39 | # TODO 40 | pass 41 | -------------------------------------------------------------------------------- /solar/orchestration/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
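# # Usage sketch for the helper below (hypothetical call site): given a plan # graph that has graph['name'] set and a 'status' attribute on every node, # write_graph(plan) colors the nodes by status and shells out to graphviz # (`tred`, `dot`) to render '<name>.svg'.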
14 | 15 | import subprocess 16 | 17 | import networkx as nx 18 | 19 | 20 | def write_graph(plan): 21 | """Writes graph to dot then to svg 22 | 23 | :param plan: networkx Graph object 24 | """ 25 | colors = { 26 | 'PENDING': 'cyan', 27 | 'ERROR': 'red', 28 | 'SUCCESS': 'green', 29 | 'INPROGRESS': 'yellow', 30 | 'SKIPPED': 'blue', 31 | 'NOOP': 'black'} 32 | 33 | for n in plan: 34 | color = colors[plan.node[n]['status']] 35 | plan.node[n]['color'] = color 36 | 37 | nx.write_dot(plan, '{name}.dot'.format(name=plan.graph['name'])) 38 | subprocess.call( 39 | 'tred {name}.dot | dot -Tsvg -o {name}.svg'.format( 40 | name=plan.graph['name']), 41 | shell=True) 42 | -------------------------------------------------------------------------------- /examples/riak/riak_cluster.yaml: -------------------------------------------------------------------------------- 1 | id: riak_cluster 2 | 3 | resources: 4 | - id: riak_service1 5 | from: examples/riak/riak_service.yaml 6 | values: 7 | node: {{nodes[0]}} 8 | index: 1 9 | join_to: '' 10 | 11 | - id: riak_service2 12 | from: examples/riak/riak_service.yaml 13 | values: 14 | node: {{nodes[1]}} 15 | index: 2 16 | join_to: riak_service1 17 | 18 | - id: riak_service3 19 | from: examples/riak/riak_service.yaml 20 | values: 21 | node: {{nodes[2]}} 22 | index: 3 23 | join_to: riak_service1 24 | 25 | - id: haproxy_riak_config 26 | from: examples/riak/haproxy_riak_config.yaml 27 | values: 28 | http_listen_port: 8098 29 | pb_listen_port: 8087 30 | riaks: ['riak_service1', 'riak_service2', 'riak_service3'] 31 | 32 | - id: haproxy1 33 | from: templates/haproxy.yaml 34 | values: 35 | node: {{nodes[0]}} 36 | service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http'] 37 | index: 1 38 | 39 | - id: haproxy2 40 | from: templates/haproxy.yaml 41 | values: 42 | node: {{nodes[1]}} 43 | service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http'] 44 | index: 2 45 | 46 | - id: haproxy3 47 | from: templates/haproxy.yaml 48 | values: 49 | node: {{nodes[2]}} 50 | service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http'] 51 | index: 3 52 | -------------------------------------------------------------------------------- /resources/neutron_agents_dhcp_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | handler: puppet 2 | id: 'neutron_agents_dhcp_puppet' 3 | input: 4 | ip: 5 | schema: str! 6 | value: 7 | # ssh_key: 8 | # schema: str! 9 | # value: 10 | # ssh_user: 11 | # schema: str! 
12 | # value: 13 | 14 | package_ensure: 15 | schema: str 16 | value: present 17 | debug: 18 | schema: bool 19 | value: false 20 | state_path: 21 | schema: str 22 | value: '/var/lib/neutron' 23 | resync_interval: 24 | schema: int 25 | value: 30 26 | interface_driver: 27 | schema: str 28 | value: 'neutron.agent.linux.interface.OVSInterfaceDriver' 29 | dhcp_driver: 30 | schema: str 31 | value: 'neutron.agent.linux.dhcp.Dnsmasq' 32 | root_helper: 33 | schema: str 34 | value: 'sudo neutron-rootwrap /etc/neutron/rootwrap.conf' 35 | use_namespaces: 36 | schema: bool 37 | value: true 38 | dnsmasq_config_file: 39 | schema: str 40 | value: 41 | dhcp_delete_namespaces: 42 | schema: bool 43 | value: false 44 | enable_isolated_metadata: 45 | schema: bool 46 | value: false 47 | enable_metadata_network: 48 | schema: bool 49 | value: false 50 | 51 | git: 52 | schema: {repository: str!, branch: str!} 53 | value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} 54 | 55 | puppet_module: 'neutron' 56 | tags: [resource/neutron, resource/neutron_agents_dhcp] 57 | version: 1.0.0 58 | -------------------------------------------------------------------------------- /solar/dblayer/riak_client.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Mirantis, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import time 16 | 17 | from riak import RiakClient as OrigRiakClient 18 | 19 | from solar.dblayer.model import clear_cache 20 | 21 | 22 | class RiakClient(OrigRiakClient): 23 | def session_start(self): 24 | clear_cache() 25 | 26 | def session_end(self, result=True): 27 | # ignore result 28 | clear_cache() 29 | 30 | def delete_all(self, cls): 31 | for _ in xrange(10): 32 | # riak dislikes deletes without dvv 33 | rst = cls.bucket.get_index('$bucket', 34 | startkey='_', 35 | max_results=100000).results 36 | for key in rst: 37 | cls.bucket.delete(key) 38 | else: 39 | return 40 | time.sleep(0.5) 41 | -------------------------------------------------------------------------------- /resources/cinder_glance_puppet/README.md: -------------------------------------------------------------------------------- 1 | # Cinder Glance resource for puppet handler 2 | 3 | Glance drives Cinder as a block storage backend to store image data. 4 | 5 | # Parameters 6 | 7 | source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/glance.pp 8 | 9 | ``glance_api_servers`` 10 | (optional) A list of the glance api servers available to cinder. 11 | Should be an array with [hostname|ip]:port 12 | Defaults to undef 13 | Note: for this resource, it is decomposed to *_host and *_port due to 14 | existing implementation limitations 15 | 16 | ``glance_api_version`` 17 | (optional) Glance API version. 18 | Should be 1 or 2 19 | Defaults to 2 (current version) 20 | 21 | ``glance_num_retries`` 22 | (optional) Number of retries when downloading an image from glance.
23 | Defaults to 0 24 | 25 | ``glance_api_insecure`` 26 | (optional) Allow performing insecure SSL (https) requests to glance. 27 | Defaults to false 28 | 29 | ``glance_api_ssl_compression`` 30 | (optional) Whether to attempt to negotiate SSL layer compression when 31 | using SSL (https) requests. Set to False to disable SSL 32 | layer compression. In some cases disabling this may improve 33 | data throughput, e.g. when high network bandwidth is available 34 | and you are using already compressed image formats such as qcow2. 35 | Defaults to false 36 | 37 | ``glance_request_timeout`` 38 | (optional) http/https timeout value for glance operations. 39 | Defaults to undef -------------------------------------------------------------------------------- /resources/neutron_plugins_ml2_puppet/meta.yaml: -------------------------------------------------------------------------------- 1 | handler: puppet 2 | id: 'neutron_plugins_ml2_puppet' 3 | input: 4 | ip: 5 | schema: str! 6 | value: 7 | # ssh_key: 8 | # schema: str! 9 | # value: 10 | # ssh_user: 11 | # schema: str! 12 | # value: 13 | 14 | type_drivers: 15 | schema: [str] 16 | value: ['local', 'flat', 'vlan', 'gre', 'vxlan'] 17 | tenant_network_types: 18 | schema: [str] 19 | value: ['local', 'flat', 'vlan', 'gre', 'vxlan'] 20 | mechanism_drivers: 21 | schema: [str] 22 | value: ['openvswitch', 'linuxbridge'] 23 | flat_networks: 24 | schema: [str] 25 | value: ['*'] 26 | network_vlan_ranges: 27 | schema: [str] 28 | value: ['physnet1:1000:2999'] 29 | tunnel_id_ranges: 30 | schema: [str] 31 | value: ['20:100'] 32 | vxlan_group: 33 | schema: str 34 | value: '224.0.0.1' 35 | vni_ranges: 36 | schema: [str] 37 | value: ['10:100'] 38 | enable_security_group: 39 | schema: bool 40 | value: true 41 | package_ensure: 42 | schema: str 43 | value: 'present' 44 | supported_pci_vendor_devs: 45 | schema: [str] 46 | value: ['15b3:1004', '8086:10ca'] 47 | sriov_agent_required: 48 | schema: bool 49 | value: false 50 | 51 | git: 52 | schema: {repository: str!, branch: str!} 53 | value: {repository: 'https://github.com/openstack/puppet-neutron', branch: '5.1.0'} 54 | 55 | puppet_module: 'neutron' 56 | tags: [resource/neutron, resource/neutron_plugins_ml2] 57 | version: 1.0.0 58 | --------------------------------------------------------------------------------