├── cloudman
├── helmsman
│ ├── __init__.py
│ ├── clients
│ │ ├── __init__.py
│ │ └── helm_client.py
│ ├── migrations
│ │ ├── __init__.py
│ │ └── 0001_initial.py
│ ├── tests
│ │ ├── __init__.py
│ │ ├── client_mocker.py
│ │ ├── test_helmsman_unit.py
│ │ ├── test_mgmt_commands.py
│ │ └── data
│ │ │ ├── helmsman_config.yaml
│ │ │ └── helmsman_config_update.yaml
│ ├── admin.py
│ ├── apps.py
│ ├── default_macros.j2
│ ├── management
│ │ └── commands
│ │ │ ├── add_repo.py
│ │ │ ├── helmsman_load_config.py
│ │ │ ├── add_template_registry.py
│ │ │ ├── add_chart.py
│ │ │ └── add_install_template.py
│ ├── urls.py
│ ├── rules.py
│ ├── models.py
│ ├── helpers.py
│ ├── views.py
│ └── serializers.py
├── projman
│ ├── __init__.py
│ ├── tests
│ │ ├── __init__.py
│ │ ├── data
│ │ │ ├── projman_config.yaml
│ │ │ ├── projman_config_update.yaml
│ │ │ ├── expected_chart_values.yaml
│ │ │ └── helmsman_config.yaml
│ │ └── test_mgmt_commands.py
│ ├── migrations
│ │ ├── __init__.py
│ │ └── 0001_initial.py
│ ├── admin.py
│ ├── urls.py
│ ├── models.py
│ ├── management
│ │ └── commands
│ │ │ ├── projman_create_project.py
│ │ │ ├── projman_load_config.py
│ │ │ └── install_template_in_project.py
│ ├── rules.py
│ ├── views.py
│ └── serializers.py
├── clusterman
│ ├── __init__.py
│ ├── tests
│ │ ├── __init__.py
│ │ ├── data
│ │ │ ├── kube_config.yaml
│ │ │ ├── initial_cluster_data_aws.yaml
│ │ │ ├── initial_cluster_data_gcp.yaml
│ │ │ ├── initial_cluster_data_openstack.yaml
│ │ │ └── initial_cluster_data_azure.yaml
│ │ ├── client_mocker.py
│ │ └── test_mgmt_commands.py
│ ├── migrations
│ │ ├── __init__.py
│ │ ├── 0003_cmautoscaler_allowed_vm_type_prefixes.py
│ │ ├── 0002_create_rancher_app.py
│ │ └── 0001_initial.py
│ ├── exceptions.py
│ ├── admin.py
│ ├── urls.py
│ ├── clients
│ │ ├── helpers.py
│ │ └── kube_client.py
│ ├── rules.py
│ ├── fixtures
│ │ └── rancher_app_def.json
│ ├── tasks.py
│ ├── management
│ │ └── commands
│ │ │ ├── create_cluster.py
│ │ │ ├── create_autoscale_user.py
│ │ │ └── import_cloud_data.py
│ ├── models.py
│ ├── serializers.py
│ ├── views.py
│ ├── cluster_templates.py
│ ├── plugins
│ │ └── rke_kubernetes_app.py
│ └── resources.py
├── cloudman
│ ├── tests
│ │ ├── fixtures
│ │ │ ├── keycloak-export.sh
│ │ │ ├── keycloak-import.sh
│ │ │ └── keycloak-export-realm.sh
│ │ └── test_cloudman_auth.py
│ ├── celeryconfig_test.py
│ ├── wsgi.py
│ ├── __init__.py
│ ├── auth.py
│ ├── celery.py
│ ├── urls.py
│ ├── oidc.py
│ └── settings.py
└── manage.py
├── util
├── requirements.txt
├── uwsgi_schema.yml
└── convert_kwalify_to_json_schema.py
├── HISTORY.rst
├── .editorconfig
├── run_web.sh
├── .gitignore
├── setup.cfg
├── .dockerignore
├── local_dev.sh
├── requirements_test.txt
├── .github
└── workflows
│ ├── tests.yaml
│ └── build_container.yaml
├── requirements.txt
├── README.rst
├── tox.ini
├── Dockerfile
└── setup.py
/cloudman/helmsman/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/cloudman/projman/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/cloudman/clusterman/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/cloudman/clusterman/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/cloudman/helmsman/clients/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/cloudman/projman/tests/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/util/requirements.txt:
--------------------------------------------------------------------------------
1 | pykwalify
2 |
--------------------------------------------------------------------------------
/cloudman/clusterman/migrations/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/cloudman/helmsman/migrations/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/cloudman/projman/migrations/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/util/uwsgi_schema.yml:
--------------------------------------------------------------------------------
1 | ../uwsgi_schema.yml
--------------------------------------------------------------------------------
/cloudman/helmsman/tests/__init__.py:
--------------------------------------------------------------------------------
1 | from .test_helmsman_api import HelmsManServiceTestBase
2 |
--------------------------------------------------------------------------------
/cloudman/cloudman/tests/fixtures/keycloak-export.sh:
--------------------------------------------------------------------------------
1 | docker exec -it keycloak bash /testdata/keycloak-export-realm.sh
2 |
--------------------------------------------------------------------------------
/HISTORY.rst:
--------------------------------------------------------------------------------
1 | .. :changelog:
2 |
3 | History
4 | -------
5 |
6 | 2.0.0 (2018-05-16)
7 | ++++++++++++++++++
8 |
9 | * Development version of CloudManV2
10 |
--------------------------------------------------------------------------------
/cloudman/clusterman/exceptions.py:
--------------------------------------------------------------------------------
1 | # Exception hierarchy for cloudman
2 |
3 |
4 | class CMDuplicateNameException(Exception):
5 | pass
6 |
7 |
8 | class CMRunCommandException(Exception):
9 | pass
10 |
--------------------------------------------------------------------------------
/cloudman/helmsman/admin.py:
--------------------------------------------------------------------------------
1 | from django.contrib import admin
2 |
3 | from . import models
4 |
5 |
6 | @admin.register(models.HMInstallTemplate)
7 | class HMInstallTemplateAdmin(admin.ModelAdmin):
8 | ordering = ('added',)
9 |
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | # http://editorconfig.org
2 |
3 | root = true
4 |
5 | [*]
6 | indent_style = space
7 | indent_size = 4
8 | trim_trailing_whitespace = true
9 | insert_final_newline = true
10 | charset = utf-8
11 | end_of_line = lf
12 |
--------------------------------------------------------------------------------
/cloudman/projman/admin.py:
--------------------------------------------------------------------------------
1 | """Models exposed via Django Admin."""
2 | from django.contrib import admin
3 |
4 | from . import models
5 |
6 |
7 | @admin.register(models.CMProject)
8 | class CMProjectAdmin(admin.ModelAdmin):
9 | ordering = ('added',)
10 |
--------------------------------------------------------------------------------
/run_web.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | cd cloudman
4 |
5 | # Prepare init migration
6 | # su -m myuser -c "python manage.py makemigrations"
7 | # Migrate db, so we have the latest db schema
8 | su -m myuser -c "python manage.py migrate"
9 | # Start development server on public ip interface, on port 8000
10 | su -m cloudman -c "gunicorn -b :8000 cloudman.wsgi"
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | syntax: glob
2 |
3 | *~
4 | *.*.swp
5 | *.orig
6 | *.DS_Store
7 | *.pyc
8 | *.sqlite3
9 | *.log
10 |
11 | cloudman/cloudman_server.egg-info/*
12 |
13 | cloudman/db.sqlite3
14 | cloudman/cloudman/settings_local.py
15 |
16 | .tox
17 | build
18 | docs/_build/
19 | static/
20 |
21 | CM2.sublime-project
22 | CM2.sublime-workspace
23 |
24 | /venv/
25 | /.tox/
26 |
--------------------------------------------------------------------------------
/cloudman/projman/tests/data/projman_config.yaml:
--------------------------------------------------------------------------------
1 | projects:
2 | first:
3 | charts:
4 | galaxy:
5 | install_template: galaxy
6 | second:
7 | charts:
8 | galaxy:
9 | install_template: galaxy
10 | jupyterhub:
11 | release_name: jup
12 | install_template: jupyter
13 | context:
14 | dummy: "hello"
15 |
--------------------------------------------------------------------------------
/cloudman/cloudman/celeryconfig_test.py:
--------------------------------------------------------------------------------
1 | """
2 | Celery settings used during cloudman testing
3 | """
4 | broker_url = 'memory://'
5 | broker_transport_options = {'polling_interval': .01}
6 | broker_backend = 'memory'
7 | result_backend = 'db+sqlite:///results.db'
8 | result_serializer = 'json'
9 | task_serializer = 'json'
10 | accept_content = ['json']
11 | task_always_eager = True
12 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 80
3 | max-complexity = 15
4 | exclude = .git, __pycache__
5 | import-order-style = smarkets
6 | application-import-names = cloudman
7 |
8 | [coverage:run]
9 | branch = True
10 | source = cloudman
11 | omit =
12 | cloudman/manage.py
13 | cloudman/cloudman/__init__.py
14 | cloudman/cloudman/wsgi.py
15 |
16 | [bdist_wheel]
17 | universal = 1
18 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # this file uses slightly different syntax than .gitignore,
2 | # e.g. ".tox/" will not ignore .tox directory
3 |
4 | # well, official docker build should be done on clean git checkout
5 | # anyway, so .tox should be empty... But I'm sure people will try to
6 | # test docker on their git working directories.
7 |
8 | .git
9 | .tox
10 | .venv
11 | venv
12 | venv3
13 | docs
14 | *.log
15 | *.db
16 | *.sqlite3
17 |
--------------------------------------------------------------------------------
/cloudman/helmsman/apps.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from django.apps import AppConfig
4 |
5 | DEFAULT_MACRO_FILE = os.path.join(os.path.dirname(__file__), 'default_macros.j2')
6 |
7 |
8 | class HelmsManConfig(AppConfig):
9 | name = "helmsman"
10 |
11 | def __init__(self, app_name, app_module):
12 | super().__init__(app_name, app_module)
13 | with open(DEFAULT_MACRO_FILE) as f:
14 | self.default_macros = f.read()
15 |
--------------------------------------------------------------------------------
/cloudman/cloudman/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for cloudman project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.wsgi import get_wsgi_application
13 |
14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudman.settings")
15 |
16 | application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/cloudman/cloudman/tests/fixtures/keycloak-import.sh:
--------------------------------------------------------------------------------
1 | docker run -p 8080:8080 -e KEYCLOAK_USER=admin -e KEYCLOAK_PASSWORD=testpassword -v `pwd`:/testdata/ -e JAVA_OPTS="-server -Xms64m -Xmx512m -XX:MetaspaceSize=96M -XX:MaxMetaspaceSize=256m -Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=org.jboss.byteman -Djava.awt.headless=true -Dkeycloak.migration.action=import -Dkeycloak.migration.provider=singleFile -Dkeycloak.migration.file=/testdata/realm-export.json -Dkeycloak.migration.strategy=OVERWRITE_EXISTING" jboss/keycloak:7.0.0
2 |
3 |
--------------------------------------------------------------------------------
/local_dev.sh:
--------------------------------------------------------------------------------
1 | . venv/bin/activate
2 | #CONSUL_PATH="./venv/bin"
3 | #if [ ! -f $CONSUL_PATH/consul ]; then
4 | # echo "Extracting consul to: $CONSUL_PATH"
5 | # wget -O- https://releases.hashicorp.com/consul/0.9.3/consul_0.9.3_darwin_amd64.zip | tar xvz -C $CONSUL_PATH
6 | #fi
7 | #$CONSUL_PATH/consul agent -dev &
8 | export CELERY_BROKER_URL="amqp://guest:guest@localhost:5672/"
9 | cd cloudman
10 | /usr/local/Cellar/rabbitmq/3.6.9/sbin/rabbitmq-server &
11 | celery -E -A cloudman worker -l debug &
12 | python manage.py runserver
13 |
14 |
--------------------------------------------------------------------------------
/cloudman/helmsman/default_macros.j2:
--------------------------------------------------------------------------------
1 | {%- macro random_int(len) -%}
2 | {%- for _ in range(len) -%}
3 | {{ range(10) | random }}
4 | {%- endfor -%}
5 | {%- endmacro -%}
6 | {%- macro random_alphanumeric(len) -%}
7 | {%- for _ in range(len) -%}
8 | {{ ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z",0,1,2,3,4,5,6,7,8,9] | random }}
9 | {%- endfor -%}
10 | {%- endmacro -%}
11 |
--------------------------------------------------------------------------------
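The Jinja2 macros above are read at startup by HelmsManConfig (see helmsman/apps.py earlier in this listing) and made available as default macros for install templates. A minimal sketch, assuming a plain Jinja2 environment and a path relative to the repository root, of exercising them outside of Django:

    # Hedged example: render the default macros with plain Jinja2.
    # The file path and the "password" key are illustrative only.
    from jinja2 import Template

    with open("cloudman/helmsman/default_macros.j2") as f:
        macros = f.read()

    # Macros defined earlier in a template are callable later in the same template.
    print(Template(macros + "password: {{ random_alphanumeric(16) }}").render())
    # e.g. "password: q3ZtT0aB7kVw2xYr"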
/cloudman/clusterman/tests/data/kube_config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | clusters:
3 | - cluster:
4 | certificate-authority: /Users/Name/.minikube/ca.crt
5 | server: https://192.168.99.114:8443
6 | name: minikube
7 | contexts:
8 | - context:
9 | cluster: minikube
10 | user: minikube
11 | name: minikube
12 | current-context: minikube
13 | kind: Config
14 | preferences: {}
15 | users:
16 | - name: minikube
17 | user:
18 | client-certificate: /Users/Name/.minikube/client.crt
19 | client-key: /Users/Name/.minikube/client.key
20 |
--------------------------------------------------------------------------------
/requirements_test.txt:
--------------------------------------------------------------------------------
1 | git+https://github.com/celery/django-celery-results
2 | brotlipy
3 | paramiko
4 | # needed by celery
5 | sqlalchemy
6 | # required by moto
7 | sshpubkeys
8 | git+https://github.com/CloudVE/cloudbridge#egg=cloudbridge[dev]
9 | git+https://github.com/CloudVE/djcloudbridge
10 | # Leave cloudlaunch-cli before cloudlaunch-server due to coreapi version mismatch
11 | git+https://github.com/CloudVE/cloudlaunch-cli
12 | git+https://github.com/galaxyproject/cloudlaunch
13 | pytz>=2022.2.1
14 | tzdata>=2022.2
15 | -r requirements.txt
16 | -e ".[test]"
17 |
--------------------------------------------------------------------------------
/cloudman/cloudman/__init__.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 |
3 | # This will make sure the app is always imported when
4 | # Django starts so that shared_task will use this app.
5 | from .celery import app as celery_app # noqa
6 |
7 | __all__ = ['celery_app']
8 |
9 | # Current version of the library
10 | __version__ = '2.0.0'
11 |
12 |
13 | def get_version():
14 | """
15 | Return a string with the current version of the library.
16 |
17 | :rtype: ``string``
18 | :return: Library version (e.g., "2.0.0").
19 | """
20 | return __version__
21 |
--------------------------------------------------------------------------------
/cloudman/clusterman/migrations/0003_cmautoscaler_allowed_vm_type_prefixes.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 3.1.4 on 2021-01-31 17:46
2 |
3 | from django.db import migrations, models
4 |
5 |
6 | class Migration(migrations.Migration):
7 |
8 | dependencies = [
9 | ('clusterman', '0002_create_rancher_app'),
10 | ]
11 |
12 | operations = [
13 | migrations.AddField(
14 | model_name='cmautoscaler',
15 | name='allowed_vm_type_prefixes',
16 | field=models.CharField(blank=True, default=None, max_length=300, null=True),
17 | ),
18 | ]
19 |
--------------------------------------------------------------------------------
/cloudman/cloudman/auth.py:
--------------------------------------------------------------------------------
1 | import requests
2 |
3 | from django.core.cache import cache
4 |
5 |
6 | def get_metadata(metadata_endpoint):
7 | op_metadata = cache.get('OIDC_OP_METADATA')
8 | if not op_metadata:
9 | response = requests.get(url=metadata_endpoint, verify=False)
10 | response.raise_for_status()
11 | op_metadata = response.json()
12 | cache.set('OIDC_OP_METADATA', op_metadata)
13 | return op_metadata
14 |
15 |
16 | def get_from_well_known(metadata_endpoint, attr):
17 | metadata = get_metadata(metadata_endpoint)
18 | return metadata.get(attr)
19 |
--------------------------------------------------------------------------------
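get_metadata caches the OIDC provider's discovery document and get_from_well_known reads a single attribute from it. A small usage sketch, assuming a configured Django environment (e.g. python manage.py shell); the Keycloak URL is a placeholder, and "authorization_endpoint" is a standard OIDC discovery attribute:

    # Hedged example: look up an attribute from the cached OIDC discovery document.
    from cloudman.auth import get_from_well_known

    # Placeholder URL; a real deployment points at its own identity provider.
    metadata_url = ("https://keycloak.example.org/auth/realms/master"
                    "/.well-known/openid-configuration")
    print(get_from_well_known(metadata_url, "authorization_endpoint"))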
/cloudman/projman/tests/data/projman_config_update.yaml:
--------------------------------------------------------------------------------
1 | projects:
2 | first:
3 | charts:
4 | galaxy:
5 | install_template: galaxy
6 | context:
7 | storageclass: updated-provisioner
8 | second:
9 | charts:
10 | galaxy:
11 | install_template: galaxy
12 | context:
13 | storageclass: updated-provisioner
14 | upgrade: true
15 | reset_values: true
16 | jupyterhub:
17 | release_name: jup
18 | install_template: jupyter
19 | upgrade: true
20 | context:
21 | dummy: "world"
22 |
--------------------------------------------------------------------------------
/cloudman/clusterman/admin.py:
--------------------------------------------------------------------------------
1 | """Models exposed via Django Admin."""
2 | from django.contrib import admin
3 | import nested_admin
4 |
5 | from . import models
6 |
7 |
8 | class CMClusterNodeAdmin(nested_admin.NestedStackedInline):
9 | model = models.CMClusterNode
10 | extra = 0
11 |
12 |
13 | class CMAutoScalerAdmin(nested_admin.NestedStackedInline):
14 | model = models.CMAutoScaler
15 | extra = 0
16 |
17 |
18 | @admin.register(models.CMCluster)
19 | class CMClusterAdmin(nested_admin.NestedModelAdmin):
20 | inlines = [CMClusterNodeAdmin, CMAutoScalerAdmin]
21 | ordering = ('added',)
22 |
--------------------------------------------------------------------------------
/cloudman/helmsman/management/commands/add_repo.py:
--------------------------------------------------------------------------------
1 | from django.core.management.base import BaseCommand
2 |
3 | from ...clients.helm_client import HelmClient
4 |
5 |
6 | class Command(BaseCommand):
7 | help = 'Adds a new repository to helm'
8 |
9 | def add_arguments(self, parser):
10 | parser.add_argument('name', help='Name of the repository')
11 | parser.add_argument('url', help='Url to the repository')
12 |
13 | def handle(self, *args, **options):
14 | self.add_chart(options['name'], options['url'])
15 |
16 | @staticmethod
17 | def add_chart(name, url):
18 | client = HelmClient()
19 | client.repositories.create(name, url)
20 |
--------------------------------------------------------------------------------
/.github/workflows/tests.yaml:
--------------------------------------------------------------------------------
1 | name: Run tests
2 | on:
3 | push:
4 | branches:
5 | - master
6 | pull_request:
7 | branches:
8 | - master
9 | jobs:
10 | build:
11 | name: tests
12 | runs-on: ubuntu-latest
13 | strategy:
14 | fail-fast: true
15 | steps:
16 | - name: 'Set up Python'
17 | uses: actions/setup-python@v2
18 | with:
19 | python-version: '3.8'
20 | - uses: actions/checkout@v2
21 | - run: pip install tox
22 | - run: pip install tox-docker>=2.0.0a3
23 | - run: pip install coveralls
24 | - run: tox -e py38-integration
25 | - run: coveralls -v --service=github
26 | env:
27 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
28 |
--------------------------------------------------------------------------------
/.github/workflows/build_container.yaml:
--------------------------------------------------------------------------------
1 | name: Build Image
2 | on:
3 | workflow_dispatch: {}
4 | push:
5 | branches:
6 | - 'master'
7 | jobs:
8 | build:
9 | name: Build image
10 | runs-on: ubuntu-latest
11 | strategy:
12 | fail-fast: false
13 | steps:
14 | - uses: actions/checkout@v2
15 | - run: docker build . -t galaxy/cloudman-server:latest
16 | - name: Login to docker hub
17 | uses: actions-hub/docker/login@master
18 | env:
19 | DOCKER_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
20 | DOCKER_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }}
21 | - name: Push to docker hub
22 | uses: actions-hub/docker@master
23 | with:
24 | args: push galaxy/cloudman-server:latest
25 |
--------------------------------------------------------------------------------
/cloudman/helmsman/urls.py:
--------------------------------------------------------------------------------
1 | """CloudMan Create URL configuration."""
2 |
3 | from django.urls import include
4 | from django.urls import re_path
5 |
6 | from . import views
7 | from djcloudbridge.drf_routers import HybridDefaultRouter
8 |
9 |
10 | router = HybridDefaultRouter()
11 | router.register(r'repositories', views.ChartRepoViewSet,
12 | basename='repositories')
13 | router.register(r'charts', views.ChartViewSet,
14 | basename='charts')
15 | router.register(r'namespaces', views.NamespaceViewSet,
16 | basename='namespaces')
17 | router.register(r'install_templates', views.InstallTemplatesViewSet,
18 | basename='install_templates')
19 |
20 | app_name = "helmsman"
21 |
22 | urlpatterns = [
23 | re_path(r'^', include(router.urls)),
24 | ]
25 |
--------------------------------------------------------------------------------
/cloudman/projman/tests/data/expected_chart_values.yaml:
--------------------------------------------------------------------------------
1 | config:
2 | galaxy.yml:
3 | galaxy:
4 | enable_oidc: true
5 | oidc_backends_config_file: /galaxy/server/config/oidc_backends_config.xml
6 | oidc_config_file: /galaxy/server/config/oidc_config.xml
7 | hello: world
8 | ingress:
9 | annotations:
10 | certmanager.k8s.io/cluster-issuer: letsencrypt-prod
11 | kubernetes.io/tls-acme: 'true'
12 | nginx.ingress.kubernetes.io/secure-backends: 'true'
13 | enabled: true
14 | hosts:
15 | - ngkc4.cloudve.org
16 | path: /gvl/galaxy
17 | tls:
18 | - hosts:
19 | - ngkc4.cloudve.org
20 | secretName: ngkc4-cloudve-org-key
21 | persistence:
22 | size: 95Gi
23 | storageClass: nfs-provisioner
24 | postgresql:
25 | persistence:
26 | storageClass: ebs-provisioner
27 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | # needed by celery
2 | sqlalchemy
3 | # install edge till this is released: https://github.com/encode/django-rest-framework/pull/7571
4 | git+https://github.com/encode/django-rest-framework
5 | # install edge till this is released: https://github.com/celery/django-celery-results/issues/157
6 | git+https://github.com/celery/django-celery-results
7 | # moto==1.1.11
8 | # for eclipse debugging
9 | pydevd>=1.0.0
10 | # jinja2 for rendering install templates
11 | jinja2
12 | # get latest package versions for now
13 | git+https://github.com/CloudVE/cloudbridge#egg=cloudbridge[full]
14 | git+https://github.com/CloudVE/djcloudbridge
15 | # Leave cloudlaunch-cli before cloudlaunch-server due to coreapi version mismatch
16 | git+https://github.com/CloudVE/cloudlaunch-cli
17 | git+https://github.com/galaxyproject/cloudlaunch
18 | -e ".[prod]"
19 |
--------------------------------------------------------------------------------
/cloudman/projman/urls.py:
--------------------------------------------------------------------------------
1 | """CloudMan Create URL configuration."""
2 |
3 | from django.urls import include
4 | from django.urls import re_path
5 |
6 | from . import views
7 |
8 | from djcloudbridge.drf_routers import HybridDefaultRouter
9 | from djcloudbridge.drf_routers import HybridNestedRouter
10 |
11 |
12 | router = HybridDefaultRouter()
13 | router.register(r'projects', views.ProjectViewSet,
14 | basename='projects')
15 |
16 | project_router = HybridNestedRouter(router, r'projects',
17 | lookup='project')
18 | project_router.register(r'charts', views.ProjectChartViewSet,
19 | basename='chart')
20 |
21 | app_name = "projman"
22 |
23 | cluster_regex_pattern = r'^'
24 | urlpatterns = [
25 | re_path(r'^', include(router.urls)),
26 | re_path(cluster_regex_pattern, include(project_router.urls))
27 | ]
28 |
--------------------------------------------------------------------------------
/cloudman/helmsman/rules.py:
--------------------------------------------------------------------------------
1 | import rules
2 |
3 | # Delegate to keycloak in future iteration
4 |
5 | # Permissions
6 | rules.add_perm('helmsman.view_namespace', rules.is_staff)
7 | rules.add_perm('helmsman.add_namespace', rules.is_staff)
8 | rules.add_perm('helmsman.change_namespace', rules.is_staff)
9 | rules.add_perm('helmsman.delete_namespace', rules.is_staff)
10 |
11 | rules.add_perm('helmsman.view_chart', rules.is_staff)
12 | rules.add_perm('helmsman.add_chart', rules.is_staff)
13 | rules.add_perm('helmsman.change_chart', rules.is_staff)
14 | rules.add_perm('helmsman.delete_chart', rules.is_staff)
15 |
16 | rules.add_perm('helmsman.view_install_template', rules.is_authenticated)
17 | rules.add_perm('helmsman.add_install_template', rules.is_staff)
18 | rules.add_perm('helmsman.change_install_template', rules.is_staff)
19 | rules.add_perm('helmsman.delete_install_template', rules.is_staff)
20 |
--------------------------------------------------------------------------------
/cloudman/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import os
3 | import sys
4 |
5 | if __name__ == "__main__":
6 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "cloudman.settings")
7 | try:
8 | from django.core.management import execute_from_command_line
9 | except ImportError:
10 | # The above import may fail for some other reason. Ensure that the
11 | # issue is really that Django is missing to avoid masking other
12 | # exceptions on Python 2.
13 | try:
14 | import django
15 | except ImportError:
16 | raise ImportError(
17 | "Couldn't import Django. Are you sure it's installed and "
18 | "available on your PYTHONPATH environment variable? Did you "
19 | "forget to activate a virtual environment?"
20 | )
21 | raise
22 | execute_from_command_line(sys.argv)
23 |
--------------------------------------------------------------------------------
/cloudman/projman/models.py:
--------------------------------------------------------------------------------
1 | from django.conf import settings
2 | from django.db import models
3 |
4 |
5 | class CMProject(models.Model):
6 | """CloudMan project details."""
7 | # Automatically add timestamps when object is created
8 | added = models.DateTimeField(auto_now_add=True)
9 | # Automatically add timestamps when object is updated
10 | updated = models.DateTimeField(auto_now=True)
11 | # Each project corresponds to a k8s namespace and therefore must be unique
12 | name = models.CharField(max_length=60, unique=True)
13 | namespace = models.SlugField(max_length=253, unique=True)
14 | owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
15 | null=False)
16 |
17 | class Meta:
18 | verbose_name = "Project"
19 | verbose_name_plural = "Projects"
20 |
21 | def __str__(self):
22 | return "{0} ({1})".format(self.name, self.id)
23 |
--------------------------------------------------------------------------------
/cloudman/cloudman/celery.py:
--------------------------------------------------------------------------------
1 | # File based on:
2 | # http://docs.celeryproject.org/en/latest/django/first-steps-with-django.html
3 | from __future__ import absolute_import
4 |
5 | import os
6 |
7 | import celery
8 | from django.conf import settings # noqa
9 |
10 | import logging
11 | log = logging.getLogger(__name__)
12 |
13 | # set the default Django settings module for the 'celery' program.
14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cloudman.settings')
15 |
16 | # Set default configuration module name
17 | os.environ.setdefault('CELERY_CONFIG_MODULE', 'cloudlaunchserver.celeryconfig')
18 |
19 |
20 | class Celery(celery.Celery):
21 |
22 | def on_configure(self):
23 | pass
24 |
25 |
26 | app = Celery('proj')
27 | # Changed to use dedicated celery config as detailed in:
28 | # http://docs.celeryproject.org/en/latest/getting-started/first-steps-with-celery.html
29 | # app.config_from_object('django.conf:settings')
30 | app.config_from_envvar('CELERY_CONFIG_MODULE')
31 | app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
32 |
--------------------------------------------------------------------------------
/cloudman/helmsman/tests/client_mocker.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | from clusterman.tests.client_mocker import ClientMocker as CMClientMocker
4 | from clusterman.tests.client_mocker import KubeMocker
5 | from .mock_helm import MockHelm
6 |
7 |
8 | class HelmMocker(object):
9 |
10 | def __init__(self):
11 | self.mock_helm = MockHelm()
12 |
13 | def can_parse(self, command):
14 | if isinstance(command, list):
15 | prog = command[0]
16 | if prog.startswith("helm"):
17 | return True
18 | return False
19 |
20 | @staticmethod
21 | def extra_patches():
22 | return [patch(
23 | 'helmsman.clients.helm_client.HelmClient._check_environment',
24 | return_value=True)]
25 |
26 | def run_command(self, command):
27 | return self.mock_helm.run_command(command)
28 |
29 |
30 | class ClientMocker(CMClientMocker):
31 | """
32 | Replaces helm and kube clients with their Mock versions
33 | """
34 |
35 | """ Mocks all calls to the helm and kubectl commands"""
36 | def __init__(self, testcase, mockers=None):
37 | super().__init__(testcase, mockers=mockers or [KubeMocker(), HelmMocker()])
38 |
--------------------------------------------------------------------------------
/cloudman/projman/migrations/0001_initial.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.10 on 2020-02-05 17:36
2 |
3 | from django.conf import settings
4 | from django.db import migrations, models
5 | import django.db.models.deletion
6 |
7 |
8 | class Migration(migrations.Migration):
9 |
10 | initial = True
11 |
12 | dependencies = [
13 | migrations.swappable_dependency(settings.AUTH_USER_MODEL),
14 | ]
15 |
16 | operations = [
17 | migrations.CreateModel(
18 | name='CMProject',
19 | fields=[
20 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
21 | ('added', models.DateTimeField(auto_now_add=True)),
22 | ('updated', models.DateTimeField(auto_now=True)),
23 | ('name', models.CharField(max_length=60, unique=True)),
24 | ('namespace', models.SlugField(max_length=253, unique=True)),
25 | ('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
26 | ],
27 | options={
28 | 'verbose_name': 'Project',
29 | 'verbose_name_plural': 'Projects',
30 | },
31 | ),
32 | ]
33 |
--------------------------------------------------------------------------------
/cloudman/clusterman/urls.py:
--------------------------------------------------------------------------------
1 | """CloudMan Create URL configuration."""
2 |
3 | from django.urls import include
4 | from django.urls import re_path
5 |
6 | from . import views
7 | from djcloudbridge.drf_routers import HybridDefaultRouter
8 | from djcloudbridge.drf_routers import HybridNestedRouter
9 |
10 |
11 | router = HybridDefaultRouter()
12 | router.register(r'clusters', views.ClusterViewSet,
13 | basename='clusters')
14 |
15 | cluster_router = HybridNestedRouter(router, r'clusters',
16 | lookup='cluster')
17 | cluster_router.register(r'nodes', views.ClusterNodeViewSet,
18 | basename='node')
19 | cluster_router.register(r'autoscalers', views.ClusterAutoScalerViewSet,
20 | basename='autoscaler')
21 | cluster_router.register(r'signals/scaleup', views.ClusterScaleUpSignalViewSet,
22 | basename='scaleupsignal')
23 | cluster_router.register(r'signals/scaledown', views.ClusterScaleDownSignalViewSet,
24 | basename='scaledownsignal')
25 |
26 |
27 | app_name = "clusterman"
28 |
29 | cluster_regex_pattern = r'^'
30 | urlpatterns = [
31 | re_path(r'^', include(router.urls)),
32 | re_path(cluster_regex_pattern, include(cluster_router.urls))
33 | ]
34 |
--------------------------------------------------------------------------------
/cloudman/clusterman/clients/helpers.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import io
3 | import subprocess
4 | import yaml
5 |
6 | from ..exceptions import CMRunCommandException
7 |
8 |
9 | def run_command(command, shell=False, stderr=None):
10 | """
11 | Runs a command and returns stdout
12 | """
13 | try:
14 | return subprocess.check_output(
15 | command, universal_newlines=True, shell=shell, encoding='utf-8',
16 | stderr=stderr)
17 | except subprocess.CalledProcessError as e:
18 | raise CMRunCommandException(f"Error running command: {e.output}")
19 |
20 |
21 | def run_list_command(command, delimiter="\t", skipinitialspace=True):
22 | """
23 | Runs a command and parses the output as tab-separated
24 | columnar output. The first row must contain column names.
25 | """
26 | output = run_command(command)
27 | reader = csv.DictReader(io.StringIO(output), delimiter=delimiter, skipinitialspace=skipinitialspace)
28 | output = []
29 | for row in reader:
30 | data = {key.strip(): val.strip() for key, val in row.items()}
31 | output.append(data)
32 | return output
33 |
34 |
35 | def run_yaml_command(command):
36 | """
37 | Runs a command, and parses the output as yaml.
38 | """
39 | output = run_command(command)
40 | return yaml.safe_load(output)
41 |
--------------------------------------------------------------------------------
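run_list_command expects the first row of output to contain the column names; the following self-contained snippet illustrates the parsing it performs (the sample string is made up, not real helm or kubectl output):

    # Illustration of the DictReader-based parsing performed by run_list_command.
    import csv
    import io

    sample = "NAME\tNAMESPACE\tSTATUS\ngalaxy\tdefault\tdeployed\n"
    reader = csv.DictReader(io.StringIO(sample), delimiter="\t",
                            skipinitialspace=True)
    print([{k.strip(): v.strip() for k, v in row.items()} for row in reader])
    # [{'NAME': 'galaxy', 'NAMESPACE': 'default', 'STATUS': 'deployed'}]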
/cloudman/projman/management/commands/projman_create_project.py:
--------------------------------------------------------------------------------
1 | import logging as log
2 |
3 | from django.core.management.base import BaseCommand
4 |
5 | from django.contrib.auth.models import User
6 |
7 | from ...api import ProjManAPI, PMServiceContext
8 |
9 |
10 | class Command(BaseCommand):
11 | help = 'Creates a ProjMan project.'
12 |
13 | def add_arguments(self, parser):
14 | parser.add_argument('name')
15 |
16 | def handle(self, *args, **options):
17 | name = options['name']
18 | self.create_project(name)
19 |
20 | @staticmethod
21 | def create_project(name):
22 | try:
23 | print("Creating project: {0}".format(name))
24 | admin = User.objects.filter(is_superuser=True).first()
25 | pmapi = ProjManAPI(PMServiceContext(user=admin))
26 | if not pmapi.projects.find(name):
27 | proj = pmapi.projects.create(name)
28 | print("Project created successfully.")
29 | return proj
30 | else:
31 | return pmapi.projects.find(name)
32 | except Exception as e:
33 | log.exception(f"An error occurred while "
34 | f"creating the project '{name}':", e)
35 | print(f"An error occurred while creating the project '{name}':", str(e))
36 | # Re-raise the exception
37 | raise e
38 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | CloudMan is a cloud infrastructure and application manager, primarily for Galaxy.
2 |
3 | .. image:: https://github.com/galaxyproject/cloudman/actions/workflows/tests.yaml/badge.svg
4 | :target: https://github.com/galaxyproject/cloudman/actions/workflows/tests.yaml
5 | :alt: Github Build Status
6 |
7 | .. image:: https://coveralls.io/repos/github/galaxyproject/cloudman/badge.svg?branch=master
8 | :target: https://coveralls.io/github/galaxyproject/cloudman?branch=master
9 | :alt: Test Coverage Report
10 |
11 | Installation
12 | ------------
13 | CloudMan is intended to be installed via the `CloudMan Helm chart`_.
14 |
15 | Run locally for development
16 | ---------------------------
17 | .. code-block:: bash
18 |
19 | git clone https://github.com/galaxyproject/cloudman.git
20 | cd cloudman
21 | pip install -r requirements.txt
22 | python cloudman/manage.py migrate
23 | gunicorn --log-level debug cloudman.wsgi
24 |
25 | The CloudMan API will be available at http://127.0.0.1:8000/cloudman/api/v1/
26 |
27 | To add the UI, see https://github.com/cloudve/cloudman-ui
28 |
29 | Build Docker image
30 | ------------------
31 | To build a Docker image, run ``docker build -t galaxy/cloudman:latest .``.
32 | Then push it to Docker Hub with:
33 |
34 | .. code-block:: bash
35 |
36 | docker login
37 | docker push galaxy/cloudman:latest
38 |
39 | .. _`CloudMan Helm chart`: https://github.com/cloudve/cloudman-helm
40 |
--------------------------------------------------------------------------------
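Once the development server from the README is running, the v1 API can be queried directly; the endpoint below is derived from cloudman/cloudman/urls.py and clusterman/urls.py, and depending on settings the request may require authentication. A minimal sketch using requests:

    # Hedged example: list clusters via the CloudMan v1 API on a local dev server.
    import requests

    resp = requests.get("http://127.0.0.1:8000/cloudman/api/v1/clusters/")
    resp.raise_for_status()
    print(resp.json())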
/cloudman/cloudman/tests/fixtures/keycloak-export-realm.sh:
--------------------------------------------------------------------------------
1 | # https://stackoverflow.com/questions/60766292/how-to-get-keycloak-to-export-realm-users-and-then-exit
2 | # docker-exec-cmd.sh
3 |
4 | set -o errexit
5 | set -o errtrace
6 | set -o nounset
7 | set -o pipefail
8 |
9 | # If something goes wrong, this script does not run forever but times out
10 | TIMEOUT_SECONDS=300
11 | # Logfile for the keycloak export instance
12 | LOGFILE=/tmp/standalone.sh.log
13 | # Destination export file
14 | JSON_EXPORT_FILE=/testdata/realm-export.json
15 |
16 | rm -f ${LOGFILE} ${JSON_EXPORT_FILE}
17 |
18 | # Start a new keycloak instance with exporting options enabled.
19 | # Use a port offset to prevent port conflicts with the "real" keycloak instance.
20 | timeout ${TIMEOUT_SECONDS}s \
21 | /opt/jboss/keycloak/bin/standalone.sh \
22 | -Dkeycloak.migration.action=export \
23 | -Dkeycloak.migration.provider=singleFile \
24 | -Dkeycloak.migration.realmName=master \
25 | -Dkeycloak.migration.file=${JSON_EXPORT_FILE} \
26 | -Dkeycloak.migration.usersExportStrategy=REALM_FILE \
27 | -Djboss.socket.binding.port-offset=99 \
28 | | tee -a ${LOGFILE} &
29 |
30 | # Grab the keycloak export instance process id
31 | PID="${!}"
32 |
33 | # Wait for the export to finish
34 | timeout ${TIMEOUT_SECONDS}s \
35 | grep -m 1 "Export finished successfully" <(tail -f ${LOGFILE})
36 |
37 | # Stop the keycloak export instance
38 | kill ${PID}
39 |
40 |
--------------------------------------------------------------------------------
/cloudman/helmsman/models.py:
--------------------------------------------------------------------------------
1 | from django.conf import settings
2 | from django.db import models
3 |
4 |
5 | class HMInstallTemplate(models.Model):
6 | """CloudMan project details."""
7 | # Automatically add timestamps when object is created
8 | added = models.DateTimeField(auto_now_add=True)
9 | # Automatically add timestamps when object is updated
10 | updated = models.DateTimeField(auto_now=True)
11 | # Each install template is identified by a unique name (primary key)
12 | name = models.CharField(max_length=60, primary_key=True)
13 | repo = models.SlugField(max_length=60)
14 | chart = models.SlugField(max_length=60)
15 | chart_version = models.CharField(max_length=60, blank=True, null=True)
16 | template = models.TextField(blank=True, null=True)
17 | context = models.TextField(blank=True, null=True)
18 | display_name = models.TextField(blank=True, null=True)
19 | summary = models.TextField(blank=True, null=True)
20 | description = models.TextField(blank=True, null=True)
21 | maintainers = models.TextField(blank=True, null=True)
22 | info_url = models.TextField(blank=True, null=True)
23 | icon_url = models.TextField(blank=True, null=True)
24 | screenshot_url = models.TextField(blank=True, null=True)
25 |
26 | class Meta:
27 | verbose_name = "Install Template"
28 | verbose_name_plural = "Install Templates"
29 |
30 | def __str__(self):
31 | return "{0}".format(self.name)
32 |
--------------------------------------------------------------------------------
/cloudman/clusterman/migrations/0002_create_rancher_app.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.0.5 on 2018-05-21 13:23
2 | import os
3 | from django.db import migrations
4 | from django.core import serializers
5 | from django.core.management import call_command
6 |
7 |
8 | # based on: https://stackoverflow.com/a/39743581/10971151
9 | def import_data(apps, schema_editor, filename):
10 | # Save the old _get_model() function
11 | old_get_model = serializers.python._get_model
12 |
13 | # Define new _get_model() function here, which utilizes the apps argument
14 | # to get the historical version of a model.
15 | def _get_model(model_identifier):
16 | try:
17 | return apps.get_model(model_identifier)
18 | except (LookupError, TypeError):
19 | raise serializers.base.DeserializationError(
20 | "Invalid model identifier: '%s'" % model_identifier)
21 |
22 | # Replace the _get_model() function, so loaddata can utilize it.
23 | serializers.python._get_model = _get_model
24 |
25 | try:
26 | # Call loaddata command
27 | call_command('loaddata', filename, app_label='clusterman')
28 | finally:
29 | # Restore old _get_model() function
30 | serializers.python._get_model = old_get_model
31 |
32 |
33 | def import_rancher_app(apps, schema_editor):
34 | import_data(apps, schema_editor, 'rancher_app_def.json')
35 |
36 |
37 | class Migration(migrations.Migration):
38 |
39 | dependencies = [
40 | ('clusterman', '0001_initial'),
41 | ]
42 |
43 | operations = [
44 | migrations.RunPython(import_rancher_app)
45 | ]
46 |
--------------------------------------------------------------------------------
/cloudman/clusterman/rules.py:
--------------------------------------------------------------------------------
1 | import rules
2 |
3 | # Delegate to keycloak in future iteration
4 |
5 | @rules.predicate
6 | def can_view_node(user, node):
7 | # Should have view rights on the parent cluster
8 | if not node:
9 | return False
10 | return user.has_perm('clusters.view_cluster', node.cluster)
11 |
12 |
13 | @rules.predicate
14 | def is_node_owner(user, node):
15 | # Should have update rights on the parent cluster
16 | if not node:
17 | return False
18 | return user.has_perm('clusters.change_cluster', node.cluster)
19 |
20 |
21 | @rules.predicate
22 | def has_autoscale_permissions(user, obj):
23 | return (user.has_perm('clusterman.view_cmcluster') and
24 | user.has_perm('clusterman.add_cmclusternode') and
25 | user.has_perm('clusterman.delete_cmclusternode'))
26 |
27 |
28 | # Permissions
29 | rules.add_perm('clusters.view_cluster', rules.is_staff | has_autoscale_permissions)
30 | rules.add_perm('clusters.add_cluster', rules.is_staff)
31 | rules.add_perm('clusters.change_cluster', rules.is_staff)
32 | rules.add_perm('clusters.delete_cluster', rules.is_staff)
33 |
34 | rules.add_perm('clusternodes.view_clusternode', can_view_node | has_autoscale_permissions | rules.is_staff)
35 | rules.add_perm('clusternodes.add_clusternode', is_node_owner | has_autoscale_permissions | rules.is_staff)
36 | rules.add_perm('clusternodes.change_clusternode', is_node_owner | has_autoscale_permissions | rules.is_staff)
37 | rules.add_perm('clusternodes.delete_clusternode', is_node_owner | has_autoscale_permissions | rules.is_staff)
38 |
39 | rules.add_perm('autoscalers.can_autoscale', has_autoscale_permissions | rules.is_staff)
40 |
--------------------------------------------------------------------------------
/cloudman/clusterman/fixtures/rancher_app_def.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "model": "cloudlaunch.application",
4 | "pk": "cm_rke_kubernetes_plugin",
5 | "fields": {
6 | "added": "2016-06-27T22:10:17.212Z",
7 | "updated": "2017-12-22T21:38:44.060Z",
8 | "name": "RKE Kubernetes Plugin",
9 | "status": "LIVE",
10 | "summary": "A RKE kubernetes plugin for cloudman",
11 | "maintainer": "cloudve.org",
12 | "description": "A RKE kubernetes plugin for cloudman",
13 | "info_url": "",
14 | "icon_url": "https://docs.rke2.io/assets/logo-horizontal-rke.svg",
15 | "default_launch_config": "",
16 | "default_version": 1,
17 | "display_order": 1000,
18 | "category": []
19 | }
20 | },
21 | {
22 | "model": "cloudlaunch.applicationversion",
23 | "pk": 1,
24 | "fields": {
25 | "application": "cm_rke_kubernetes_plugin",
26 | "version": "0.1.0",
27 | "frontend_component_path": "",
28 | "frontend_component_name": "",
29 | "backend_component_name": "clusterman.plugins.rke_kubernetes_app.RKEKubernetesApp",
30 | "default_launch_config": "{\r\n \"config_cloudlaunch\":{\r\n \"firewall\":[\r\n {\r\n \"securityGroup\":\"cloudlaunch-vm\",\r\n \"rules\":[\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"22\",\r\n \"to\":\"22\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"443\",\r\n \"to\":\"443\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n ]\r\n }\r\n ]\r\n }\r\n}",
31 | "default_target": null
32 | }
33 | }
34 | ]
35 |
--------------------------------------------------------------------------------
/cloudman/clusterman/tests/client_mocker.py:
--------------------------------------------------------------------------------
1 | from unittest.mock import patch
2 |
3 | from .mock_kubectl import MockKubeCtl
4 |
5 |
6 | class KubeMocker(object):
7 |
8 | def __init__(self):
9 | self.mock_kubectl = MockKubeCtl()
10 |
11 | def can_parse(self, command):
12 | if isinstance(command, list):
13 | prog = command[0]
14 | if prog.startswith("kubectl"):
15 | return True
16 | return False
17 |
18 | @staticmethod
19 | def extra_patches():
20 | return [patch(
21 | 'clusterman.clients.kube_client.KubeClient._check_environment',
22 | return_value=True)]
23 |
24 | def run_command(self, command):
25 | return self.mock_kubectl.run_command(command)
26 |
27 |
28 | class ClientMocker(object):
29 | """
30 | Replaces helm and kube clients with their Mock versions
31 | """
32 |
33 | """ Mocks all calls to the helm and kubectl commands"""
34 | def __init__(self, testcase, mockers=None):
35 | self.mockers = mockers or [KubeMocker()]
36 | self.extra_patches = []
37 | for mocker in self.mockers:
38 | self.extra_patches += mocker.extra_patches()
39 | self.patch1 = patch('clusterman.clients.helpers.run_command',
40 | self.mock_run_command)
41 | self.patch1.start()
42 | testcase.addCleanup(self.patch1.stop)
43 | for each in self.extra_patches:
44 | each.start()
45 | testcase.addCleanup(each.stop)
46 |
47 | def mock_run_command(self, command, shell=False):
48 | for mocker in self.mockers:
49 | if mocker.can_parse(command):
50 | return mocker.run_command(command)
51 |
--------------------------------------------------------------------------------
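ClientMocker hooks itself into a test case via addCleanup, so a single constructor call in setUp is enough to route kubectl (and, with extra mockers, helm) invocations through the mocks. A minimal illustrative sketch; the test class itself is not part of the repository:

    # Hedged example: using ClientMocker in a Django test case.
    from django.test import TestCase

    from clusterman.tests.client_mocker import ClientMocker


    class ExampleMockedKubectlTest(TestCase):

        def setUp(self):
            super().setUp()
            # Patches clusterman.clients.helpers.run_command for this test's
            # lifetime; cleanup is registered automatically via addCleanup.
            self.mock_client = ClientMocker(self)

        def test_mocker_is_installed(self):
            # Any code that shells out to kubectl now talks to MockKubeCtl instead.
            self.assertTrue(self.mock_client.mockers)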
/cloudman/helmsman/migrations/0001_initial.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.10 on 2020-06-01 07:23
2 |
3 | from django.db import migrations, models
4 |
5 |
6 | class Migration(migrations.Migration):
7 |
8 | initial = True
9 |
10 | dependencies = [
11 | ]
12 |
13 | operations = [
14 | migrations.CreateModel(
15 | name='HMInstallTemplate',
16 | fields=[
17 | ('added', models.DateTimeField(auto_now_add=True)),
18 | ('updated', models.DateTimeField(auto_now=True)),
19 | ('name', models.CharField(max_length=60, primary_key=True, serialize=False)),
20 | ('repo', models.SlugField(max_length=60)),
21 | ('chart', models.SlugField(max_length=60)),
22 | ('chart_version', models.CharField(blank=True, max_length=60, null=True)),
23 | ('template', models.TextField(blank=True, null=True)),
24 | ('context', models.TextField(blank=True, null=True)),
25 | ('display_name', models.TextField(blank=True, null=True)),
26 | ('summary', models.TextField(blank=True, null=True)),
27 | ('description', models.TextField(blank=True, null=True)),
28 | ('maintainers', models.TextField(blank=True, null=True)),
29 | ('info_url', models.TextField(blank=True, null=True)),
30 | ('icon_url', models.TextField(blank=True, null=True)),
31 | ('screenshot_url', models.TextField(blank=True, null=True)),
32 | ],
33 | options={
34 | 'verbose_name': 'Install Template',
35 | 'verbose_name_plural': 'Install Templates',
36 | },
37 | ),
38 | ]
39 |
--------------------------------------------------------------------------------
/cloudman/helmsman/helpers.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | import yaml
3 | from django.conf import settings
4 |
5 | from contextlib import contextmanager
6 |
7 |
8 | @contextmanager
9 | def TempInputFile(text, prefix="helmsman"):
10 | """
11 | Context manager to carry out an action
12 | after creating a temporary file with the
13 | given text content.
14 |
15 | :param text: The text to write to a file
16 | Usage:
17 | with TempInputFile("hello world"):
18 | do_something()
19 | """
20 | with tempfile.NamedTemporaryFile(mode="w", prefix=prefix, delete=not settings.DEBUG) as f:
21 | f.write(text)
22 | f.flush()
23 | yield f
24 |
25 |
26 | @contextmanager
27 | def TempValuesFile(values, prefix="helmsman"):
28 | """
29 | Context manager to carry out an action
30 | after creating a temporary file with the
31 | given yaml values.
32 |
33 | :param values: The yaml values to write to a file
34 | Usage:
35 | with TempValuesFile({'hello': 'world'}):
36 | do_something()
37 | """
38 | with tempfile.NamedTemporaryFile(mode="w", prefix=prefix, delete=not settings.DEBUG) as f:
39 | yaml.safe_dump(values, stream=f, default_flow_style=False)
40 | yield f
41 |
42 |
43 | # based on: https://codereview.stackexchange.com/questions/21033/flatten-dic
44 | # tionary-in-python-functional-style
45 | def flatten_dict(d):
46 | def items():
47 | for key, value in d.items():
48 | if isinstance(value, dict):
49 | for subkey, subvalue in flatten_dict(value).items():
50 | yield key + "." + subkey, subvalue
51 | else:
52 | yield key, value
53 | return dict(items())
54 |
--------------------------------------------------------------------------------
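flatten_dict collapses a nested dictionary into a single-level mapping with dot-separated keys. A quick usage sketch, assuming the cloudman/ directory is on the Python path and dependencies are installed:

    # Hedged example: flattening nested chart-style values.
    from helmsman.helpers import flatten_dict

    values = {"ingress": {"enabled": True,
                          "annotations": {"kubernetes.io/tls-acme": "true"}}}
    print(flatten_dict(values))
    # {'ingress.enabled': True, 'ingress.annotations.kubernetes.io/tls-acme': 'true'}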
/cloudman/cloudman/urls.py:
--------------------------------------------------------------------------------
1 | """
2 | CloudMan URL Configuration.
3 |
4 | The `urlpatterns` list routes URLs to views. For more information please see:
5 | https://docs.djangoproject.com/en/1.11/topics/http/urls/
6 | Examples:
7 | Function views
8 | 1. Add an import: from my_app import views
9 | 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
10 | Class-based views
11 | 1. Add an import: from other_app.views import Home
12 | 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
13 | Including another URLconf
14 | 1. Import the include() function: from django.urls import re_path, include
15 | 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
16 | """
17 | from django.conf import settings
18 | from django.urls import include
19 | from django.urls import path
20 | from rest_framework.schemas import get_schema_view
21 |
22 |
23 |
24 | schema_view = get_schema_view(title='CloudMan API', url=settings.REST_SCHEMA_BASE_URL,
25 | urlconf='cloudman.urls')
26 |
27 | app_name = 'cloudman'
28 | urlpatterns = [
29 | path('cloudman/cloudlaunch/cloudlaunch/api/v1/auth/user/', include('cloudlaunchserver.urls')),
30 | path('cloudman/', include('cloudlaunchserver.urls')),
31 | path('cloudman/api/v1/', include('clusterman.urls')),
32 | path('cloudman/api/v1/', include('helmsman.urls')),
33 | path('cloudman/api/v1/', include('projman.urls')),
34 | path('cloudman/api/v1/schema/', schema_view),
35 | path('cloudman/oidc/', include('mozilla_django_oidc.urls')),
36 | ]
37 |
38 | # Uncomment to have Gunicorn serve static content (dev only)
39 | # Also run: python manage.py collectstatic
40 | # from django.contrib.staticfiles.urls import staticfiles_urlpatterns
41 | # urlpatterns += staticfiles_urlpatterns()
42 |
--------------------------------------------------------------------------------
/cloudman/projman/rules.py:
--------------------------------------------------------------------------------
1 | from django.contrib.auth.models import Group
2 |
3 | import rules
4 |
5 |
6 | # Delegate to keycloak in future iteration
7 |
8 | # Predicates
9 | @rules.predicate
10 | def can_view_project(user, project):
11 | if not project:
12 | return False
13 | return rules.is_group_member(f'projman-{project.namespace}')(user)
14 |
15 |
16 | @rules.predicate
17 | def is_project_admin(user, project):
18 | if not project:
19 | return False
20 | return rules.is_group_member(f'projman-{project.namespace}-admin')(user)
21 |
22 |
23 | @rules.predicate
24 | def is_project_owner(user, project):
25 | if not project:
26 | return False
27 | return project.owner == user
28 |
29 |
30 | @rules.predicate
31 | def is_chart_owner(user, proj_chart):
32 | # Should have update rights on the parent project
33 | if not proj_chart:
34 | return False
35 | return user.has_perm('projman.change_project', proj_chart.project)
36 |
37 |
38 | @rules.predicate
39 | def can_view_chart(user, proj_chart):
40 | # Should have view rights on the parent project
41 | if not proj_chart:
42 | return False
43 | return user.has_perm('projman.view_project', proj_chart.project)
44 |
45 |
46 | # Permissions
47 | rules.add_perm('projman.view_project', is_project_owner | is_project_admin | rules.is_staff | can_view_project)
48 | rules.add_perm('projman.add_project', rules.is_staff)
49 | rules.add_perm('projman.change_project', is_project_owner | is_project_admin | rules.is_staff)
50 | rules.add_perm('projman.delete_project', is_project_owner | is_project_admin | rules.is_staff)
51 |
52 | rules.add_perm('projman.view_chart', is_chart_owner | rules.is_staff | can_view_chart)
53 | rules.add_perm('projman.add_chart', is_project_owner | is_project_admin | rules.is_staff)
54 | rules.add_perm('projman.change_chart', is_chart_owner | rules.is_staff)
55 | rules.add_perm('projman.delete_chart', is_chart_owner | rules.is_staff)
56 |
--------------------------------------------------------------------------------
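The permissions above are object-level checks registered with django-rules and are evaluated through the standard user.has_perm(perm, obj) call. A small sketch, assuming a configured Django environment with the rules authentication backend enabled and existing users and projects; the helper function is illustrative, not part of the repository:

    # Hedged example: object-level permission check backed by projman/rules.py.
    from django.contrib.auth import get_user_model

    from projman.models import CMProject


    def user_can_view_project(username, project_name):
        """Return True if the rules above grant view access."""
        user = get_user_model().objects.get(username=username)
        project = CMProject.objects.get(name=project_name)
        return user.has_perm('projman.view_project', project)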
/tox.ini:
--------------------------------------------------------------------------------
1 | # Tox (http://tox.testrun.org/) is a tool for running tests
2 | # in multiple virtualenvs. This configuration file will run the
3 | # test suite on all supported python versions. To use it, "pip install tox"
4 | # and then run "tox" from this directory.
5 | #
6 | # To run a specific test use:
7 | # tox -- clusterman.tests.test_cluster_api.CMClusterScaleSignalTest
8 |
9 | [tox]
10 | envlist = py38,integration
11 | skipsdist = True
12 |
13 | [testenv]
14 | commands = {envpython} -m coverage run --source cloudman --branch cloudman/manage.py test {posargs:clusterman helmsman projman}
15 | setenv =
16 | CELERY_CONFIG_MODULE=cloudman.celeryconfig_test
17 | # Fix for import issue: https://github.com/travis-ci/travis-ci/issues/7940
18 | BOTO_CONFIG=/dev/null
19 | passenv =
20 | SENTRY_DSN
21 | deps =
22 | -rrequirements_test.txt
23 | coverage
24 |
25 | [testenv:integration]
26 | docker =
27 | keycloak
28 | commands = {envpython} -m coverage run --source cloudman --branch cloudman/manage.py test {posargs:cloudman clusterman helmsman projman}
29 |
30 | [docker:keycloak]
31 | image = jboss/keycloak:7.0.0
32 | # Environment variables are passed to the container. They are only
33 | # available to that container, and not to the testenv, other
34 | # containers, or as replacements in other parts of tox.ini
35 | environment =
36 | KEYCLOAK_USER=admin
37 | KEYCLOAK_PASSWORD=testpassword
38 | JAVA_OPTS=-server -Xms64m -Xmx512m -XX:MetaspaceSize=96M -XX:MaxMetaspaceSize=256m -Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=org.jboss.byteman -Djava.awt.headless=true -Dkeycloak.migration.action=import -Dkeycloak.migration.provider=singleFile -Dkeycloak.migration.file=/testdata/realm-export.json -Dkeycloak.migration.strategy=OVERWRITE_EXISTING
39 | OIDC_ENABLED=1
40 | ports =
41 | 8080:8080/tcp
42 | volumes =
43 | bind:ro:{toxinidir}/cloudman/cloudman/tests/fixtures/:/testdata/
44 | healthcheck_cmd = curl -f http://localhost:8080/auth/ || exit 1
45 | healthcheck_timeout = 1
46 | healthcheck_retries = 30
47 | healthcheck_interval = 10
48 | healthcheck_start_period = 10
49 |
--------------------------------------------------------------------------------
/cloudman/clusterman/tests/data/initial_cluster_data_aws.yaml:
--------------------------------------------------------------------------------
1 | app_config:
2 | config_appliance:
3 | inventoryTemplate:
4 | repository: https://github.com/CloudVE/ansible-cloudman2
5 | runner: ansible
6 | sshUser: ubuntu
7 | config_cloudlaunch:
8 | customImageID: null
9 | firewall:
10 | - rules:
11 | - cidr: 0.0.0.0/0
12 | from: "22"
13 | protocol: tcp
14 | to: "22"
15 | securityGroup: cloudlaunch-cm2
16 | gateway: null
17 | keyPair: null
18 | network: null
19 | provider_settings:
20 | ebsOptimised: null
21 | volumeIOPS: null
22 | rootStorageType: instance
23 | staticIP: null
24 | subnet: null
25 | vmType: m5.24xlarge
26 | config_cloudman2:
27 | clusterPassword: 123456
28 | cm_boot_image: cloudve/cloudman-boot
29 | pulsarOnly: false
30 | cloud_config:
31 | credentials:
32 | # temp credentials don't have id and name
33 | aws_access_key: dummy_key
34 | aws_secret_key: dummy_secret
35 | image:
36 | description: Ubuntu 16.04 with Docker
37 | image_id: ami-123456
38 | name: Ubuntu 16.04 with Docker
39 | target:
40 | id: 25
41 | resourcetype: CloudDeploymentTarget
42 | target_zone:
43 | cloud:
44 | id: aws
45 | name: Amazon Web Services
46 | resourcetype: AWSCloud
47 | name: us-east1
48 | region:
49 | cloud: aws
50 | id: 22
51 | name: us-east1
52 | region_id: amazon-us-east
53 | resourcetype: AWSRegion
54 | zone_id: default
55 | host_config:
56 | host_address: 127.0.0.1
57 | run_cmd: null
58 | ssh_private_key: |
59 | -----BEGIN PRIVATE KEY-----
60 | MSomeREASONABLECcontentBAQEFAASCBKkwggSlAgEAAoIBAQDV8ZKINVKPejyt
61 | e1KdtdUcj4zA9d3R0qI6UrrZICaXCiCST8Wyd0GbtDxElwMx1I4Wvce4r4ESZcdO
62 | zZZdd8whRDbQDY0lYJrXGpoZvg==
63 | -----END PRIVATE KEY-----
64 | ssh_public_key: ssh-rsa AAAASomeKey/0DV
65 | ssh_user: ubuntu
66 | rke_config:
67 | rke_registration_server: 10.1.1.210
68 | rke_registration_token: token-bf4j5:sometoken
69 | rke_cluster_id: cluster.hostname.com
70 |
--------------------------------------------------------------------------------
/cloudman/clusterman/tests/data/initial_cluster_data_gcp.yaml:
--------------------------------------------------------------------------------
1 | app_config:
2 | config_appliance:
3 | inventoryTemplate:
4 | repository: https://github.com/CloudVE/ansible-cloudman2
5 | runner: ansible
6 | sshUser: ubuntu
7 | config_cloudlaunch:
8 | customImageID: null
9 | firewall:
10 | - rules:
11 | - cidr: 0.0.0.0/0
12 | from: "22"
13 | protocol: tcp
14 | to: "22"
15 | securityGroup: cloudlaunch-cm2
16 | gateway: null
17 | keyPair: null
18 | network: null
19 | provider_settings:
20 | ebsOptimised: null
21 | volumeIOPS: null
22 | rootStorageType: instance
23 | staticIP: null
24 | subnet: null
25 | vmType: m2.large
26 | config_cloudman2:
27 | clusterPassword: 123456
28 | cm_boot_image: cloudve/cloudman-boot
29 | pulsarOnly: false
30 | cloud_config:
31 | credentials:
32 | # temp credentials don't have id and name
33 | gcp_service_creds_dict:
34 | entry1: value1
35 | entry2: value2
36 | gcp_vm_default_username: "default"
37 | image:
38 | description: Ubuntu 16.04 with Docker
39 | image_id: ami-123456
40 | name: Ubuntu 16.04 with Docker
41 | target:
42 | id: 27
43 | resourcetype: CloudDeploymentTarget
44 | target_zone:
45 | cloud:
46 | id: gcp
47 | name: Google Cloud Platform
48 | resourcetype: GCPCloud
49 | name: us-east1
50 | region:
51 | cloud: gcp
52 | id: 24
53 | name: us-east1
54 | region_id: gcp-us-east
55 | resourcetype: GCPRegion
56 | zone_id: default
57 | host_config:
58 | host_address: 127.0.0.1
59 | run_cmd: null
60 | ssh_private_key: |
61 | -----BEGIN PRIVATE KEY-----
62 | MSomeREASONABLECcontentBAQEFAASCBKkwggSlAgEAAoIBAQDV8ZKINVKPejyt
63 | e1KdtdUcj4zA9d3R0qI6UrrZICaXCiCST8Wyd0GbtDxElwMx1I4Wvce4r4ESZcdO
64 | zZZdd8whRDbQDY0lYJrXGpoZvg==
65 | -----END PRIVATE KEY-----
66 | ssh_public_key: ssh-rsa AAAASomeKey/0DV
67 | ssh_user: ubuntu
68 | rke_config:
69 | rke_registration_server: 10.1.1.210
70 | rke_registration_token: token-bf4j5:sometoken
71 | rke_cluster_id: cluster.hostname.com
72 |
--------------------------------------------------------------------------------
/cloudman/projman/management/commands/projman_load_config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import yaml
3 |
4 | from django.core.management import call_command
5 | from django.core.management.base import BaseCommand
6 |
7 | from helmsman import helpers
8 |
9 |
10 | class Command(BaseCommand):
11 | help = 'Loads projman config data from a yaml file'
12 |
13 | def add_arguments(self, parser):
14 | parser.add_argument('config_file', type=argparse.FileType('r'))
15 |
16 | def handle(self, *args, **options):
17 | settings = yaml.safe_load(options['config_file'].read())
18 | self.process_settings(settings or {})
19 |
20 | @staticmethod
21 | def process_settings(settings):
22 | projects = settings.get('projects')
23 | for project in projects or {}:
24 | if project:
25 | call_command("projman_create_project", project)
26 | charts = projects.get(project, {}).get('charts', [])
27 | for key in charts or []:
28 | chart = charts.get(key)
29 | template = chart.get("install_template")
30 | if template:
31 | release_name = chart.get("release_name", '')
32 | values = chart.get("values", '')
33 | context = chart.get("context", '')
34 | extra_args = []
35 | if chart.get("upgrade"):
36 | extra_args += ['--upgrade']
37 | if chart.get("reset_values"):
38 | extra_args += ['--reset_values']
39 | with helpers.TempValuesFile(values) as values_file:
40 | with helpers.TempValuesFile(context) as context_file:
41 | call_command("install_template_in_project",
42 | project, template,
43 | release_name,
44 | values_file.name,
45 | context_file.name,
46 | *extra_args)
47 |
--------------------------------------------------------------------------------
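
For reference, a sketch of the YAML shape projman_load_config walks (keys follow process_settings above; the project, chart and values below are illustrative) together with a programmatic invocation:

    # projects:
    #   myproject:
    #     charts:
    #       jupyter:
    #         install_template: jupyter
    #         release_name: jupyter
    #         values:
    #           greeting: hello
    #         context:
    #           dummy: world
    #         upgrade: true
    from django.core.management import call_command

    call_command("projman_load_config", "/path/to/projman_config.yaml")
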
/cloudman/helmsman/tests/test_helmsman_unit.py:
--------------------------------------------------------------------------------
1 | from django.contrib.auth.models import User
2 | from django.test import TestCase, override_settings
3 |
4 | from helmsman.api import HelmsManAPI
5 | from helmsman.api import HMServiceContext
6 |
7 |
8 | class InstallTemplateUnitTest(TestCase):
9 |
10 | def setUp(self):
11 | admin = User.objects.get_or_create(username='admin', is_superuser=True)[0]
12 | self.client = HelmsManAPI(HMServiceContext(user=admin))
13 |
14 | def test_render_values(self):
15 | base_tpl = ('hosts:\n'
16 | ' - ~\n'
17 | ' {%- if not (context.domain | ipaddr) %}\n'
18 | ' - "{{ context.domain }}"\n'
19 | ' {%- endif %}')
20 |
21 | tpl = self.client.templates.create(
22 | 'dummytpl', 'dummyrepo', 'dummychart',
23 | template=base_tpl)
24 |
25 | ip_expected = ('hosts:\n'
26 | ' - ~')
27 | host_expected = ('hosts:\n'
28 | ' - ~\n'
29 | ' - "example.com"')
30 |
31 | ip_rendered = tpl.render_values(context={'domain': '192.168.2.1'})
32 |         self.assertEqual(ip_expected, ip_rendered)
33 |
34 | host_rendered = tpl.render_values(context={'domain': 'example.com'})
35 |         self.assertEqual(host_expected, host_rendered)
36 |
37 | @override_settings(CM_GLOBAL_CONTEXT={'domain': 'globaldomain.com'})
38 | def test_render_values_global_context(self):
39 | base_tpl = ('hosts:\n'
40 | ' - ~\n'
41 | ' {%- if not (context.global.domain | ipaddr) %}\n'
42 | ' - "{{ context.global.domain }}"\n'
43 | ' {%- endif %}')
44 |
45 | tpl = self.client.templates.create(
46 | 'dummytpl', 'dummyrepo', 'dummychart',
47 | template=base_tpl)
48 |
49 | host_expected = ('hosts:\n'
50 | ' - ~\n'
51 | ' - "globaldomain.com"')
52 |
53 | host_rendered = tpl.render_values(context={'domain': 'example.com'})
54 |         self.assertEqual(host_expected, host_rendered)
55 |
--------------------------------------------------------------------------------
/cloudman/clusterman/tests/data/initial_cluster_data_openstack.yaml:
--------------------------------------------------------------------------------
1 | app_config:
2 | config_appliance:
3 | inventoryTemplate:
4 | repository: https://github.com/CloudVE/ansible-cloudman2
5 | runner: ansible
6 | sshUser: ubuntu
7 | config_cloudlaunch:
8 | customImageID: null
9 | firewall:
10 | - rules:
11 | - cidr: 0.0.0.0/0
12 | from: "22"
13 | protocol: tcp
14 | to: "22"
15 | securityGroup: cloudlaunch-cm2
16 | gateway: null
17 | keyPair: null
18 | network: null
19 | provider_settings:
20 | ebsOptimised: null
21 | volumeIOPS: null
22 | rootStorageType: instance
23 | staticIP: null
24 | subnet: null
25 | vmType: m2.large
26 | config_cloudman2:
27 | clusterPassword: 123456
28 | cm_boot_image: cloudve/cloudman-boot
29 | pulsarOnly: false
30 | cloud_config:
31 | credentials:
32 | # temp credentials don't have id and name
33 | os_username: "testuser@domain.com"
34 | os_password: "testpassword"
35 | os_project_name: testproject
36 | os_project_domain_name: test_project_domain
37 | os_user_domain_name: test_user_domain
38 | image:
39 | description: Ubuntu 16.04 with Docker
40 | image_id: ami-123456
41 | name: Ubuntu 16.04 with Docker
42 | target:
43 | id: 28
44 | resourcetype: CloudDeploymentTarget
45 | target_zone:
46 | cloud:
47 | id: openstack
48 | name: OpenStack
49 | resourcetype: OpenStackCloud
50 | name: melbourne
51 | region:
52 | cloud: openstack
53 | id: 25
54 | name: melbourne
55 | region_id: melbourne
56 | resourcetype: OpenStackRegion
57 | zone_id: default
58 | host_config:
59 | host_address: 127.0.0.1
60 | run_cmd: null
61 | ssh_private_key: |
62 | -----BEGIN PRIVATE KEY-----
63 | MSomeREASONABLECcontentBAQEFAASCBKkwggSlAgEAAoIBAQDV8ZKINVKPejyt
64 | e1KdtdUcj4zA9d3R0qI6UrrZICaXCiCST8Wyd0GbtDxElwMx1I4Wvce4r4ESZcdO
65 | zZZdd8whRDbQDY0lYJrXGpoZvg==
66 | -----END PRIVATE KEY-----
67 | ssh_public_key: ssh-rsa AAAASomeKey/0DV
68 | ssh_user: ubuntu
69 | rke_config:
70 | rke_registration_server: 10.1.1.210
71 | rke_registration_token: token-bf4j5:sometoken
72 | rke_cluster_id: cluster.hostname.com
73 |
--------------------------------------------------------------------------------
/cloudman/projman/views.py:
--------------------------------------------------------------------------------
1 | """ProjMan Create views."""
2 | from rest_framework.views import APIView
3 | from rest_framework.exceptions import PermissionDenied
4 | from rest_framework.response import Response
5 | from rest_framework.permissions import IsAuthenticated
6 |
7 | from djcloudbridge import drf_helpers
8 | from . import serializers
9 | from .api import ProjManAPI
10 |
11 |
12 | class ProjManAPIView(APIView):
13 | """List ProjMan API endpoints"""
14 |
15 | def get(self, request, format=None):
16 |         """Return available projects."""
17 | response = {'url': request.build_absolute_uri('projects')}
18 | return Response(response)
19 |
20 |
21 | class ProjectViewSet(drf_helpers.CustomModelViewSet):
22 | """Returns list of projects managed by ProjMan."""
23 |
24 | permission_classes = (IsAuthenticated,)
25 | # Required for the Browsable API renderer to have a nice form.
26 | serializer_class = serializers.PMProjectSerializer
27 |
28 | def list_objects(self):
29 | """Get a list of all registered projects."""
30 | return ProjManAPI.from_request(self.request).projects.list()
31 |
32 | def get_object(self):
33 | """Get info about a specific project."""
34 | return ProjManAPI.from_request(self.request).projects.get(
35 | self.kwargs["pk"])
36 |
37 |
38 | class ProjectChartViewSet(drf_helpers.CustomModelViewSet):
39 | """
40 | Returns a list of charts belonging to a project.
41 | """
42 | permission_classes = (IsAuthenticated,)
43 | # Required for the Browsable API renderer to have a nice form.
44 | serializer_class = serializers.PMProjectChartSerializer
45 |
46 | def list_objects(self):
47 | try:
48 | project = ProjManAPI.from_request(self.request).projects.get(
49 | self.kwargs["project_pk"])
50 | except PermissionDenied:
51 | project = None
52 | if project:
53 | return project.charts.list()
54 | else:
55 | return []
56 |
57 | def get_object(self):
58 | project = ProjManAPI.from_request(self.request).projects.get(
59 | self.kwargs["project_pk"])
60 | if project:
61 | return project.charts.get(self.kwargs["pk"])
62 | else:
63 | return None
64 |
--------------------------------------------------------------------------------
/cloudman/clusterman/tasks.py:
--------------------------------------------------------------------------------
1 | """Tasks to be executed asynchronously (via Celery)."""
2 | from celery.app import shared_task
3 | from celery.result import AsyncResult
4 | from celery.result import allow_join_result
5 |
6 | from django.contrib.auth.models import User
7 |
8 | from clusterman import api
9 | from clusterman.clients.kube_client import KubeClient
10 |
11 |
12 | def node_not_present(node):
13 | kube_client = KubeClient()
14 | print(f"Checking for presence of node: {node.name}")
15 | k8s_node = kube_client.nodes.find(labels={'usegalaxy.org/cm_node_name': node.name})
16 | return not k8s_node
17 |
18 |
19 | def wait_till_deployment_deleted(deployment_delete_task_id):
20 | with allow_join_result():
21 | deployment_delete_task = AsyncResult(deployment_delete_task_id)
22 | print("Waiting for node deployment to be deleted...")
23 | deployment_delete_task.wait()
24 | if deployment_delete_task.successful():
25 | print("Deployment deleted successfully.")
26 | return
27 | else:
28 | task_meta = deployment_delete_task.backend.get_task_meta(
29 | deployment_delete_task.id)
30 |             print(f"Deployment delete failed: {task_meta.get('status')} with traceback: "
31 | f"{task_meta.get('traceback')}")
32 |
33 |
34 | @shared_task(bind=True, expires=120)
35 | def delete_node(self, deployment_delete_task_id, cluster_id, node_id):
36 | """
37 | Triggers a delete task through cloudlaunch.
38 | If successful, removes reference to node
39 | """
40 | admin = User.objects.filter(is_superuser=True).first()
41 | cmapi = api.CloudManAPI(api.CMServiceContext(user=admin))
42 | cluster = cmapi.clusters.get(cluster_id)
43 | node = cluster.nodes.get(node_id)
44 | wait_till_deployment_deleted(deployment_delete_task_id)
45 | if node_not_present(node):
46 | # if desired state has been reached, clusterman no longer
47 | # needs to maintain a reference to the node
48 | # call the saved django delete method which we remapped
49 |         print("Node does not exist, removing clusterman reference.")
50 | node.original_delete()
51 | else:
52 |         print("Deleted node still exists, not removing clusterman "
53 | "node reference.")
54 |
--------------------------------------------------------------------------------
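
A sketch of how delete_node would typically be queued (the ids below are placeholders); .delay() hands the work to a Celery worker, which waits on the cloudlaunch deployment-delete task and then drops the node reference:

    from clusterman.tasks import delete_node

    delete_node.delay("cloudlaunch-delete-task-id",   # Celery id of the deployment delete task
                      cluster_id=1, node_id=42)
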
/cloudman/clusterman/tests/data/initial_cluster_data_azure.yaml:
--------------------------------------------------------------------------------
1 | app_config:
2 | config_appliance:
3 | inventoryTemplate:
4 | repository: https://github.com/CloudVE/ansible-cloudman2
5 | runner: ansible
6 | sshUser: ubuntu
7 | config_cloudlaunch:
8 | customImageID: null
9 | firewall:
10 | - rules:
11 | - cidr: 0.0.0.0/0
12 | from: "22"
13 | protocol: tcp
14 | to: "22"
15 | securityGroup: cloudlaunch-cm2
16 | gateway: null
17 | keyPair: null
18 | network: null
19 | provider_settings:
20 | ebsOptimised: null
21 | volumeIOPS: null
22 | rootStorageType: instance
23 | staticIP: null
24 | subnet: null
25 | vmType: m2.large
26 | config_cloudman2:
27 | clusterPassword: 123456
28 | cm_boot_image: cloudve/cloudman-boot
29 | pulsarOnly: false
30 | cloud_config:
31 | credentials:
32 | # temp credentials don't have id and name
33 | azure_subscription_id: some id
34 | azure_client_id: some_client_id
35 | azure_secret: some_secret
36 | azure_tenant: some_tenant
37 | azure_resource_group: some_resource_group
38 | azure_storage_account: some_storage_account
39 | azure_vm_default_username: some_vm_default_username
40 | image:
41 | description: Ubuntu 16.04 with Docker
42 | image_id: ami-123456
43 | name: Ubuntu 16.04 with Docker
44 | target:
45 | id: 26
46 | resourcetype: CloudDeploymentTarget
47 | target_zone:
48 | cloud:
49 | id: azure
50 | name: Microsoft Azure
51 | resourcetype: AzureCloud
52 | name: us-east1
53 | region:
54 | cloud: azure
55 | id: 23
56 | name: us-east1
57 | region_id: azure-us-east
58 | resourcetype: AzureRegion
59 | zone_id: default
60 | host_config:
61 | host_address: 127.0.0.1
62 | run_cmd: null
63 | ssh_private_key: |
64 | -----BEGIN PRIVATE KEY-----
65 | MSomeREASONABLECcontentBAQEFAASCBKkwggSlAgEAAoIBAQDV8ZKINVKPejyt
66 | e1KdtdUcj4zA9d3R0qI6UrrZICaXCiCST8Wyd0GbtDxElwMx1I4Wvce4r4ESZcdO
67 | zZZdd8whRDbQDY0lYJrXGpoZvg==
68 | -----END PRIVATE KEY-----
69 | ssh_public_key: ssh-rsa AAAASomeKey/0DV
70 | ssh_user: ubuntu
71 | rke_config:
72 | rke_registration_server: 10.1.1.210
73 | rke_registration_token: token-bf4j5:sometoken
74 | rke_cluster_id: cluster.hostname.com
75 |
76 |
--------------------------------------------------------------------------------
/cloudman/cloudman/oidc.py:
--------------------------------------------------------------------------------
1 | from cloudman.auth import get_from_well_known
2 | from django.contrib.auth.models import Group
3 | from django.db import transaction
4 | from mozilla_django_oidc import auth, utils, views
5 |
6 |
7 | def provider_logout(request):
8 | return get_from_well_known(
9 | utils.import_from_settings('OIDC_OP_METADATA_ENDPOINT'),
10 | 'end_session_endpoint')
11 |
12 |
13 | class CMOIDCAuthenticationBackend(auth.OIDCAuthenticationBackend):
14 |
15 | def create_user(self, claims):
16 | user = super(CMOIDCAuthenticationBackend, self).create_user(claims)
17 | return self.update_user(user, claims)
18 |
19 | def update_user(self, user, claims):
20 | roles = claims.get('roles')
21 | user.first_name = claims.get('given_name', '')
22 | user.last_name = claims.get('family_name', '')
23 | user.is_staff = 'admin' in roles or 'superuser' in roles
24 | user.is_superuser = 'superuser' in roles
25 | user.save()
26 | self.update_groups(user, claims)
27 |
28 | return user
29 |
30 | def update_groups(self, user, claims):
31 | """
32 | Transform roles obtained from keycloak into Django Groups and
33 | add them to the user. Note that any role not passed via keycloak
34 | will be removed from the user.
35 | """
36 | with transaction.atomic():
37 | user.groups.clear()
38 | for role in claims.get('roles'):
39 | group, _ = Group.objects.get_or_create(name=role)
40 | group.user_set.add(user)
41 |
42 | def get_userinfo(self, access_token, id_token, payload):
43 | """
44 | Get user details from the access_token and id_token and return
45 | them in a dict.
46 | """
47 | userinfo = super().get_userinfo(access_token, id_token, payload)
48 | accessinfo = self.verify_token(access_token, nonce=payload.get('nonce'))
49 | roles = accessinfo.get('realm_access', {}).get('roles', [])
50 |
51 | userinfo['roles'] = roles
52 | return userinfo
53 |
54 |
55 | class OIDCAuthenticationRequestView(views.OIDCAuthenticationRequestView):
56 |
57 | def __init__(self, *args, **kwargs):
58 | super(OIDCAuthenticationRequestView, self).__init__(*args, **kwargs)
59 |
60 | self.OIDC_OP_AUTH_ENDPOINT = get_from_well_known(
61 | utils.import_from_settings('OIDC_OP_METADATA_ENDPOINT'),
62 | 'authorization_endpoint')
63 |
--------------------------------------------------------------------------------
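
A hedged sketch of the settings this module relies on: OIDC_OP_METADATA_ENDPOINT is the value read through import_from_settings above, while the URL below and the backend wiring are illustrative, not the project's actual configuration:

    # settings.py sketch (illustrative values)
    OIDC_OP_METADATA_ENDPOINT = (
        "https://keycloak.example.org/auth/realms/master/"
        ".well-known/openid-configuration")

    AUTHENTICATION_BACKENDS = [
        "cloudman.oidc.CMOIDCAuthenticationBackend",  # subclass defined above
        "django.contrib.auth.backends.ModelBackend",
    ]
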
/cloudman/clusterman/management/commands/create_cluster.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import base64
3 | import logging as log
4 | import json
5 | import yaml
6 |
7 | from django.contrib.auth.models import User
8 | from django.core.management.base import BaseCommand
9 |
10 | from clusterman import exceptions
11 |
12 |
13 | class Command(BaseCommand):
14 |     help = 'Creates a CloudMan cluster. Currently supported cluster ' \
15 |            'types: KUBE_RKE. Specify RKE connection settings in yaml ' \
16 |            'format in the settings_file.'
17 |
18 | def add_arguments(self, parser):
19 | parser.add_argument('name')
20 | parser.add_argument('cluster_type')
21 | parser.add_argument('settings_file', type=argparse.FileType('r'))
22 | parser.add_argument('--format', required=False, default="yaml",
23 | choices=['yaml', 'json', 'base64yaml'],
24 | help='Format that the data is encoded in')
25 |
26 | def handle(self, *args, **options):
27 | name = options['name']
28 | cluster_type = options['cluster_type']
29 | data = options['settings_file'].read()
30 | format = options['format']
31 | if format == "base64yaml":
32 | # Pad data: https://gist.github.com/perrygeo/ee7c65bb1541ff6ac770
33 | data = base64.b64decode(data + "===").decode('utf-8')
34 |
35 | if format == "json":
36 | settings = json.loads(data)
37 | elif format == "yaml" or format == "base64yaml":
38 | settings = yaml.safe_load(data)
39 |
40 | self.create_cluster(name, cluster_type, settings)
41 |
42 | @staticmethod
43 | def create_cluster(name, cluster_type, settings):
44 | try:
45 |             print("Creating cluster: {0}, type: {1}".format(
46 | name, cluster_type))
47 | from clusterman import api
48 | admin = User.objects.filter(is_superuser=True).first()
49 | cmapi = api.CloudManAPI(api.CMServiceContext(user=admin))
50 | try:
51 | cmapi.clusters.create(
52 | name, cluster_type, connection_settings=settings)
53 | print("cluster created successfully.")
54 | except exceptions.CMDuplicateNameException:
55 | cluster = cmapi.clusters.find(name=name)[0]
56 | cmapi.clusters.update(cluster)
57 | print("cluster already exists. Reinitialized.")
58 | except Exception as e:
59 | log.exception("An error occurred while creating the initial cluster!!:")
60 | print("An error occurred while creating the initial cluster!!:", str(e))
61 |
--------------------------------------------------------------------------------
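
A minimal invocation sketch, assuming a KUBE_RKE settings file shaped like the initial_cluster_data_*.yaml fixtures above (cluster name and path are placeholders):

    from django.core.management import call_command

    # Creates the cluster, or reinitialises it if the name already exists.
    call_command("create_cluster", "mycluster", "KUBE_RKE",
                 "/path/to/cluster_settings.yaml", format="yaml")
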
/cloudman/clusterman/management/commands/create_autoscale_user.py:
--------------------------------------------------------------------------------
1 | import logging as log
2 |
3 | from django.contrib.auth.models import Permission
4 | from django.contrib.auth.models import User
5 | from django.core.management.base import BaseCommand
6 |
7 | from clusterman.models import GlobalSettings
8 |
9 |
10 | class Command(BaseCommand):
11 |     help = 'Creates a user for managing autoscaling. This user has permissions to scale ' \
12 | 'the cluster only, and cannot perform any other cluster admin actions.'
13 |
14 | def add_arguments(self, parser):
15 | parser.add_argument(
16 | '--username', default='autoscaleuser',
17 | help='username for autoscaling endpoint')
18 | parser.add_argument(
19 | '--password', required=False,
20 | help='Password for this user, autogenerated if not specified')
21 | parser.add_argument(
22 | '--impersonate_account', required=False,
23 | help='User account to impersonate when scaling. This account is assumed to have stored'
24 | ' cloud credentials or IAM access. Defaults to the first super admin found.')
25 |
26 | def handle(self, *args, **options):
27 | username = options['username']
28 | password = options['password']
29 | account = options['impersonate_account']
30 |
31 | return self.create_autoscale_user(username, password, account)
32 |
33 | @staticmethod
34 | def _add_permissions(user, perm_names):
35 | for name in perm_names:
36 | permission = Permission.objects.get(codename=name)
37 | user.user_permissions.add(permission)
38 | return user
39 |
40 | @staticmethod
41 | def create_autoscale_user(username, password, account):
42 | try:
43 | print("Creating autoscale user: {0}".format(username))
44 | user, created = User.objects.get_or_create(username=username)
45 | if created:
46 | user.set_password(password)
47 | Command._add_permissions(
48 | user, ['view_cmcluster', 'add_cmclusternode',
49 | 'delete_cmclusternode'])
50 | user.save()
51 | if account:
52 | impersonate_user = User.objects.get(username=account)
53 | else:
54 | impersonate_user = User.objects.filter(is_superuser=True).first()
55 | GlobalSettings().settings.autoscale_impersonate = impersonate_user.username
56 | return "Autoscale user created successfully."
57 | else:
58 | return "Autoscale user already exists."
59 | except Exception as e:
60 | log.exception("An error occurred while creating the autoscale user!!:")
61 | return ("An error occurred while creating the autoscale user!!: %s" % e)
62 |
--------------------------------------------------------------------------------
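
A minimal invocation sketch (all values are placeholders); when --impersonate_account is omitted the command falls back to the first superuser:

    from django.core.management import call_command

    call_command("create_autoscale_user",
                 username="autoscaler",
                 password="a-strong-password",
                 impersonate_account="admin")
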
/util/convert_kwalify_to_json_schema.py:
--------------------------------------------------------------------------------
1 | # Converts a kwalify yaml schema to a json schema
2 | import yaml
3 | import json
4 | import os
5 | from collections import OrderedDict
6 |
7 | from pykwalify.core import Core
8 |
9 |
10 | # Extracted from https://github.com/galaxyproject/galaxy/blob/master/lib/
11 | # galaxy/webapps/config_manage.py
12 | # This resolver handles custom !include tags in galaxy yaml
13 | def _ordered_load(stream):
14 |
15 | class OrderedLoader(yaml.Loader):
16 |
17 | def __init__(self, stream):
18 | self._root = os.path.split(stream.name)[0]
19 | super(OrderedLoader, self).__init__(stream)
20 |
21 | def include(self, node):
22 | filename = os.path.join(self._root, self.construct_scalar(node))
23 | with open(filename, 'r') as f:
24 | return yaml.load(f, OrderedLoader)
25 |
26 | def construct_mapping(loader, node):
27 | loader.flatten_mapping(node)
28 | return OrderedDict(loader.construct_pairs(node))
29 |
30 | OrderedLoader.add_constructor(
31 | yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
32 | construct_mapping)
33 | OrderedLoader.add_constructor('!include', OrderedLoader.include)
34 |
35 | return yaml.load(stream, OrderedLoader)
36 |
37 |
38 | # Load the Galaxy config schema file
39 | schema_path = './config_schema.yml'
40 | with open(schema_path, "r") as f:
41 | schema = _ordered_load(f)
42 |
43 | # Parse it using pykwalify
44 | c = Core(source_file="galaxy.yml", schema_data=schema)
45 |
46 | # Get a handle to the galaxy config section
47 | galaxy_config = c.schema['mapping']['galaxy']
48 |
49 | TYPE_MAPPINGS = {
50 | 'map': 'object',
51 | 'str': 'string',
52 | 'int': 'number',
53 | 'bool': 'boolean'
54 | }
55 |
56 |
57 | # Recursively transform it to a json schema
58 | def transform_schema(schema):
59 | json_schema = {}
60 | for key, val in schema.items():
61 | if key == 'type':
62 | json_schema['type'] = TYPE_MAPPINGS.get(val, val)
63 | elif key == 'mapping':
64 | json_schema['properties'] = transform_schema(val)
65 | elif key == 'desc':
66 | json_schema['description'] = ((val[:150] + '...') if len(val) > 150
67 | else val)
68 | elif key == 'required':
69 | pass
70 | else:
71 | if not val:
72 | # Assume undefined bools are false so diffing works on the
73 | # front-end
74 | if schema.get('type') == 'bool':
75 | json_schema[key] = False
76 | else:
77 | json_schema[key] = ""
78 | elif isinstance(val, dict):
79 | json_schema[key] = transform_schema(val)
80 | else:
81 | json_schema[key] = val
82 | return json_schema
83 |
84 |
85 | json_cschema = transform_schema(galaxy_config)
86 | print(json.dumps(json_cschema, indent=4))
87 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:20.04 as stage1
2 |
3 | ARG DEBIAN_FRONTEND=noninteractive
4 | ENV PYTHONUNBUFFERED 1
5 |
6 | ENV KUBE_LATEST_VERSION=v1.24.4
7 | ENV HELM_VERSION=v3.9.4
8 | ENV HELM_FILENAME=helm-${HELM_VERSION}-linux-amd64.tar.gz
9 |
10 | RUN set -xe; \
11 | apt-get -qq update && apt-get install -y --no-install-recommends \
12 | apt-transport-https \
13 | git-core \
14 | make \
15 | software-properties-common \
16 | gcc \
17 | python3-dev \
18 | libffi-dev \
19 | python3-pip \
20 | python3-setuptools \
21 | curl \
22 | && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
23 | && curl -L https://get.helm.sh/${HELM_FILENAME} | tar xz && mv linux-amd64/helm /usr/local/bin/helm && rm -rf linux-amd64 \
24 | && apt-get autoremove -y && apt-get clean \
25 | && rm -rf /var/lib/apt/lists/* /tmp/* \
26 | && mkdir -p /app \
27 | && pip3 install virtualenv \
28 | && virtualenv -p python3 --prompt "(cloudman)" /app/venv
29 |
30 | # Set working directory to /app/
31 | WORKDIR /app/
32 |
33 | # Only add files required for installation to improve build caching
34 | ADD requirements.txt /app
35 | ADD setup.py /app
36 | ADD README.rst /app
37 | ADD HISTORY.rst /app
38 | ADD cloudman/cloudman/__init__.py /app/cloudman/cloudman/__init__.py
39 |
40 | # Install requirements first so this layer stays cached until requirements.txt
41 | # changes; switch to 'pip install cloudman-server' as soon as possible
42 | RUN /app/venv/bin/pip3 install -U pip && /app/venv/bin/pip3 install --no-cache-dir -r requirements.txt
43 |
44 |
45 | # Stage-2
46 | FROM ubuntu:20.04
47 |
48 | ARG DEBIAN_FRONTEND=noninteractive
49 | ENV PYTHONUNBUFFERED 1
50 |
51 | # Create cloudman user environment
52 | RUN useradd -ms /bin/bash cloudman \
53 | && mkdir -p /app \
54 | && chown cloudman:cloudman /app -R \
55 | && apt-get -qq update && apt-get install -y --no-install-recommends \
56 | git-core \
57 | python3-pip \
58 | python3-setuptools \
59 | locales locales-all \
60 | && apt-get autoremove -y && apt-get clean \
61 | && rm -rf /var/lib/apt/lists/* /tmp/*
62 |
63 | ENV LC_ALL en_US.UTF-8
64 |
65 | WORKDIR /app/cloudman/
66 |
67 | # Copy cloudman files to final image
68 | COPY --chown=cloudman:cloudman --from=stage1 /app /app
69 | COPY --chown=cloudman:cloudman --from=stage1 /usr/local/bin/kubectl /usr/local/bin/kubectl
70 | COPY --chown=cloudman:cloudman --from=stage1 /usr/local/bin/helm /usr/local/bin/helm
71 |
72 | # Add the source files last to minimize layer cache invalidation
73 | ADD --chown=cloudman:cloudman . /app
74 |
75 | # Switch to new, lower-privilege user
76 | USER cloudman
77 |
78 | RUN chmod a+x /usr/local/bin/kubectl \
79 | && chmod a+x /usr/local/bin/helm \
80 | && /app/venv/bin/python manage.py collectstatic --no-input
81 |
82 | # gunicorn will listen on this port
83 | EXPOSE 8000
84 |
85 | CMD /bin/bash -c "source /app/venv/bin/activate && /app/venv/bin/gunicorn -k gevent -b :8000 --access-logfile - --error-logfile - --log-level info cloudman.wsgi"
86 |
--------------------------------------------------------------------------------
/cloudman/helmsman/management/commands/helmsman_load_config.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import yaml
3 |
4 | from django.core.management import call_command
5 | from django.core.management.base import BaseCommand
6 |
7 | from helmsman import helpers
8 | from helmsman.management.commands.add_template_registry import Command as TplCommand
9 |
10 |
11 | class Command(BaseCommand):
12 | help = 'Loads helmsman config data from a yaml file'
13 |
14 | def add_arguments(self, parser):
15 | parser.add_argument('config_file', type=argparse.FileType('r'))
16 |
17 | def handle(self, *args, **options):
18 | settings = yaml.safe_load(options['config_file'].read())
19 | self.process_settings(settings)
20 |
21 | @staticmethod
22 | def process_settings(settings):
23 | if settings.get('repositories'):
24 | print("Processing chart repositories...")
25 | Command.process_helm_repos(settings.get('repositories'))
26 | else:
27 | print("No chart repositories defined.")
28 |
29 | if settings.get('template_registries'):
30 | print("Processing template registries...")
31 | Command.process_template_registries(settings.get('template_registries'))
32 | else:
33 | print("No template registries defined.")
34 |
35 | if settings.get('install_templates'):
36 | print("Processing install templates...")
37 | TplCommand.process_install_templates(settings.get('install_templates'))
38 | else:
39 | print("No install templates defined.")
40 |
41 | if settings.get('charts'):
42 | print("Processing charts in helmsman config...")
43 | Command.process_helm_charts(settings.get('charts'))
44 | else:
45 |             print("No charts defined in helmsman config.")
46 |
47 | @staticmethod
48 | def process_helm_repos(repositories):
49 | for repo in repositories:
50 | call_command("add_repo", repo.get('name'), repo.get('url'))
51 |
52 | @staticmethod
53 | def process_template_registries(template_registries):
54 | for registry in template_registries:
55 | call_command("add_template_registry", registry.get('name'), registry.get('url'))
56 |
57 | @staticmethod
58 | def process_helm_charts(charts):
59 | for chart in charts.values():
60 | extra_args = {}
61 | if chart.get('namespace'):
62 | extra_args["namespace"] = chart.get('namespace')
63 | if chart.get('create_namespace'):
64 | extra_args['create_namespace'] = True
65 | if chart.get('version'):
66 | extra_args["chart_version"] = chart.get('version')
67 | if chart.get('upgrade'):
68 | extra_args["upgrade"] = True
69 | if chart.get('values'):
70 | values = chart.get('values')
71 | with helpers.TempValuesFile(values) as f:
72 | extra_args["values_file"] = f.name
73 | call_command("add_chart", chart.get('name'), **extra_args)
74 | else:
75 | call_command("add_chart", chart.get('name'), **extra_args)
76 |
--------------------------------------------------------------------------------
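
A minimal invocation sketch (the path is a placeholder); the file may define any of the four top-level keys handled by process_settings above, as in the helmsman_config.yaml test data shown elsewhere in this tree:

    from django.core.management import call_command

    # Recognised top-level keys: repositories, template_registries,
    # install_templates, charts.
    call_command("helmsman_load_config", "/path/to/helmsman_config.yaml")
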
/cloudman/helmsman/management/commands/add_template_registry.py:
--------------------------------------------------------------------------------
1 | import logging as log
2 | import requests
3 | import yaml
4 |
5 | from django.core.management import call_command
6 | from django.core.management.base import BaseCommand
7 |
8 | from helmsman import helpers
9 |
10 |
11 | class Command(BaseCommand):
12 | help = 'Adds a new template registry to cloudman'
13 |
14 | def add_arguments(self, parser):
15 | parser.add_argument('name', help='Name of the template registry')
16 | parser.add_argument('url', help='Url to the template registry')
17 |
18 | def handle(self, *args, **options):
19 | self.add_template_registry(options['name'], options['url'])
20 |
21 | @staticmethod
22 | def add_template_registry(name, url):
23 | print(f"Importing template registry: {name} from: {url}")
24 | try:
25 | with requests.get(url) as r:
26 | registry = yaml.safe_load(r.content)
27 | if registry.get('install_templates'):
28 | Command.process_install_templates(registry.get('install_templates'))
29 | except Exception as e:
30 |             log.exception(f"An error occurred while importing registry '{name}':")
31 | print(f"An error occurred while importing registry '{name}':", str(e))
32 | raise e
33 |
34 | @staticmethod
35 | def process_install_templates(install_templates):
36 | for template_name in install_templates:
37 | template = install_templates.get(template_name)
38 | extra_args = []
39 | if template.get('chart_version'):
40 | extra_args += ["--chart_version", template.get('chart_version')]
41 | if template.get('context'):
42 | extra_args += ["--context", template.get('context')]
43 | if template.get('display_name'):
44 | extra_args += ["--display_name", template.get('display_name')]
45 | if template.get('summary'):
46 | extra_args += ["--summary", template.get('summary')]
47 | if template.get('description'):
48 | extra_args += ["--description", template.get('description')]
49 | if template.get('maintainers'):
50 | extra_args += ["--maintainers", template.get('maintainers')]
51 | if template.get('info_url'):
52 | extra_args += ["--info_url", template.get('info_url')]
53 | if template.get('icon_url'):
54 | extra_args += ["--icon_url", template.get('icon_url')]
55 | if template.get('screenshot_url'):
56 | extra_args += ["--screenshot_url", template.get('screenshot_url')]
57 | if template.get('upgrade'):
58 | extra_args += ["--upgrade"]
59 | if template.get('template'):
60 | with helpers.TempInputFile(template.get('template')) as f:
61 | extra_args += ["--template_file", f.name]
62 | call_command("add_install_template", template_name,
63 | template.get('repo'), template.get('chart'),
64 | *extra_args)
65 | else:
66 | call_command("add_install_template", template_name,
67 | template.get('repo'), template.get('chart'),
68 | *extra_args)
69 |
70 |
--------------------------------------------------------------------------------
/cloudman/helmsman/views.py:
--------------------------------------------------------------------------------
1 | from rest_framework.views import APIView
2 | from rest_framework.response import Response
3 | from rest_framework.permissions import IsAuthenticated
4 |
5 | from djcloudbridge import drf_helpers
6 | from . import serializers
7 | from .api import HelmsManAPI
8 |
9 |
10 | class HelmsManAPIView(APIView):
11 | """List Helmsman API endpoints"""
12 |
13 | def get(self, request, format=None):
14 |         """Return available repositories and charts."""
15 | response = {'repositories': request.build_absolute_uri('repositories'),
16 | 'charts': request.build_absolute_uri('charts')}
17 | return Response(response)
18 |
19 |
20 | class ChartRepoViewSet(drf_helpers.CustomModelViewSet):
21 | """Returns list of repositories managed by CloudMan."""
22 |
23 | permission_classes = (IsAuthenticated,)
24 | # Required for the Browsable API renderer to have a nice form.
25 | serializer_class = serializers.HMChartRepoSerializer
26 |
27 | def list_objects(self):
28 |         """Get a list of all registered repositories."""
29 | return HelmsManAPI.from_request(self.request).repositories.list()
30 |
31 | def get_object(self):
32 | """Get info about a specific repository."""
33 | return (HelmsManAPI.from_request(self.request)
34 | .repositories.get(self.kwargs["pk"]))
35 |
36 |
37 | class ChartViewSet(drf_helpers.CustomModelViewSet):
38 | """Returns list of charts managed by CloudMan."""
39 |
40 | permission_classes = (IsAuthenticated,)
41 | # Required for the Browsable API renderer to have a nice form.
42 | serializer_class = serializers.HMChartSerializer
43 |
44 | def list_objects(self):
45 | """Get a list of all registered charts."""
46 | return HelmsManAPI.from_request(self.request).charts.list()
47 |
48 | def get_object(self):
49 | """Get info about a specific chart."""
50 | return (HelmsManAPI.from_request(self.request)
51 | .charts.get(self.kwargs["pk"]))
52 |
53 |
54 | class NamespaceViewSet(drf_helpers.CustomModelViewSet):
55 |     """Returns list of namespaces managed by CloudMan."""
56 |
57 | permission_classes = (IsAuthenticated,)
58 | # Required for the Browsable API renderer to have a nice form.
59 | serializer_class = serializers.HMNamespaceSerializer
60 |
61 | def list_objects(self):
62 |         """Get a list of all registered namespaces."""
63 | return HelmsManAPI.from_request(self.request).namespaces.list()
64 |
65 | def get_object(self):
66 |         """Get info about a specific namespace."""
67 | return (HelmsManAPI.from_request(self.request)
68 | .namespaces.get(self.kwargs["pk"]))
69 |
70 |
71 | class InstallTemplatesViewSet(drf_helpers.CustomModelViewSet):
72 | """Returns list of templates managed by CloudMan."""
73 |
74 | permission_classes = (IsAuthenticated,)
75 | # Required for the Browsable API renderer to have a nice form.
76 | serializer_class = serializers.HMInstallTemplateSerializer
77 |
78 | def list_objects(self):
79 | """Get a list of all registered templates."""
80 | return HelmsManAPI.from_request(self.request).templates.list()
81 |
82 | def get_object(self):
83 |         """Get info about a specific install template."""
84 | return (HelmsManAPI.from_request(self.request)
85 | .templates.get(self.kwargs["pk"]))
86 |
--------------------------------------------------------------------------------
/cloudman/projman/serializers.py:
--------------------------------------------------------------------------------
1 | """DRF serializers for the CloudMan Create API endpoints."""
2 |
3 | from rest_framework import serializers
4 | from djcloudbridge import serializers as dj_serializers
5 | from helmsman import serializers as helmsman_serializers
6 | from .api import ProjManAPI
7 | from rest_framework.exceptions import ValidationError
8 |
9 |
10 | class PMProjectSerializer(serializers.Serializer):
11 | id = serializers.CharField(read_only=True)
12 | name = serializers.CharField()
13 | namespace = serializers.CharField(read_only=True)
14 | permissions = serializers.SerializerMethodField()
15 |
16 | def get_permissions(self, project):
17 | """
18 | Implementation of permissions field
19 | """
20 | user = self.context['view'].request.user
21 | return {
22 | 'add_project': user.has_perm('projman.add_project', project),
23 | 'change_project': user.has_perm('projman.change_project', project),
24 | 'delete_project': user.has_perm('projman.delete_project', project)
25 | }
26 |
27 | def create(self, valid_data):
28 | return ProjManAPI.from_request(self.context['request']).projects.create(
29 | valid_data.get('name'))
30 |
31 |
32 | class PMProjectChartSerializer(helmsman_serializers.HMChartSerializer):
33 | # remove the inherited field
34 | namespace = None
35 | project = PMProjectSerializer(read_only=True)
36 | permissions = serializers.SerializerMethodField()
37 |
38 | def get_permissions(self, chart):
39 | """
40 | Implementation of permissions field
41 | """
42 | user = self.context['view'].request.user
43 | return {
44 | 'change_chart': user.has_perm('projman.change_chart', chart),
45 | 'delete_chart': user.has_perm('projman.delete_chart', chart)
46 | }
47 |
48 | def create(self, valid_data):
49 | project_id = self.context['view'].kwargs.get("project_pk")
50 | project = ProjManAPI.from_request(self.context['request']).projects.get(project_id)
51 | if not project:
52 | raise ValidationError("Specified project id: %s does not exist"
53 | % project_id)
54 | return project.charts.create(
55 | valid_data.get('use_install_template'),
56 | release_name=valid_data.get('release_name'),
57 | values=valid_data.get('values'))
58 |
59 | def update(self, chart, validated_data):
60 | project_id = self.context['view'].kwargs.get("project_pk")
61 | project = ProjManAPI.from_request(self.context['request']).projects.get(project_id)
62 | if not project:
63 | raise ValidationError("Specified project id: %s does not exist"
64 | % project_id)
65 | if validated_data.get('state') == "rollback":
66 | return project.charts.rollback(chart)
67 | else:
68 | return project.charts.update(chart, validated_data.get("values"))
69 |
70 |
71 | class UserSerializer(dj_serializers.UserDetailsSerializer):
72 | permissions = serializers.SerializerMethodField()
73 |
74 | def get_permissions(self, user_obj):
75 | return {
76 | 'is_admin': user_obj.is_staff
77 | }
78 |
79 | class Meta(dj_serializers.UserDetailsSerializer.Meta):
80 | fields = dj_serializers.UserDetailsSerializer.Meta.fields + ('permissions',)
81 |
--------------------------------------------------------------------------------
/cloudman/projman/tests/data/helmsman_config.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | - name: cloudve
3 | url: https://raw.githubusercontent.com/CloudVE/helm-charts/master/
4 | - name: jupyterhub
5 | url: https://jupyterhub.github.io/helm-chart/
6 | install_templates:
7 | jupyter:
8 | repo: jupyterhub
9 | chart: jupyterhub
10 | template: |
11 | ingress:
12 | enabled: true
13 | path: '{{context.project.access_path}}/jupyterhub'
14 | hub:
15 | baseUrl: '{{context.project.access_path}}/jupyterhub'
16 | proxy:
17 | secretToken: '{{random_alphanumeric(65)}}'
18 | greeting: {{context.dummy}}
19 | galaxy:
20 | repo: cloudve
21 | chart: galaxy
22 | chart_version: 3.3.0
23 | context:
24 | storageclass: ebs-provisioner
25 | template: |
26 | config:
27 | oidc_backends_config.xml: |
28 |           <?xml version="1.0"?>
29 |           <OIDC>
30 |             <provider name="custos">
31 |               <url>https://ngkc4.cloudve.org/auth</url>
32 |               <client_id>galaxy-auth</client_id>
33 |               <client_secret>{{random_alphanumeric(8)}}-{{random_alphanumeric(4)}}-{{random_alphanumeric(4)}}-{{random_alphanumeric(12)}}</client_secret>
34 |               <redirect_uri>https://ngkc4.cloudve.org{{context.project.access_path}}/galaxy/authnz/custos/callback</redirect_uri>
35 |               <realm>master</realm>
36 |             </provider>
37 |           </OIDC>
38 | galaxy.yml:
39 | galaxy:
40 | enable_oidc: true
41 | oidc_backends_config_file: /galaxy/server/config/oidc_backends_config.xml
42 | oidc_config_file: /galaxy/server/config/oidc_config.xml
43 | oidc_config.xml: |
44 |           <?xml version="1.0"?>
45 |           <OIDC>
46 |             <Setter Property="VERIFY_SSL" Value="False" Type="bool"/>
47 |             <Setter Property="REQUESTS_TIMEOUT" Value="3600" Type="float"/>
48 |             <Setter Property="ID_TOKEN_MAX_AGE" Value="3600" Type="float"/>
49 |           </OIDC>
50 | ingress:
51 | annotations:
52 | certmanager.k8s.io/cluster-issuer: letsencrypt-prod
53 | kubernetes.io/tls-acme: "true"
54 | nginx.ingress.kubernetes.io/secure-backends: "true"
55 | enabled: true
56 | hosts:
57 | - ngkc4.cloudve.org
58 | path: {{context.project.access_path}}/galaxy
59 | tls:
60 | - hosts:
61 | - ngkc4.cloudve.org
62 | secretName: ngkc4-cloudve-org-key
63 | persistence:
64 | size: 95Gi
65 | storageClass: nfs-provisioner
66 | postgresql:
67 | persistence:
68 | storageClass: {{ context.storageclass }}
69 |
70 | charts:
71 | dashboard:
72 | name: stable/kubernetes-dashboard
73 | namespace: kube-system
74 | create_namespace: true
75 | values:
76 | enableInsecureLogin: true
77 | ingress:
78 | annotations:
79 | certmanager.k8s.io/cluster-issuer: letsencrypt-prod
80 | kubernetes.io/tls-acme: "true"
81 | nginx.ingress.kubernetes.io/secure-backends: "true"
82 | enabled: true
83 | hosts:
84 | - null
85 | - ngkc4.cloudve.org
86 | paths:
87 | - /dashboard
88 | - /dashboard/*
89 | tls:
90 | - hosts:
91 | - ngkc4.cloudve.org
92 | secretName: ngkc4-cloudve-org-key
93 | rbac:
94 | clusterAdminRole: true
95 |
--------------------------------------------------------------------------------
/cloudman/cloudman/tests/test_cloudman_auth.py:
--------------------------------------------------------------------------------
1 | import html
2 | import re
3 | import requests
4 |
5 | from django.contrib.auth.models import User
6 | from django.urls import reverse
7 |
8 | from rest_framework import status
9 | from rest_framework.test import APITestCase, APILiveServerTestCase
10 |
11 |
12 | class CMCloudManAuthIntegrationTests(APILiveServerTestCase):
13 | host = "localhost"
14 | port = 8000
15 | REGEX_KEYCLOAK_LOGIN_ACTION = re.compile(r'action=\"(.*)\"\s+')
16 | REGEX_CSRF_TOKEN = re.compile(r'csrfmiddlewaretoken\" value=\"(.*)\"')
17 |
18 | def setUp(self):
19 | self.session = requests.Session()
20 |
21 | def _attempt_login(self):
22 | url = reverse('oidc_authentication_init')
23 | return self.session.get(f"http://localhost:8000{url}")
24 |
25 | def _attempt_logout(self):
26 | # first, get a CSRF token
27 | response = self._get_clusters()
28 | matches = self.REGEX_CSRF_TOKEN.search(response.text)
29 | csrftoken = html.unescape(matches.groups(1)[0])
30 | # now logout with that csrf token
31 | url = reverse('oidc_logout')
32 | return self.session.post(f"http://localhost:8000{url}", headers={'X-CSRFToken': csrftoken})
33 |
34 | def _get_clusters(self):
35 | url = reverse('clusterman:clusters-list')
36 | # retrieve html page
37 | return self.session.get(f"http://localhost:8000{url}?format=api")
38 |
39 | def test_redirects_to_keycloak(self):
40 | response = self._attempt_login()
41 | self.assertEqual(response.status_code, status.HTTP_200_OK, response)
42 | self.assertIn("auth/realms/master/protocol/openid-connect/auth", response.url)
43 | return response
44 |
45 | def _login_via_keycloak(self, username, password):
46 | response = self._attempt_login()
47 | response = self.session.get(response.url)
48 | matches = self.REGEX_KEYCLOAK_LOGIN_ACTION.search(response.text)
49 | auth_url = html.unescape(matches.groups(1)[0])
50 | response = self.session.post(auth_url, data={
51 | "username": username, "password": password})
52 | return response
53 |
54 | def test_can_auth_admin(self):
55 | response = self._login_via_keycloak("admin", "testpassword")
56 | # Should have redirected back if auth succeeded
57 | self.assertIn("http://localhost:8000/", response.url)
58 |
59 | # User should have been created
60 | oidc_user = User.objects.get(email="admin@cloudve.org")
61 | assert oidc_user.is_superuser
62 | assert oidc_user.is_staff
63 |
64 | def test_invalid_auth_admin(self):
65 | response = self._login_via_keycloak("admin", "wrongpassword")
66 |         # Should not have redirected back since auth failed
67 | self.assertNotIn("http://localhost:8000/", response.url)
68 |
69 |         # User should not have been created
70 | with self.assertRaises(User.DoesNotExist):
71 | User.objects.get(email="admin@cloudve.org")
72 |
73 | def test_can_auth_non_admin(self):
74 | response = self._login_via_keycloak("nonadmin", "testpassword")
75 | # Should have redirected back if auth succeeded
76 | self.assertIn("http://localhost:8000/", response.url)
77 |
78 | # User should have been created
79 | oidc_user = User.objects.get(email="nonadmin@cloudve.org")
80 | assert not oidc_user.is_superuser
81 | assert not oidc_user.is_staff
82 |
83 | def test_can_logout(self):
84 | self._login_via_keycloak("nonadmin", "testpassword")
85 | response = self._attempt_logout()
86 | self.assertIn("auth/realms/master/protocol/openid-connect/logout", response.url)
87 |
--------------------------------------------------------------------------------
/cloudman/helmsman/management/commands/add_chart.py:
--------------------------------------------------------------------------------
1 | import yaml
2 |
3 | from django.contrib.auth.models import User
4 | from django.core.management.base import BaseCommand
5 |
6 | from ...api import HelmsManAPI, HMServiceContext
7 | from ...api import ChartExistsException, NamespaceNotFoundException
8 |
9 |
10 | class Command(BaseCommand):
11 |     help = 'Installs or upgrades a helm chart'
12 |
13 | def add_arguments(self, parser):
14 | parser.add_argument('chart_ref',
15 | help='Reference to a chart e.g. cloudve/cloudman')
16 | parser.add_argument('--namespace', required=True,
17 | help='namespace to install chart into')
18 | parser.add_argument('--release_name', required=False,
19 | help='name to give release')
20 | parser.add_argument('--chart_version', required=False,
21 | help='version of chart to install. defaults'
22 | ' to latest')
23 | parser.add_argument('--values_file', required=False,
24 | help='Values file to apply to the chart')
25 | parser.add_argument('--create_namespace', dest='create_namespace',
26 | action='store_true',
27 | help='attempt to create namespace if not found')
28 | parser.add_argument('--upgrade', dest='upgrade_chart',
29 | action='store_true',
30 | help='upgrade chart if it already exists')
31 |
32 | def handle(self, *args, **options):
33 | self.add_chart(options['chart_ref'], options['namespace'],
34 | options['release_name'], options['chart_version'],
35 | options['values_file'], options['create_namespace'],
36 | options['upgrade_chart'])
37 |
38 | @staticmethod
39 | def add_chart(chart_ref, namespace, release_name, version, values_file,
40 | create_namespace, upgrade_chart):
41 | Command.install_or_upgrade(chart_ref, namespace, release_name,
42 | version, values_file, create_namespace,
43 | upgrade_chart)
44 |
45 | @staticmethod
46 | def install_or_upgrade(chart_ref, namespace, release_name,
47 | version, values_file, create_namespace,
48 | upgrade_chart):
49 | admin = User.objects.filter(is_superuser=True).first()
50 | client = HelmsManAPI(HMServiceContext(user=admin))
51 | repo_name, chart_name = chart_ref.split("/")
52 | values = None
53 | if values_file:
54 | with open(values_file, 'r') as f:
55 | values = yaml.safe_load(f)
56 | if not client.namespaces.get(namespace):
57 | print(f"Namespace '{namespace}' not found.")
58 | if create_namespace:
59 | print(f"Creating Namespace '{namespace}'.")
60 | client.namespaces.create(namespace)
61 | else:
62 | message = (f"Namespace {namespace} does not exist. "
63 | f"Use the '--create_namespace' flag if you have "
64 | f"appropriate permissions.")
65 | raise NamespaceNotFoundException(message)
66 | chart = client.charts.find(namespace, chart_name)
67 | if chart and upgrade_chart:
68 | print(f"Upgrading chart {repo_name}/{chart_name} in namespace"
69 | f" {namespace}")
70 | client.charts.update(chart, values, version=version)
71 | else:
72 | print(f"Installing chart {repo_name}/{chart_name} into namespace"
73 | f" {namespace}")
74 | try:
75 | client.charts.create(repo_name, chart_name, namespace,
76 | release_name, version, values)
77 |             except ChartExistsException:
78 | print(f"Chart {repo_name}/{chart_name} already installed.")
79 |
--------------------------------------------------------------------------------
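
A minimal invocation sketch matching the arguments defined in add_arguments above (chart reference and values path are placeholders):

    from django.core.management import call_command

    # Installs cloudve/galaxy into the 'galaxy' namespace, creating the
    # namespace if needed; add upgrade_chart=True to upgrade an existing release.
    call_command("add_chart", "cloudve/galaxy",
                 namespace="galaxy",
                 create_namespace=True,
                 values_file="/path/to/galaxy_values.yaml")
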
/cloudman/helmsman/tests/test_mgmt_commands.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from django.contrib.auth.models import User
4 | from django.core.management import call_command
5 | from django.core.management.base import CommandError
6 | from django.test import TestCase
7 |
8 | from .client_mocker import ClientMocker
9 | from ..clients.helm_client import HelmClient
10 |
11 | from helmsman import models as hm_models
12 | from helmsman.api import NamespaceNotFoundException
13 | from helmsman.api import HelmsManAPI, HMServiceContext
14 |
15 |
16 | class CommandsTestCase(TestCase):
17 |
18 | TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
19 | INITIAL_HELMSMAN_DATA = os.path.join(
20 | TEST_DATA_PATH, 'helmsman_config.yaml')
21 | INITIAL_HELMSMAN_DATA_UPDATE = os.path.join(
22 | TEST_DATA_PATH, 'helmsman_config_update.yaml')
23 |
24 | def setUp(self):
25 | super().setUp()
26 | self.mock_client = ClientMocker(self)
27 | self.client.force_login(
28 | User.objects.get_or_create(username='admin', is_superuser=True)[0])
29 |
30 | def tearDown(self):
31 | self.client.logout()
32 |
33 | def test_helmsman_load_config_no_args(self):
34 | with self.assertRaisesRegex(CommandError, "required: config_file"):
35 | call_command('helmsman_load_config')
36 |
37 | def test_helmsman_load_config(self):
38 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
39 | client = HelmClient()
40 | repos = client.repositories.list()
41 | for repo in repos:
42 | self.assertIn(repo.get('NAME'), ["stable", "cloudve", "jupyterhub"])
43 | template = hm_models.HMInstallTemplate.objects.get(name='dummy')
44 | self.assertEqual(template.summary, "dummy chart")
45 | releases = client.releases.list("default")
46 | for rel in releases:
47 | self.assertIn(rel.get('CHART'),
48 | ["cloudlaunch-0.2.0", "galaxy-1.0.0"])
49 |
50 | def test_add_chart_no_namespace(self):
51 | with self.assertRaises(NamespaceNotFoundException):
52 | call_command("add_chart", "cloudve/galaxy", namespace="new")
53 |
54 | def test_helmsman_install_duplicate_template(self):
55 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
56 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
57 |
58 | def test_helmsman_load_config_template_registry(self):
59 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
60 | template = hm_models.HMInstallTemplate.objects.get(name='terminalman')
61 | self.assertEqual(template.chart, "terminalman")
62 | self.assertIn("starting_dir", template.context)
63 |
64 | def test_update_install_template(self):
65 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
66 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA_UPDATE)
67 | # dummy template should be unchanged since upgrade = false
68 | template = hm_models.HMInstallTemplate.objects.get(name='dummy')
69 | self.assertEqual(template.display_name, "dummy")
70 | # another dummy template should be updated since upgrade was specified
71 | template = hm_models.HMInstallTemplate.objects.get(name='anotherdummy')
72 | self.assertEqual(template.chart_version, "4.0.0")
73 |
74 | def test_update_chart(self):
75 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
76 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA_UPDATE)
77 | helm_api = HelmsManAPI(HMServiceContext(
78 | user=User.objects.get_or_create(username='admin', is_superuser=True)[0]))
79 | chart = helm_api.charts.find("anotherdummy", "anotherdummy")
80 | # version should be unchanged since upgrade = false
81 | self.assertEqual(chart.chart_version, "2.0.0")
82 | # dummy chart should be updated since upgrade = true
83 | chart = helm_api.charts.find("dummy", "dummy")
84 | self.assertEqual(chart.chart_version, "2.0.0")
85 |
--------------------------------------------------------------------------------
/cloudman/clusterman/migrations/0001_initial.py:
--------------------------------------------------------------------------------
1 | # Generated by Django 2.2.10 on 2020-02-23 17:26
2 |
3 | from django.db import migrations, models
4 | import django.db.models.deletion
5 |
6 |
7 | class Migration(migrations.Migration):
8 |
9 | initial = True
10 |
11 | dependencies = [
12 | ('cloudlaunch', '0001_initial'),
13 | ('djcloudbridge', '0001_initial'),
14 | ]
15 |
16 | operations = [
17 | migrations.CreateModel(
18 | name='CMAutoScaler',
19 | fields=[
20 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
21 | ('name', models.CharField(max_length=60)),
22 | ('vm_type', models.CharField(max_length=200)),
23 | ('min_nodes', models.IntegerField(default=0)),
24 | ('max_nodes', models.IntegerField(default=None, null=True)),
25 | ],
26 | options={
27 | 'verbose_name': 'Cluster Autoscaler',
28 | 'verbose_name_plural': 'Cluster Autoscalers',
29 | },
30 | ),
31 | migrations.CreateModel(
32 | name='CMCluster',
33 | fields=[
34 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
35 | ('added', models.DateTimeField(auto_now_add=True)),
36 | ('updated', models.DateTimeField(auto_now=True)),
37 | ('name', models.CharField(max_length=60, unique=True)),
38 | ('cluster_type', models.CharField(max_length=255)),
39 | ('autoscale', models.BooleanField(default=True, help_text='Whether autoscaling is activated')),
40 | ('_connection_settings', models.TextField(blank=True, db_column='connection_settings', help_text='External provider specific settings for this cluster.', max_length=16384, null=True)),
41 | ],
42 | options={
43 | 'verbose_name': 'Cluster',
44 | 'verbose_name_plural': 'Clusters',
45 | },
46 | ),
47 | migrations.CreateModel(
48 | name='GlobalSettings_SettingsStore',
49 | fields=[
50 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
51 | ('key', models.CharField(max_length=255)),
52 | ('value', models.TextField()),
53 | ],
54 | ),
55 | migrations.CreateModel(
56 | name='CMClusterNode',
57 | fields=[
58 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
59 | ('name', models.CharField(max_length=60)),
60 | ('autoscaler', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='nodegroup', to='clusterman.CMAutoScaler')),
61 | ('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='node_list', to='clusterman.CMCluster')),
62 | ('deployment', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='cm_cluster_node', to='cloudlaunch.ApplicationDeployment')),
63 | ],
64 | options={
65 | 'verbose_name': 'Cluster Node',
66 | 'verbose_name_plural': 'Cluster Nodes',
67 | },
68 | ),
69 | migrations.AddField(
70 | model_name='cmautoscaler',
71 | name='cluster',
72 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='autoscaler_list', to='clusterman.CMCluster'),
73 | ),
74 | migrations.AddField(
75 | model_name='cmautoscaler',
76 | name='zone',
77 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='autoscaler_list', to='djcloudbridge.Zone'),
78 | ),
79 | migrations.AlterUniqueTogether(
80 | name='cmautoscaler',
81 | unique_together={('cluster', 'name')},
82 | ),
83 | ]
84 |
--------------------------------------------------------------------------------
/cloudman/projman/tests/test_mgmt_commands.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from django.contrib.auth.models import User
4 | from django.core.management import call_command
5 | from django.core.management.base import CommandError
6 | from django.test import TestCase
7 |
8 | from helmsman.api import HelmsManAPI, HMServiceContext, NamespaceExistsException
9 | from helmsman.tests.client_mocker import ClientMocker
10 |
11 | from projman import models as pm_models
12 | from projman.api import ProjManAPI, PMServiceContext
13 |
14 |
15 | class CommandsTestCase(TestCase):
16 |
17 | TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
18 | INITIAL_HELMSMAN_DATA = os.path.join(
19 | TEST_DATA_PATH, 'helmsman_config.yaml')
20 | INITIAL_PROJECT_DATA = os.path.join(
21 | TEST_DATA_PATH, 'projman_config.yaml')
22 | INITIAL_PROJECT_DATA_UPDATE = os.path.join(
23 | TEST_DATA_PATH, 'projman_config_update.yaml')
24 |
25 | def setUp(self):
26 | super().setUp()
27 | self.mock_client = ClientMocker(self)
28 | self.client.force_login(
29 | User.objects.get_or_create(username='admin', is_superuser=True)[0])
30 |
31 | def tearDown(self):
32 | self.client.logout()
33 |
34 | def test_projman_load_config_no_args(self):
35 | with self.assertRaisesRegex(CommandError, "required: config_file"):
36 | call_command('projman_load_config')
37 |
38 | def test_projman_load_config(self):
39 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
40 | call_command('projman_load_config', self.INITIAL_PROJECT_DATA)
41 | project1 = pm_models.CMProject.objects.get(name='first')
42 | project2 = pm_models.CMProject.objects.get(name='second')
43 |         self.assertEqual(project1.name, 'first')
44 |         self.assertEqual(project1.owner.username, 'admin')
45 |         self.assertEqual(project2.name, 'second')
46 |         self.assertEqual(project2.owner.username, 'admin')
47 |         admin = User.objects.filter(is_superuser=True).first()
48 |         client = HelmsManAPI(HMServiceContext(user=admin))
49 |         self.assertEqual(client.namespaces.get(project2.name).name, 'second')
50 | # Test error for default namespace
51 | with self.assertRaises(NamespaceExistsException):
52 | call_command("projman_create_project", "default")
53 | client.namespaces.delete(project1.name)
54 | client.namespaces.delete(project2.name)
55 |
56 | def test_projman_update_config(self):
57 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
58 | call_command('projman_load_config', self.INITIAL_PROJECT_DATA)
59 | call_command('projman_load_config', self.INITIAL_PROJECT_DATA_UPDATE)
60 | projman_api = ProjManAPI(PMServiceContext(
61 | user=User.objects.get_or_create(username='admin', is_superuser=True)[0]))
62 | proj1 = projman_api.projects.find("first")
63 | chart1 = proj1.charts.find("galaxy")
64 | # should be unchanged since upgrade = false
65 | self.assertEqual(chart1.values['postgresql']['persistence']['storageClass'],
66 | "ebs-provisioner")
67 | # should be updated since upgrade = true
68 | proj2 = projman_api.projects.find("second")
69 | chart2 = proj2.charts.find("galaxy")
70 | self.assertEqual(chart2.values['postgresql']['persistence']['storageClass'],
71 | "updated-provisioner")
72 |
73 | def test_projman_context_update_by_release_name(self):
74 | call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
75 | call_command('projman_load_config', self.INITIAL_PROJECT_DATA)
76 | projman_api = ProjManAPI(PMServiceContext(
77 | user=User.objects.get_or_create(username='admin', is_superuser=True)[0]))
78 | proj = projman_api.projects.find("second")
79 | chart = proj.charts.get("jup")
80 | self.assertEqual(chart.values['greeting'], "hello")
81 | call_command('projman_load_config', self.INITIAL_PROJECT_DATA_UPDATE)
82 | chart = proj.charts.get("jup")
83 | self.assertEqual(chart.values['greeting'], "world")
84 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | import os
4 | import re
5 | import sys
6 |
7 | from setuptools import setup
8 | from setuptools import find_packages
9 |
10 |
11 | def get_version(*file_paths):
12 | """Retrieves the version from cloudman/__init__.py"""
13 | filename = os.path.join(os.path.dirname(__file__), *file_paths)
14 | version_file = open(filename).read()
15 | version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
16 | version_file, re.M)
17 | if version_match:
18 | return version_match.group(1)
19 | raise RuntimeError('Unable to find version string.')
20 |
21 |
22 | version = get_version("cloudman", "cloudman", "__init__.py")
23 |
24 |
25 | if sys.argv[-1] == 'publish':
26 | try:
27 | import wheel
28 | print("Wheel version: ", wheel.__version__)
29 | except ImportError:
30 | print('Wheel library missing. Please run "pip install wheel"')
31 | sys.exit()
32 | os.system('python setup.py sdist upload')
33 | os.system('python setup.py bdist_wheel upload')
34 | sys.exit()
35 |
36 | if sys.argv[-1] == 'tag':
37 | print("Tagging the version on git:")
38 | os.system("git tag -a %s -m 'version %s'" % (version, version))
39 | os.system("git push --tags")
40 | sys.exit()
41 |
42 | readme = open('README.rst').read()
43 | history = open('HISTORY.rst').read().replace('.. :changelog:', '')
44 |
45 | REQS_BASE = [
46 | 'Django>=3.0',
47 | # ======== Celery =========
48 | 'celery>=5.0',
49 | # celery results backend which uses the django DB
50 | 'django-celery-results>=1.0.1',
51 | # celery background task monitor which uses the django DB
52 | 'django-celery-beat>=1.1.0',
53 | # ======== DRF =========
54 | 'djangorestframework>=3.7.7',
55 | # pluggable social auth for django login
56 | 'django-allauth>=0.34.0',
57 | # Provides nested routing for DRF
58 | 'drf-nested-routers>=0.90.0',
59 | # For DRF filtering by querystring
60 | 'django-filter>=1.1.0',
61 | # ======== Permissions =========
62 | # object level permissions for django auth
63 | 'rules',
64 | # ======== CloudLaunch =========
65 | 'cloudlaunch-server>=0.1.1',
66 | 'cloudlaunch-cli',
67 | # ===== CloudMan =====
68 | # To store generic key-value pairs
69 | 'django-hierarkey',
70 | # ==== OIDC ====
71 | 'mozilla-django-oidc',
72 | # for deployments
73 | 'paramiko'
74 | ]
75 |
76 | REQS_PROD = ([
77 | # postgres database driver
78 | 'psycopg2-binary',
79 | 'gunicorn[gevent]'] + REQS_BASE
80 | )
81 |
82 | REQS_TEST = ([
83 | 'tox>=2.9.1',
84 | 'tox-docker>=2.0.0a3',
85 | 'coverage>=4.4.1',
86 | 'flake8>=3.4.1',
87 | 'flake8-import-order>=0.13'] + REQS_BASE
88 | )
89 |
90 | REQS_DEV = ([
91 |     # Documentation, version bumping and linting tools used during development
92 | 'sphinx>=1.3.1',
93 | 'bumpversion>=0.5.3',
94 | 'pylint-django'] + REQS_TEST
95 | )
96 |
97 | setup(
98 | name='cloudman-server',
99 | version=version,
100 | description=("CloudMan is a ReSTful, extensible Django app for"
101 | " managing clusters"),
102 | long_description=readme + '\n\n' + history,
103 | author='Galaxy Project',
104 | author_email='help@cloudve.org',
105 | url='https://github.com/galaxyproject/cloudman',
106 | package_dir={'': 'cloudman'},
107 | packages=find_packages('cloudman'),
108 | include_package_data=True,
109 | install_requires=REQS_BASE,
110 | extras_require={
111 | 'dev': REQS_DEV,
112 | 'test': REQS_TEST,
113 | 'prod': REQS_PROD
114 | },
115 | license="MIT",
116 | keywords='cloudman',
117 | classifiers=[
118 | 'Development Status :: 3 - Alpha',
119 | 'Framework :: Django',
120 |         'Framework :: Django :: 3.0',
121 | 'Intended Audience :: Developers',
122 | 'License :: OSI Approved :: BSD License',
123 | 'Natural Language :: English',
124 | 'Programming Language :: Python :: 3.6',
125 | 'Topic :: Internet :: WWW/HTTP',
126 | 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application'
127 | ],
128 | )
129 |
--------------------------------------------------------------------------------
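An illustrative sketch of what get_version() above matches; the sample module text is a placeholder, not taken from the repository:

    import re

    # get_version() reads the target module and extracts the value assigned to
    # __version__ using the same regular expression as setup.py.
    sample = '__version__ = "0.1.0"\n'
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
    assert match and match.group(1) == "0.1.0"
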
/cloudman/projman/management/commands/install_template_in_project.py:
--------------------------------------------------------------------------------
1 | import logging as log
2 | import yaml
3 |
4 | from django.core.management.base import BaseCommand
5 | from django.contrib.auth.models import User
6 |
7 | from helmsman.api import ChartExistsException
8 |
9 | from ...api import ProjManAPI, PMServiceContext
10 |
11 |
12 | class Command(BaseCommand):
13 | help = 'Installs a template in a project.'
14 |
15 | def add_arguments(self, parser):
16 | parser.add_argument('project_name')
17 | parser.add_argument('template_name')
18 | parser.add_argument('release_name')
19 | parser.add_argument('values_file', help='Values file to apply to the chart')
20 | parser.add_argument('context_file', help='Context to apply to the chart')
21 | parser.add_argument('--upgrade', dest='upgrade_chart', action='store_true')
22 | parser.add_argument('--reset_values', dest='reset_values', action='store_true')
23 |
24 | def handle(self, *args, **options):
25 | values_file = options.get("values_file")
26 | if values_file:
27 | with open(values_file, 'r') as f:
28 | values = yaml.safe_load(f)
29 | else:
30 | values = {}
31 | context_file = options.get("context_file")
32 | if context_file:
33 | with open(context_file, 'r') as f:
34 | context = yaml.safe_load(f)
35 | else:
36 | context = {}
37 | self.install_template_in_project(options['project_name'],
38 | options['template_name'],
39 | options['release_name'],
40 | values,
41 | context=context,
42 | upgrade_chart=options['upgrade_chart'],
43 | reset_values=options['reset_values'])
44 |
45 | @staticmethod
46 | def install_template_in_project(project_name, template_name,
47 | release_name=None, values=None, context=None,
48 | upgrade_chart=False, reset_values=False):
49 | try:
50 | print("Installing template {}"
51 | " into project: {}".format(template_name, project_name))
52 | admin = User.objects.filter(is_superuser=True).first()
53 | pmapi = ProjManAPI(PMServiceContext(user=admin))
54 | proj = pmapi.projects.find(project_name)
55 | if not proj:
56 |                 print("Cannot find project {}.".format(project_name))
57 | return None
58 | try:
59 | if release_name:
60 | existing = proj.charts.get(release_name)
61 | else:
62 | existing = proj.charts.find(template_name)
63 | if existing and upgrade_chart:
64 | ch = proj.charts.update(existing, values, context=context,
65 | reset_values=reset_values)
66 | print(f"Successfully updated template '{template_name}' "
67 | f"with release named '{release_name}' into project "
68 | f"'{project_name}'")
69 | else:
70 | ch = proj.charts.create(template_name,
71 | release_name,
72 | values, context)
73 | print(f"Successfully installed template '{template_name}' "
74 | f"with release named '{release_name}' into project "
75 | f"'{project_name}'")
76 | return ch
77 | except ChartExistsException as ce:
78 | log.warning(str(ce))
79 | print(str(ce))
80 |
81 | except Exception as e:
82 | log.exception(f"An error occurred while "
83 | f"installing template '{template_name}' "
84 |                           f"into project '{project_name}'")
85 | print(f"Error occurred while installing template '{template_name}' "
86 | f"into project '{project_name}'", str(e))
87 | # Re-raise the exception
88 | raise e
89 |
--------------------------------------------------------------------------------
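A minimal usage sketch for the command above (not part of the repository; the project, template, release and file names are placeholders and assume a Helmsman config has already been loaded):

    from django.core.management import call_command

    # CLI equivalent:
    #   python manage.py install_template_in_project my-project galaxy galaxy-rel \
    #       values.yaml context.yaml --upgrade
    call_command('install_template_in_project',
                 'my-project',    # project_name
                 'galaxy',        # template_name
                 'galaxy-rel',    # release_name
                 'values.yaml',   # values_file applied to the chart
                 'context.yaml',  # context_file used when rendering the template
                 '--upgrade')     # update the release if it already exists
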
/cloudman/cloudman/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for cloudman project.
3 | """
4 | from cloudlaunchserver.settings import *
5 | from cloudman.auth import get_from_well_known
6 |
7 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8 |
9 | # Absolute path to the directory static files should be collected to.
10 | # Don't put anything in this directory yourself; store your static files
11 | # in apps' "static/" subdirectories and in STATICFILES_DIRS.
12 | # Example: "/var/www/example.com/static/"
13 | STATIC_ROOT = os.path.join(BASE_DIR, 'static')
14 |
15 | # Application definition
16 | INSTALLED_APPS += [
17 | 'mozilla_django_oidc',
18 | 'clusterman',
19 | 'helmsman.apps.HelmsManConfig',
20 | 'projman',
21 | # Discover and apply permission rules in each project
22 | 'rules.apps.AutodiscoverRulesConfig'
23 | ]
24 |
25 | AUTHENTICATION_BACKENDS = [
26 | 'rules.permissions.ObjectPermissionBackend',
27 | 'django.contrib.auth.backends.ModelBackend',
28 | 'cloudman.oidc.CMOIDCAuthenticationBackend'
29 | ]
30 |
31 | MIDDLEWARE += [
32 | 'mozilla_django_oidc.middleware.SessionRefresh'
33 | ]
34 |
35 | REST_FRAMEWORK['DEFAULT_AUTHENTICATION_CLASSES'] += ('mozilla_django_oidc.contrib.drf.OIDCAuthentication',)
36 |
37 | OIDC_ENABLED = os.environ.get('OIDC_ENABLED', '').lower() in ('true', '1', 'yes')
38 |
39 | # OIDC settings. Set only if OIDC_ENABLED
40 | OIDC_RP_CLIENT_ID = "cloudman"
41 | OIDC_RP_CLIENT_SECRET = None
42 | OIDC_OP_AUTHORIZATION_ENDPOINT = "http://localhost:8080/auth/realms/master/.well-known/openid-configuration"
43 | OIDC_OP_TOKEN_ENDPOINT = "dummy"
44 | OIDC_OP_USER_ENDPOINT = "dummy"
45 | OIDC_OP_JWKS_ENDPOINT = "dummy"
46 | OIDC_RP_SIGN_ALGO = "RS256"
47 |
48 | if OIDC_ENABLED:
49 | # KeyCloak realm url
50 | OIDC_OP_METADATA_ENDPOINT = os.environ.get(
51 | "OIDC_METADATA_URI") or "http://localhost:8080/auth/realms/master/.well-known/openid-configuration"
52 | # Client ID configured in the Auth Server
53 | OIDC_RP_CLIENT_ID = os.environ.get("OIDC_CLIENT_ID") or "cloudman"
54 | OIDC_RP_CLIENT_SECRET = os.environ.get("OIDC_CLIENT_SECRET")
55 | OIDC_OP_AUTHORIZATION_ENDPOINT = get_from_well_known(OIDC_OP_METADATA_ENDPOINT, 'authorization_endpoint')
56 | OIDC_OP_TOKEN_ENDPOINT = get_from_well_known(OIDC_OP_METADATA_ENDPOINT, 'token_endpoint')
57 | OIDC_OP_USER_ENDPOINT = get_from_well_known(OIDC_OP_METADATA_ENDPOINT, 'userinfo_endpoint')
58 | OIDC_OP_JWKS_ENDPOINT = get_from_well_known(OIDC_OP_METADATA_ENDPOINT, 'jwks_uri')
59 | OIDC_RP_SIGN_ALGO = os.environ.get("OIDC_SIGN_ALGO") or "RS256"
60 | OIDC_USERNAME_ALGO = lambda claim: claim
61 | OIDC_OP_LOGOUT_URL_METHOD = 'cloudman.oidc.provider_logout'
62 | OIDC_AUTHENTICATE_CLASS = 'cloudman.oidc.OIDCAuthenticationRequestView'
63 |
64 | SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
65 | LOGIN_REDIRECT_URL = "/"
66 |
67 | ROOT_URLCONF = 'cloudman.urls'
68 |
69 | WSGI_APPLICATION = 'cloudman.wsgi.application'
70 |
71 |
72 | #CLOUDLAUNCH_PATH_PREFIX = os.environ.get('CLOUDLAUNCH_PATH_PREFIX', '')
73 | STATIC_URL = CLOUDLAUNCH_PATH_PREFIX + '/cloudman/static/'
74 | FORCE_SCRIPT_NAME = CLOUDLAUNCH_PATH_PREFIX
75 | REST_SCHEMA_BASE_URL = CLOUDLAUNCH_PATH_PREFIX + "/cloudman/cloudlaunch/"
76 |
77 | REST_AUTH_SERIALIZERS = {
78 | 'USER_DETAILS_SERIALIZER': 'projman.serializers.UserSerializer'
79 | }
80 |
81 | DATABASES = {
82 | 'default': {
83 | 'ENGINE': 'django.db.backends.' + os.environ.get('CLOUDMAN_DB_ENGINE', 'sqlite3'),
84 | 'NAME': os.environ.get('CLOUDMAN_DB_NAME', os.path.join(BASE_DIR, 'db.sqlite3')),
85 | # The following settings are not used with sqlite3:
86 | 'USER': os.environ.get('CLOUDMAN_DB_USER'),
87 | 'HOST': os.environ.get('CLOUDMAN_DB_HOST'), # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
88 | 'PORT': os.environ.get('CLOUDMAN_DB_PORT'), # Set to empty string for default.
89 | 'PASSWORD': os.environ.get('CLOUDMAN_DB_PASSWORD'),
90 | }
91 | }
92 |
93 | CM_GLOBAL_CONTEXT_PATH = "/opt/cloudman/global_context.yaml"
94 |
95 | if os.path.isfile(CM_GLOBAL_CONTEXT_PATH) and os.access(CM_GLOBAL_CONTEXT_PATH, os.R_OK):
96 | import yaml
97 | with open(CM_GLOBAL_CONTEXT_PATH) as f:
98 | print(f"Loading cloudman global context from: {CM_GLOBAL_CONTEXT_PATH}")
99 | CM_GLOBAL_CONTEXT = yaml.safe_load(f)
100 | else:
101 | CM_GLOBAL_CONTEXT = {}
102 |
103 | # Allow settings to be overridden in a cloudman/settings_local.py
104 | try:
105 | from cloudman.settings_local import * # noqa
106 | except ImportError:
107 | pass
108 |
--------------------------------------------------------------------------------
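A hedged sketch of the optional cloudman/settings_local.py override imported at the end of the settings module above; the file and every value shown are illustrative, not part of the repository:

    # cloudman/settings_local.py -- picked up by the trailing try/except import.
    # Standard Django settings; all values below are placeholders.
    DEBUG = False
    ALLOWED_HOSTS = ['cloudman.example.org']

    # Override the env-var driven default database with an explicit PostgreSQL
    # configuration (equivalent to setting the CLOUDMAN_DB_* variables).
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql',
            'NAME': 'cloudman',
            'USER': 'cloudman',
            'PASSWORD': 'change-me',
            'HOST': '127.0.0.1',
            'PORT': '5432',
        }
    }
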
/cloudman/helmsman/tests/data/helmsman_config.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | - name: cloudve
3 | url: https://raw.githubusercontent.com/CloudVE/helm-charts/master/
4 | - name: jupyterhub
5 | url: https://jupyterhub.github.io/helm-chart/
6 | template_registries:
7 | - name: cloudve
8 | url: https://raw.githubusercontent.com/galaxyproject/cloudlaunch-registry/master/template-registry.yaml
9 | install_templates:
10 | dummy:
11 | repo: dummy
12 | chart: dummy
13 | version: 1.0.0
14 | summary: dummy chart
15 | description: dummy chart description
16 | display_name: dummy
17 | maintainers: dummy
18 | anotherdummy:
19 | repo: dummy
20 | chart: anotherdummy
21 | summary: dummy chart
22 | chart_version: 3.0.0
23 | description: dummy chart description
24 | display_name: dummy
25 | maintainers: dummy
26 | jupyter:
27 | repo: jupyterhub
28 | chart: jupyterhub
29 | summary: jupyter chart
30 | description: jupyter chart description
31 | display_name: "Jupyter"
32 | maintainers: jupyter
33 | info_url: https://jupyter.org/
34 | icon_url: https://jupyter.org/assets/hublogo.svg
35 | screenshot_url: https://jupyter.org/assets/hublogo.svg
36 | template: |
37 | ingress:
38 | enabled: true
39 | path: '{{context.project.access_path}}/jupyterhub'
40 | hub:
41 | baseUrl: '{{context.project.access_path}}/jupyterhub'
42 | proxy:
43 | secretToken: '{{random_alphanumeric(65)}}'
44 | galaxy:
45 | repo: cloudve
46 | chart: galaxy
47 | chart_version: 3.3.0
48 | template: |
49 | config:
50 | oidc_backends_config.xml: |
51 |
52 |
53 |
54 | https://ngkc4.cloudve.org/auth
55 | galaxy-auth
56 | {{random_alphanumeric(8)}}-{{random_alphanumeric(4)}}-{{random_alphanumeric(4)}}-{{random_alphanumeric(12)}}
57 | https://ngkc4.cloudve.org{{context.project.access_path}}/galaxy/authnz/custos/callback
58 | master
59 |
60 |
61 | galaxy.yml:
62 | galaxy:
63 | enable_oidc: true
64 | oidc_backends_config_file: /galaxy/server/config/oidc_backends_config.xml
65 | oidc_config_file: /galaxy/server/config/oidc_config.xml
66 | oidc_config.xml: |
67 |
68 |
69 |
70 |
71 |
72 |
73 | ingress:
74 | annotations:
75 | certmanager.k8s.io/cluster-issuer: letsencrypt-prod
76 | kubernetes.io/tls-acme: "true"
77 | nginx.ingress.kubernetes.io/secure-backends: "true"
78 | enabled: true
79 | hosts:
80 | - ngkc4.cloudve.org
81 | path: {{context.project.access_path}}/galaxy
82 | tls:
83 | - hosts:
84 | - ngkc4.cloudve.org
85 | secretName: ngkc4-cloudve-org-key
86 | persistence:
87 | size: 95Gi
88 | storageClass: nfs-provisioner
89 | postgresql:
90 | persistence:
91 | storageClass: ebs-provisioner
92 |
93 | charts:
94 | dashboard:
95 | name: stable/kubernetes-dashboard
96 | namespace: kube-system
97 | create_namespace: true
98 | chart_version: 2.0.0
99 | values:
100 | enableInsecureLogin: true
101 | ingress:
102 | annotations:
103 | certmanager.k8s.io/cluster-issuer: letsencrypt-prod
104 | kubernetes.io/tls-acme: "true"
105 | nginx.ingress.kubernetes.io/secure-backends: "true"
106 | enabled: true
107 | hosts:
108 | - null
109 | - ngkc4.cloudve.org
110 | paths:
111 | - /dashboard
112 | - /dashboard/*
113 | tls:
114 | - hosts:
115 | - ngkc4.cloudve.org
116 | secretName: ngkc4-cloudve-org-key
117 | rbac:
118 | clusterAdminRole: true
119 | dummy:
120 | name: dummy/dummy
121 | namespace: dummy
122 | create_namespace: true
123 | version: 1.0.0
124 | anotherdummy:
125 | name: dummy/anotherdummy
126 | namespace: anotherdummy
127 | create_namespace: true
128 | version: 2.0.0
129 | upgrade: false
130 |
--------------------------------------------------------------------------------
/cloudman/clusterman/management/commands/import_cloud_data.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import base64
3 | import json
4 | import yaml
5 | from django.core.management.base import BaseCommand, CommandError
6 | from djcloudbridge import models as cb_models
7 | from cloudlaunch import models as cl_models
8 |
9 |
10 | class Command(BaseCommand):
11 |     help = 'Loads initial cloud data in yaml, json or base64-encoded yaml ' \
12 |            'format. The cloud data should contain cloud connection and ' \
13 |            'credentials info for the admin user'
14 |
15 | def add_arguments(self, parser):
16 | parser.add_argument('filename', type=argparse.FileType('r'))
17 | parser.add_argument('--format', required=False, default="yaml",
18 | choices=['yaml', 'json', 'base64yaml'],
19 | help='Format that the data is encoded in')
20 |
21 | def handle(self, *args, **options):
22 | data = options['filename'].read()
23 | format = options['format']
24 | if format == "base64yaml":
25 | # Pad data: https://gist.github.com/perrygeo/ee7c65bb1541ff6ac770
26 | data = base64.b64decode(data + "===").decode('utf-8')
27 |
28 | if format == "json":
29 | decoded_data = json.loads(data)
30 | elif format == "yaml" or format == "base64yaml":
31 | decoded_data = yaml.safe_load(data)
32 | self.load_cloud_data(decoded_data)
33 |
34 | @staticmethod
35 | def load_cloud_data(json_data):
36 | config = json_data.get('cloud_config')
37 | target = config.get('target')
38 | image = config.get('image')
39 | zone = target.get('target_zone')
40 | region = zone.get('region')
41 | cloud = zone.get('cloud')
42 | credentials = config.get('credentials')
43 |
44 | cloud_type = cloud.pop('resourcetype')
45 | if cloud_type == 'AWSCloud':
46 | cloud_model = cb_models.AWSCloud
47 | region_model = cb_models.AWSRegion
48 | credentials_model = cb_models.AWSCredentials
49 | elif cloud_type == 'AzureCloud':
50 | cloud_model = cb_models.AzureCloud
51 | region_model = cb_models.AzureRegion
52 | credentials_model = cb_models.AzureCredentials
53 | elif cloud_type == 'GCPCloud':
54 | cloud_model = cb_models.GCPCloud
55 | region_model = cb_models.GCPRegion
56 | credentials_model = cb_models.GCPCredentials
57 | credentials['gcp_service_creds_dict'] = json.dumps(
58 | credentials['gcp_service_creds_dict'])
59 | elif cloud_type == 'OpenStackCloud':
60 | cloud_model = cb_models.OpenStackCloud
61 | region_model = cb_models.OpenStackRegion
62 | credentials_model = cb_models.OpenStackCredentials
63 |
64 | # create cloud
65 | cloud_id = cloud.pop('id')
66 | cloud_obj, _ = cloud_model.objects.get_or_create(
67 | id=cloud_id, defaults={**cloud})
68 |
69 | # create region
70 | region_id = region.pop('region_id')
71 | region.pop('resourcetype', None)
72 | region['cloud'] = cloud_obj
73 | region_obj, _ = region_model.objects.get_or_create(
74 | region_id=region_id, defaults={**region})
75 |
76 | # create zone
77 | zone_id = zone.pop('zone_id')
78 | zone.pop('cloud')
79 | zone.pop('region')
80 | zone_obj, _ = cb_models.Zone.objects.get_or_create(
81 | region=region_obj, zone_id=zone_id, defaults={**zone})
82 |
83 | # create credentials and link to admin user
84 | if credentials:
85 | credentials.pop('id', None)
86 | name = credentials.pop('name', 'default')
87 | cloud_id = credentials.pop('cloud_id', cloud_id)
88 | credentials.pop('default', None)
89 | credentials_model.objects.get_or_create(user_profile_id="admin",
90 | name=name,
91 | cloud_id=cloud_id,
92 | default=True,
93 | defaults={**credentials})
94 |
95 | # create image
96 | name = image.pop('name')
97 | image_obj, _ = cl_models.Image.objects.get_or_create(
98 | name=name, defaults={**image, "region": region_obj})
99 |
100 | # connect rke app as target
101 | version = cl_models.ApplicationVersion.objects.filter(
102 | application='cm_rke_kubernetes_plugin').first()
103 | target = cl_models.CloudDeploymentTarget.objects.filter(
104 | target_zone=zone_obj).first()
105 | cl_models.ApplicationVersionCloudConfig.objects.get_or_create(
106 | application_version=version, target=target, image=image_obj)
107 |
--------------------------------------------------------------------------------
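The base64 padding trick referenced in the handle() method above, shown in isolation (an illustrative sketch, not part of the repository):

    import base64

    # b64decode needs input whose length is a multiple of 4. Appending "==="
    # guarantees sufficient padding even if the original padding was stripped,
    # and any excess padding is ignored by the non-strict decoder.
    encoded = base64.b64encode(b"cloud_config: {}").decode("ascii").rstrip("=")
    decoded = base64.b64decode(encoded + "===").decode("utf-8")
    assert decoded == "cloud_config: {}"
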
/cloudman/helmsman/tests/data/helmsman_config_update.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | - name: cloudve
3 | url: https://raw.githubusercontent.com/CloudVE/helm-charts/master/
4 | - name: jupyterhub
5 | url: https://jupyterhub.github.io/helm-chart/
6 | template_registries:
7 | - name: cloudve
8 | url: https://raw.githubusercontent.com/galaxyproject/cloudlaunch-registry/master/template-registry.yaml
9 | install_templates:
10 | dummy:
11 | repo: dummy
12 | chart: dummy
13 | version: 1.0.0
14 | summary: modified dummy chart
15 | description: modified dummy chart description
16 | display_name: modified dummy
17 | maintainers: modified dummy
18 | upgrade: false
19 | anotherdummy:
20 | upgrade: true
21 | repo: dummy
22 | chart: anotherdummy
23 | summary: dummy chart
24 | chart_version: 4.0.0
25 | description: dummy chart description
26 | display_name: dummy
27 | maintainers: dummy
28 | jupyter:
29 | repo: jupyterhub
30 | chart: jupyterhub
31 | summary: jupyter chart
32 | description: jupyter chart description
33 | display_name: "Jupyter"
34 | maintainers: jupyter updated
35 | info_url: https://jupyter.org/
36 | icon_url: https://jupyter.org/assets/hublogo.svg
37 | screenshot_url: https://jupyter.org/assets/hublogo.svg
38 | template: |
39 | ingress:
40 | enabled: true
41 | path: '{{context.project.access_path}}/jupyterhub'
42 | hub:
43 | baseUrl: '{{context.project.access_path}}/jupyterhub'
44 | proxy:
45 | secretToken: '{{random_alphanumeric(65)}}'
46 | galaxy:
47 | upgrade: true
48 | repo: cloudve
49 | chart: galaxy
50 | chart_version: 3.3.0
51 | template: |
52 | config:
53 | oidc_backends_config.xml: |
54 |
55 |
56 |
57 | https://ngkc4.cloudve.org/auth
58 | galaxy-auth
59 | {{random_alphanumeric(8)}}-{{random_alphanumeric(4)}}-{{random_alphanumeric(4)}}-{{random_alphanumeric(12)}}
60 | https://ngkc4.cloudve.org{{context.project.access_path}}/galaxy/authnz/custos/callback
61 | master
62 |
63 |
64 | galaxy.yml:
65 | galaxy:
66 | enable_oidc: true
67 | oidc_backends_config_file: /galaxy/server/config/oidc_backends_config.xml
68 | oidc_config_file: /galaxy/server/config/oidc_config.xml
69 | oidc_config.xml: |
70 |
71 |
72 |
73 |
74 |
75 |
76 | ingress:
77 | annotations:
78 | certmanager.k8s.io/cluster-issuer: letsencrypt-prod
79 | kubernetes.io/tls-acme: "true"
80 | nginx.ingress.kubernetes.io/secure-backends: "true"
81 | enabled: true
82 | hosts:
83 | - ngkc4.cloudve.org
84 | path: {{context.project.access_path}}/galaxy
85 | tls:
86 | - hosts:
87 | - ngkc4.cloudve.org
88 | secretName: ngkc4-cloudve-org-key
89 | persistence:
90 | size: 95Gi
91 | storageClass: nfs-provisioner
92 | postgresql:
93 | persistence:
94 | storageClass: ebs-provisioner
95 |
96 | charts:
97 | dashboard:
98 | name: stable/kubernetes-dashboard
99 | namespace: kube-system
100 | create_namespace: true
101 | version: 2.0.0
102 | values:
103 | enableInsecureLogin: true
104 | ingress:
105 | annotations:
106 | certmanager.k8s.io/cluster-issuer: letsencrypt-prod
107 | kubernetes.io/tls-acme: "true"
108 | nginx.ingress.kubernetes.io/secure-backends: "true"
109 | enabled: true
110 | hosts:
111 | - null
112 | - ngkc4.cloudve.org
113 | paths:
114 | - /dashboard
115 | - /dashboard/*
116 | tls:
117 | - hosts:
118 | - ngkc4.cloudve.org
119 | secretName: ngkc4-cloudve-org-key
120 | rbac:
121 | clusterAdminRole: true
122 | dummy:
123 | name: dummy/dummy
124 | namespace: dummy
125 | create_namespace: true
126 | version: 2.0.0
127 | upgrade: true
128 | anotherdummy:
129 | name: dummy/anotherdummy
130 | namespace: anotherdummy
131 | create_namespace: true
132 | version: 3.0.0
133 | upgrade: false
134 |
--------------------------------------------------------------------------------
/cloudman/clusterman/models.py:
--------------------------------------------------------------------------------
1 | from django.db import models
2 |
3 | from hierarkey.models import GlobalSettingsBase, Hierarkey
4 |
5 | from cloudlaunch import models as cl_models
6 | from djcloudbridge import models as cb_models
7 | import yaml
8 |
9 |
10 | hierarkey = Hierarkey(attribute_name='settings')
11 |
12 |
13 | @hierarkey.set_global()
14 | class GlobalSettings(GlobalSettingsBase):
15 | pass
16 |
17 |
18 | class CMCluster(models.Model):
19 | """CloudMan cluster details."""
20 | # Automatically add timestamps when object is created
21 | added = models.DateTimeField(auto_now_add=True)
22 | # Automatically add timestamps when object is updated
23 | updated = models.DateTimeField(auto_now=True)
24 | name = models.CharField(max_length=60, unique=True)
25 | cluster_type = models.CharField(max_length=255, blank=False, null=False)
26 | autoscale = models.BooleanField(
27 | default=True, help_text="Whether autoscaling is activated")
28 | _connection_settings = models.TextField(
29 | max_length=1024 * 16, help_text="External provider specific settings "
30 | "for this cluster.", blank=True, null=True,
31 | db_column='connection_settings')
32 |
33 | @property
34 | def connection_settings(self):
35 | return yaml.safe_load(self._connection_settings)
36 |
37 | @connection_settings.setter
38 | def connection_settings(self, value):
39 | """
40 | Save the connection_settings value.
41 |
42 | .. seealso:: connection_settings property getter
43 | """
44 | self._connection_settings = yaml.dump(value, default_flow_style=False)
45 |
46 | @property
47 | def default_vm_type(self):
48 | return self.connection_settings.get('app_config', {}).get(
49 | 'config_cloudlaunch', {}).get('vmType')
50 |
51 | @property
52 | def default_zone(self):
53 | target_zone = self.connection_settings.get(
54 | 'cloud_config', {}).get('target', {}).get('target_zone', {})
55 | cloud_id = target_zone.get('cloud', {}).get('id')
56 | region_id = target_zone.get('region', {}).get('region_id')
57 | zone_id = target_zone.get('zone_id')
58 | zone = cb_models.Zone.objects.get(zone_id=zone_id, region__region_id=region_id,
59 | region__cloud__id=cloud_id)
60 | return zone
61 |
62 | class Meta:
63 | verbose_name = "Cluster"
64 | verbose_name_plural = "Clusters"
65 |
66 |
67 | class CMAutoScaler(models.Model):
68 | name = models.CharField(max_length=60)
69 | cluster = models.ForeignKey(CMCluster, on_delete=models.CASCADE,
70 | null=False, related_name="autoscaler_list")
71 | vm_type = models.CharField(max_length=200)
72 | allowed_vm_type_prefixes = models.CharField(max_length=300, blank=True,
73 | default=None, null=True)
74 | zone = models.ForeignKey(cb_models.Zone, on_delete=models.CASCADE,
75 | null=False, related_name="autoscaler_list")
76 | min_nodes = models.IntegerField(default=0)
77 | max_nodes = models.IntegerField(default=None, null=True)
78 |
79 | class Meta:
80 | verbose_name = "Cluster Autoscaler"
81 | verbose_name_plural = "Cluster Autoscalers"
82 | unique_together = (("cluster", "name"),)
83 |
84 |
85 | class CMClusterNode(models.Model):
86 | name = models.CharField(max_length=60)
87 | cluster = models.ForeignKey(CMCluster, on_delete=models.CASCADE,
88 | null=False, related_name="node_list")
89 | # This introduces a tight coupling between the cloudlaunch and cloudman
90 | # models, although we go through the cloudlaunch API for everything else.
91 | # This may need to be changed to an IntegerField if we go for a fully
92 | # decoupled route.
93 | deployment = models.OneToOneField(
94 | cl_models.ApplicationDeployment, models.CASCADE,
95 | related_name="cm_cluster_node")
96 | autoscaler = models.ForeignKey(
97 | CMAutoScaler, on_delete=models.CASCADE, null=True,
98 | related_name="nodegroup")
99 |
100 | class Meta:
101 | verbose_name = "Cluster Node"
102 | verbose_name_plural = "Cluster Nodes"
103 |
104 | def is_stable(self):
105 | """
106 | Return true if node is in a stable state, such as SUCCESS or FAILURE
107 | and not PROGRESSING or DELETING
108 | """
109 | return (self.deployment.tasks.latest('updated').status
110 | in ['SUCCESS', 'FAILURE'])
111 |
112 | def is_running(self):
113 | """
114 | Return true if node was successfully launched
115 | """
116 | return (self.deployment.tasks.filter(
117 | action=cl_models.ApplicationDeploymentTask.LAUNCH).first().status
118 | not in ['FAILURE'] and self.deployment.tasks.filter(
119 | action=cl_models.ApplicationDeploymentTask.DELETE).count() == 0)
120 |
--------------------------------------------------------------------------------
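A hedged sketch of the CMCluster.connection_settings round-trip defined above (assumes a configured Django environment, such as a test or a manage.py shell; the settings dict is illustrative):

    from clusterman.models import CMCluster

    cluster = CMCluster(name='example', cluster_type='KUBE_RKE')
    # The setter serializes the dict to YAML into the hidden
    # _connection_settings column ...
    cluster.connection_settings = {
        'app_config': {'config_cloudlaunch': {'vmType': 'm5.large'}},
    }
    assert 'vmType: m5.large' in cluster._connection_settings
    # ... and the getter parses it back on access, which is what
    # default_vm_type relies on.
    assert cluster.default_vm_type == 'm5.large'
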
/cloudman/helmsman/management/commands/add_install_template.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import logging as log
3 |
4 | from django.contrib.auth.models import User
5 | from django.core.management.base import BaseCommand
6 |
7 | from ...api import HelmsManAPI, HMServiceContext
8 |
9 |
10 | class Command(BaseCommand):
11 | help = 'Adds a new template to the install templates'
12 |
13 | def add_arguments(self, parser):
14 | parser.add_argument('name',
15 | help='Name to give the install template')
16 | parser.add_argument('repo',
17 | help='Which repo to install from (e.g. stable)')
18 | parser.add_argument('chart',
19 | help='name of the chart to install (e.g. postgres)')
20 | parser.add_argument('--chart_version', required=False,
21 | help='version of chart to install. defaults'
22 | ' to latest')
23 | parser.add_argument('--template_file', required=False,
24 | type=argparse.FileType('r'),
25 | help='The jinja2 template to use to render final values')
26 | parser.add_argument('--context', required=False,
27 | help='Default context values to use when'
28 | ' evaluating this jinja2 template')
29 | parser.add_argument('--display_name', required=False,
30 | help='chart display name')
31 | parser.add_argument('--summary', required=False,
32 | help='chart summary')
33 | parser.add_argument('--description', required=False,
34 | help='chart description')
35 | parser.add_argument('--maintainers', required=False,
36 | help='chart maintainers')
37 | parser.add_argument('--info_url', required=False,
38 | help='chart info url')
39 | parser.add_argument('--icon_url', required=False,
40 | help='chart icon url')
41 | parser.add_argument('--screenshot_url', required=False,
42 | help='chart screenshot url')
43 | parser.add_argument('--upgrade', dest='upgrade_template',
44 | action='store_true',
45 | help='upgrade template if it already exists')
46 |
47 | def handle(self, *args, **options):
48 | self.add_install_template(
49 | options['name'], options['repo'], options['chart'],
50 | options.get('chart_version'),
51 | options['template_file'].read() if options.get('template_file') else None,
52 | options.get('context'),
53 | options.get('display_name'),
54 | options.get('summary'),
55 | options.get('description'),
56 | options.get('maintainers'),
57 | options.get('info_url'),
58 | options.get('icon_url'),
59 | options.get('screenshot_url'),
60 | options.get('upgrade_template'))
61 |
62 | @staticmethod
63 | def add_install_template(name, repo, chart, chart_version, template,
64 | context, display_name, summary, description,
65 | maintainers, info_url, icon_url, screenshot_url,
66 | upgrade_template):
67 | try:
68 | print(f"Adding template: {name}")
69 | admin = User.objects.filter(is_superuser=True).first()
70 | client = HelmsManAPI(HMServiceContext(user=admin))
71 | existing_template = client.templates.find(name=name)
72 | if existing_template and upgrade_template:
73 | client.templates.update(
74 | existing_template, repo, chart, chart_version, template, context,
75 | display_name=display_name, summary=summary,
76 | description=description, maintainers=maintainers,
77 | info_url=info_url, icon_url=icon_url, screenshot_url=screenshot_url)
78 | print(f"Successfully added template named: '{name}'"
79 | f" for chart: '{repo}/{chart}'.")
80 | elif existing_template:
81 | print(f"Template named: '{name}' for chart: '{repo}/{chart}'"
82 | " already exists.")
83 | else:
84 | client.templates.create(
85 | name, repo, chart, chart_version, template, context,
86 | display_name=display_name, summary=summary,
87 | description=description, maintainers=maintainers,
88 | info_url=info_url, icon_url=icon_url, screenshot_url=screenshot_url)
89 | print(f"Successfully added template named: '{name}'"
90 | f" for chart: '{repo}/{chart}'.")
91 | except Exception as e:
92 | log.exception("An error occurred while "
93 |                           f"adding the template '{name}':")
94 | print(f"An error occurred while adding the template '{name}':",
95 | str(e))
96 | raise e
97 |
--------------------------------------------------------------------------------
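A usage sketch for the command above (not part of the repository; the template name, repo, chart and version are placeholders):

    from django.core.management import call_command

    # CLI equivalent:
    #   python manage.py add_install_template jupyter jupyterhub jupyterhub \
    #       --chart_version=0.9.0 --display_name=Jupyter --upgrade
    call_command('add_install_template',
                 'jupyter',      # name to give the install template
                 'jupyterhub',   # repo to install from
                 'jupyterhub',   # chart to install
                 '--chart_version=0.9.0',
                 '--display_name=Jupyter',
                 '--upgrade')    # update the template if it already exists
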
/cloudman/clusterman/clients/kube_client.py:
--------------------------------------------------------------------------------
1 | """A wrapper around the kubectl commandline client"""
2 | import shutil
3 |
4 | import tenacity
5 |
6 | from . import helpers
7 |
8 |
9 | class KubeService(object):
10 |     """Base class for kubectl-backed CloudMan services."""
11 | def __init__(self, client):
12 | self._client = client
13 |
14 | def client(self):
15 | return self._client
16 |
17 |
18 | class KubeClient(KubeService):
19 |
20 | def __init__(self):
21 | self._check_environment()
22 | super(KubeClient, self).__init__(self)
23 | self._namespace_svc = KubeNamespaceService(self)
24 | self._node_svc = KubeNodeService(self)
25 | self._secret_svc = KubeSecretService(self)
26 |
27 | @staticmethod
28 | def _check_environment():
29 | if not shutil.which("kubectl"):
30 | raise Exception("Could not find kubectl executable in path")
31 |
32 | @property
33 | def namespaces(self):
34 | return self._namespace_svc
35 |
36 | @property
37 | def nodes(self):
38 | return self._node_svc
39 |
40 | @property
41 | def secrets(self):
42 | return self._secret_svc
43 |
44 |
45 | class KubeNamespaceService(KubeService):
46 |
47 | def __init__(self, client):
48 | super(KubeNamespaceService, self).__init__(client)
49 |
50 | def list(self):
51 | data = helpers.run_list_command(["kubectl", "get", "namespaces"],
52 | delimiter=" ", skipinitialspace=True)
53 | return data
54 |
55 | # def _list_names(self):
56 | # data = self.list()
57 | # output = [each.get('NAME') for each in data]
58 | # return output
59 |
60 | def create(self, namespace_name):
61 | return helpers.run_command(
62 | ["kubectl", "create", "namespace", namespace_name])
63 |
64 | # def _create_if_not_exists(self, namespace_name):
65 | # if namespace_name not in self._list_names():
66 | # return self.create(namespace_name)
67 |
68 | def delete(self, namespace_name):
69 | return helpers.run_command(
70 | ["kubectl", "delete", "namespace", namespace_name])
71 |
72 |
73 | class KubeNodeService(KubeService):
74 |
75 | def __init__(self, client):
76 | super(KubeNodeService, self).__init__(client)
77 |
78 | def list(self):
79 | data = helpers.run_yaml_command(["kubectl", "get", "nodes", "-o", "yaml"])
80 | return data['items']
81 |
82 | def find(self, address=None, labels=None):
83 | labels = labels or {}
84 | nodes = self.list()
85 | if address:
86 | nodes = [node for node in nodes if address
87 | in [addr.get('address') for addr in
88 | node.get('status', {}).get('addresses', {})]]
89 | if labels:
90 | nodes = [node for node in nodes if labels.items()
91 | <= node.get('metadata', {}).get('labels', {}).items()]
92 | return nodes
93 |
94 | def cordon(self, node):
95 | name = node.get('metadata', {}).get('name')
96 | return helpers.run_command(["kubectl", "cordon", name])
97 |
98 | def _get_job_pods_in_node(self, node_name, state):
99 | """
100 | Return a list of all pods in a node in a particular state, such
101 | as Running. Only looks for pods that belong to a job
102 | (job-name selector).
103 | """
104 | return helpers.run_yaml_command(
105 | ["kubectl", "get", "pods", "--all-namespaces", "--field-selector",
106 | f"spec.nodeName={node_name},status.phase={state}",
107 | "--selector", "job-name", "-o", "yaml"])
108 |
109 | def wait_till_jobs_complete(self, node, timeout=3600*24*7):
110 | name = node.get('metadata', {}).get('name')
111 | retryer = tenacity.Retrying(
112 | stop=tenacity.stop_after_delay(timeout),
113 | retry=tenacity.retry_if_result(
114 | lambda result: len(result.get('items', [])) != 0),
115 | wait=tenacity.wait_fixed(5))
116 | retryer(self._get_job_pods_in_node, name, "Running")
117 |
118 | def drain(self, node, force=True, timeout=120, ignore_daemonsets=True):
119 | name = node.get('metadata', {}).get('name')
120 | return helpers.run_command(
121 | ["kubectl", "drain", name, f"--timeout={timeout}s",
122 | f"--force={'true' if force else 'false'}",
123 | f"--ignore-daemonsets={'true' if ignore_daemonsets else 'false'}"]
124 | )
125 |
126 | def delete(self, node):
127 | name = node.get('metadata', {}).get('name')
128 | return helpers.run_command(
129 | ["kubectl", "delete", "node", name]
130 | )
131 |
132 | def set_label(self, node, labels):
133 | name = node.get('metadata', {}).get('name')
134 | return helpers.run_command(
135 | ["kubectl", "label", "nodes", name]
136 | + [f"{key}={value}" for key, value in labels.items()]
137 | )
138 |
139 |
140 | class KubeSecretService(KubeService):
141 |
142 | def __init__(self, client):
143 | super(KubeSecretService, self).__init__(client)
144 |
145 | def get(self, secret_name, namespace=None):
146 | command = ["kubectl", "get", "secrets", "-o", "yaml", secret_name]
147 | if namespace:
148 | command += ["-n", namespace]
149 | data = helpers.run_yaml_command(command)
150 | return data
151 |
--------------------------------------------------------------------------------
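A hedged sketch of the kubectl wrapper above in use; it assumes kubectl is on the PATH and configured against a reachable cluster, and the node label is a placeholder:

    from clusterman.clients.kube_client import KubeClient

    kube = KubeClient()  # raises if the kubectl executable cannot be found
    print([ns.get('NAME') for ns in kube.namespaces.list()])

    # Find nodes carrying an (illustrative) label, then retire the first one.
    nodes = kube.nodes.find(labels={'example.org/role': 'worker'})
    if nodes:
        node = nodes[0]
        kube.nodes.cordon(node)                                 # mark unschedulable
        kube.nodes.wait_till_jobs_complete(node, timeout=600)   # let job pods finish
        kube.nodes.drain(node, timeout=120)                     # evict remaining pods
        kube.nodes.delete(node)                                 # remove from the cluster
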
/cloudman/helmsman/serializers.py:
--------------------------------------------------------------------------------
1 | """DRF serializers for the CloudMan Create API endpoints."""
2 |
3 | from rest_framework import serializers
4 | from .api import HelmsManAPI
5 |
6 |
7 | class HMInstallTemplateSerializer(serializers.Serializer):
8 | id = serializers.CharField(read_only=True)
9 | name = serializers.CharField()
10 | repo = serializers.SlugField()
11 | chart = serializers.SlugField()
12 | chart_version = serializers.CharField(allow_blank=True, required=False)
13 | template = serializers.CharField()
14 | context = serializers.DictField(required=False)
15 | display_name = serializers.CharField(allow_blank=True, required=False)
16 | summary = serializers.CharField(allow_blank=True, required=False)
17 | description = serializers.CharField(allow_blank=True, required=False)
18 | maintainers = serializers.CharField(allow_blank=True, required=False)
19 | info_url = serializers.CharField(allow_blank=True, required=False)
20 | icon_url = serializers.CharField(allow_blank=True, required=False)
21 | screenshot_url = serializers.CharField(allow_blank=True, required=False)
22 |
23 | def create(self, valid_data):
24 | return HelmsManAPI.from_request(
25 | self.context['request']).templates.create(
26 | name=valid_data.get('name'),
27 | repo=valid_data.get('repo'),
28 | chart=valid_data.get('chart'),
29 | chart_version=valid_data.get('chart_version'),
30 | template=valid_data.get('template'),
31 | context=valid_data.get('context'),
32 | display_name=valid_data.get('display_name'),
33 | summary=valid_data.get('summary'),
34 | description=valid_data.get('description'),
35 | maintainers=valid_data.get('maintainers'),
36 | info_url=valid_data.get('info_url'),
37 | icon_url=valid_data.get('icon_url'),
38 | screenshot_url=valid_data.get('screenshot_url'))
39 |
40 | def render_values(self, valid_data):
41 | return HelmsManAPI.from_request(self.context['request']
42 | ).templates.render_values(
43 | valid_data.get('name'),
44 | **valid_data)
45 |
46 | def update(self, template, valid_data):
47 | return HelmsManAPI.from_request(self.context['request']).templates.update(
48 | template,
49 | repo=valid_data.get('repo'),
50 | chart=valid_data.get('chart'),
51 | chart_version=valid_data.get('chart_version'),
52 | template=valid_data.get('template'),
53 | context=valid_data.get('context'),
54 | display_name=valid_data.get('display_name'),
55 | summary=valid_data.get('summary'),
56 | description=valid_data.get('description'),
57 | maintainers=valid_data.get('maintainers'),
58 | info_url=valid_data.get('info_url'),
59 | icon_url=valid_data.get('icon_url'),
60 | screenshot_url=valid_data.get('screenshot_url')
61 | )
62 |
63 | def delete(self, valid_data):
64 | return HelmsManAPI.from_request(self.context['request']
65 | ).templates.delete(
66 | valid_data.get('name'))
67 |
68 |
69 | class HMChartRepoSerializer(serializers.Serializer):
70 | id = serializers.CharField(read_only=True)
71 | name = serializers.CharField()
72 |
73 |
74 | class HMChartSerializer(serializers.Serializer):
75 | id = serializers.CharField(read_only=True)
76 | name = serializers.CharField(required=False)
77 | display_name = serializers.CharField(read_only=True)
78 | chart_version = serializers.CharField(allow_blank=True, required=False)
79 | revision = serializers.IntegerField(allow_null=True, required=False)
80 | app_version = serializers.CharField(read_only=True)
81 | namespace = serializers.CharField()
82 | state = serializers.CharField(allow_blank=True, read_only=False, required=False)
83 | updated = serializers.CharField(read_only=True)
84 | access_address = serializers.CharField(read_only=True)
85 | values = serializers.DictField(required=False)
86 | repo = HMChartRepoSerializer(read_only=True)
87 | repo_name = serializers.CharField(write_only=True, allow_blank=True, required=False)
88 | install_template = HMInstallTemplateSerializer(read_only=True, required=False)
89 | use_install_template = serializers.CharField(write_only=True, allow_blank=True, required=False)
90 |
91 | def create(self, valid_data):
92 | return HelmsManAPI.from_request(self.context['request']).charts.create(
93 | valid_data.get('repo_name'), valid_data.get('name'),
94 | valid_data.get('namespace'), valid_data.get('release_name'),
95 | valid_data.get('chart_version'), valid_data.get('values'))
96 |
97 | def update(self, chart, validated_data):
98 | if validated_data.get('state') == "rollback":
99 | return (HelmsManAPI.from_request(self.context['request']).charts
100 | .rollback(chart))
101 | return HelmsManAPI.from_request(self.context['request']).charts.update(
102 | chart, validated_data.get('values'), version=validated_data.get('chart_version'))
103 |
104 |
105 | class HMNamespaceSerializer(serializers.Serializer):
106 | name = serializers.CharField()
107 | status = serializers.CharField(allow_blank=True)
108 | age = serializers.CharField(allow_blank=True)
109 |
110 | def create(self, valid_data):
111 | return HelmsManAPI.from_request(self.context['request']
112 | ).namespaces.create(
113 | valid_data.get('name'))
114 |
115 | def delete(self, valid_data):
116 | return HelmsManAPI.from_request(self.context['request']
117 | ).namespaces.delete(
118 | valid_data.get('name'))
119 |
--------------------------------------------------------------------------------
/cloudman/clusterman/tests/test_mgmt_commands.py:
--------------------------------------------------------------------------------
1 | from io import StringIO
2 | import os
3 |
4 | from django.contrib.auth.models import User
5 | from django.core.management import call_command
6 | from django.core.management.base import CommandError
7 | from django.db import transaction
8 | from django.test import TestCase
9 |
10 | from djcloudbridge import models as cb_models
11 | from clusterman import models as cm_models
12 |
13 |
14 | TEST_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data')
15 |
16 |
17 | def load_kube_config():
18 | kube_config_path = os.path.join(TEST_DATA_PATH, 'kube_config.yaml')
19 | with open(kube_config_path) as f:
20 | return f.read()
21 |
22 |
23 | class ClusterCommandTestCase(TestCase):
24 |
25 | INITIAL_CLUSTER_DATA = os.path.join(
26 | TEST_DATA_PATH, 'initial_cluster_data_aws.yaml')
27 | INITIAL_CLUSTER_DATA_AZURE = os.path.join(
28 | TEST_DATA_PATH, 'initial_cluster_data_azure.yaml')
29 | INITIAL_CLUSTER_DATA_GCP = os.path.join(
30 | TEST_DATA_PATH, 'initial_cluster_data_gcp.yaml')
31 | INITIAL_CLUSTER_DATA_OPENSTACK = os.path.join(
32 | TEST_DATA_PATH, 'initial_cluster_data_openstack.yaml')
33 |
34 | def setUp(self):
35 | super().setUp()
36 | self.client.force_login(
37 | User.objects.get_or_create(username='admin', is_superuser=True)[0])
38 |
39 | def test_import_cloud_data_no_args(self):
40 | with self.assertRaisesRegex(CommandError, "required: filename"):
41 | call_command('import_cloud_data')
42 |
43 | def test_import_cloud_data_aws(self):
44 | call_command('import_cloud_data', self.INITIAL_CLUSTER_DATA)
45 | zone_obj = cb_models.Zone.objects.get(
46 | region__cloud__id='aws', region__region_id='amazon-us-east',
47 | zone_id='default')
48 |         self.assertEqual(zone_obj.name, 'us-east1')
49 |
50 | def test_import_cloud_data_azure(self):
51 | call_command('import_cloud_data', self.INITIAL_CLUSTER_DATA_AZURE)
52 | zone_obj = cb_models.Zone.objects.get(
53 | region__cloud__id='azure', region__region_id='azure-us-east',
54 | zone_id='default')
55 |         self.assertEqual(zone_obj.name, 'us-east1')
56 |
57 | def test_import_cloud_data_gcp(self):
58 | call_command('import_cloud_data', self.INITIAL_CLUSTER_DATA_GCP)
59 | zone_obj = cb_models.Zone.objects.get(
60 | region__cloud__id='gcp', region__region_id='gcp-us-east',
61 | zone_id='default')
62 |         self.assertEqual(zone_obj.name, 'us-east1')
63 |
64 | def test_import_cloud_data_openstack(self):
65 | call_command('import_cloud_data', self.INITIAL_CLUSTER_DATA_OPENSTACK)
66 | zone_obj = cb_models.Zone.objects.get(
67 | region__cloud__id='openstack', region__region_id='melbourne',
68 | zone_id='default')
69 |         self.assertEqual(zone_obj.name, 'melbourne')
70 |
71 | def test_create_cluster_no_args(self):
72 | with self.assertRaisesRegex(
73 | CommandError, "required: name, cluster_type, settings_file"):
74 | call_command('create_cluster')
75 |
76 | def test_create_cluster(self):
77 | call_command('create_cluster', 'test_cluster', 'KUBE_RKE', self.INITIAL_CLUSTER_DATA)
78 | cluster = cm_models.CMCluster.objects.get(name='test_cluster')
79 |         self.assertEqual(cluster.cluster_type, 'KUBE_RKE')
80 |
81 | def test_create_cluster_existing(self):
82 | with transaction.atomic():
83 | call_command('create_cluster', 'test_cluster', 'KUBE_RKE', self.INITIAL_CLUSTER_DATA)
84 | self.assertEqual(cm_models.CMCluster.objects.all().count(), 1)
85 | with transaction.atomic():
86 | call_command('create_cluster', 'test_cluster', 'KUBE_RKE', self.INITIAL_CLUSTER_DATA)
87 | self.assertEqual(cm_models.CMCluster.objects.all().count(), 1)
88 |
89 |
90 | class CreateAutoScaleUserCommandTestCase(TestCase):
91 |
92 | def setUp(self):
93 | self.client.force_login(
94 | User.objects.get_or_create(username='admin', is_superuser=True)[0])
95 |
96 | def test_create_autoscale_user_no_args(self):
97 | call_command('create_autoscale_user')
98 | self.assertTrue(User.objects.get(username='autoscaleuser'))
99 |
100 | def test_create_autoscale_user(self):
101 | call_command('create_autoscale_user', "--username", "testautoscale",
102 | "--password", "hello")
103 | user = User.objects.get(username='testautoscale')
104 |         self.assertEqual(user.username, "testautoscale")
105 | self.assertTrue(self.client.login(username="testautoscale", password="hello"))
106 |
107 | def test_create_autoscale_user_existing(self):
108 | out = StringIO()
109 | call_command('create_autoscale_user', "--username", "testautoscale2",
110 | "--password", "hello", stdout=out)
111 | self.assertIn("created successfully", out.getvalue())
112 | out = StringIO()
113 | call_command('create_autoscale_user', "--username", "testautoscale2",
114 | "--password", "hello", stdout=out)
115 | self.assertIn("already exists", out.getvalue())
116 |
117 | def test_create_autoscale_user_does_not_clobber_existing(self):
118 | User.objects.create_user(username="hello", password="world")
119 | call_command('create_autoscale_user', "--username", "hello",
120 | "--password", "overwrite")
121 | # Password should remain unchanged
122 | self.assertTrue(self.client.login(username="hello", password="world"))
123 |
124 | def test_create_autoscale_user_with_impersonate(self):
125 | out = StringIO()
126 | call_command('create_autoscale_user', "--username", "hello",
127 | "--password", "overwrite", "--impersonate_account", "admin",
128 | stdout=out)
129 | self.assertIn("created successfully", out.getvalue())
130 |
131 | def test_create_autoscale_user_with_non_existent_impersonate(self):
132 | out = StringIO()
133 | call_command('create_autoscale_user', "--username", "hello",
134 | "--password", "overwrite", "--impersonate_account", "non_existent",
135 | stdout=out)
136 | self.assertNotIn("created successfully", out.getvalue())
137 |
--------------------------------------------------------------------------------
/cloudman/clusterman/serializers.py:
--------------------------------------------------------------------------------
1 | """DRF serializers for the CloudMan Create API endpoints."""
2 |
3 | from rest_framework import serializers
4 | from rest_framework import status
5 | from rest_framework.exceptions import ValidationError
6 |
7 | from cloudlaunch import serializers as cl_serializers
8 | from djcloudbridge import models as cb_models
9 | from djcloudbridge.drf_helpers import CustomHyperlinkedIdentityField
10 |
11 | from .api import CloudManAPI
12 | from .exceptions import CMDuplicateNameException
13 |
14 |
15 | class CMClusterSerializer(serializers.Serializer):
16 | id = serializers.CharField(read_only=True)
17 | name = serializers.CharField()
18 | cluster_type = serializers.CharField()
19 | connection_settings = serializers.DictField(write_only=True, required=False)
20 | default_vm_type = serializers.CharField(read_only=True)
21 | default_zone = cl_serializers.DeploymentZoneSerializer(read_only=True)
22 | autoscale = serializers.BooleanField(required=False, initial=True, default=True)
23 | nodes = CustomHyperlinkedIdentityField(view_name='node-list',
24 | lookup_field='cluster_id',
25 | lookup_url_kwarg='cluster_pk')
26 |
27 | def create(self, valid_data):
28 | try:
29 | cmapi = CloudManAPI.from_request(self.context['request'])
30 | return cmapi.clusters.create(
31 | valid_data.get('name'), valid_data.get('cluster_type'),
32 | valid_data.get('connection_settings'),
33 | autoscale=valid_data.get('autoscale'))
34 | except CMDuplicateNameException as e:
35 | raise ValidationError(detail=str(e))
36 |
37 | def update(self, instance, valid_data):
38 | instance.name = valid_data.get('name') or instance.name
39 | instance.autoscale = valid_data.get('autoscale')
40 | return CloudManAPI.from_request(
41 | self.context['request']).clusters.update(instance)
42 |
43 |
44 | class CMClusterNodeSerializer(serializers.Serializer):
45 | id = serializers.CharField(read_only=True)
46 | name = serializers.CharField(read_only=True)
47 | cluster = CMClusterSerializer(read_only=True)
48 | vm_type = serializers.CharField(write_only=True)
49 | deployment = cl_serializers.DeploymentSerializer(read_only=True)
50 | autoscaler = serializers.PrimaryKeyRelatedField(read_only=True)
51 |
52 | def create(self, valid_data):
53 | cluster_id = self.context['view'].kwargs.get("cluster_pk")
54 | cluster = CloudManAPI.from_request(self.context['request']).clusters.get(cluster_id)
55 | if not cluster:
56 | raise ValidationError("Specified cluster id: %s does not exist"
57 | % cluster_id)
58 | return cluster.nodes.create(valid_data.get('vm_type'))
59 |
60 |
61 | class CMClusterAutoScalerSerializer(serializers.Serializer):
62 | id = serializers.CharField(read_only=True)
63 | name = serializers.CharField(allow_blank=True)
64 | cluster = CMClusterSerializer(read_only=True)
65 | vm_type = serializers.CharField()
66 | allowed_vm_type_prefixes = serializers.CharField(allow_null=True, required=False)
67 | zone = serializers.PrimaryKeyRelatedField(queryset=cb_models.Zone.objects.all())
68 | min_nodes = serializers.IntegerField(min_value=0, allow_null=True,
69 | required=False)
70 | max_nodes = serializers.IntegerField(min_value=1, max_value=5000,
71 | allow_null=True, required=False)
72 |
73 | def create(self, valid_data):
74 | cluster_id = self.context['view'].kwargs.get("cluster_pk")
75 | cluster = CloudManAPI.from_request(self.context['request']).clusters.get(cluster_id)
76 | if not cluster:
77 | raise ValidationError("Specified cluster id: %s does not exist"
78 | % cluster_id)
79 | return cluster.autoscalers.create(
80 | valid_data.get('vm_type'),
81 | allowed_vm_type_prefixes=valid_data.get('allowed_vm_type_prefixes'),
82 | name=valid_data.get('name'),
83 | zone=valid_data.get('zone'),
84 | min_nodes=valid_data.get('min_nodes'),
85 | max_nodes=valid_data.get('max_nodes'))
86 |
87 | def update(self, instance, valid_data):
88 | cluster_id = self.context['view'].kwargs.get("cluster_pk")
89 | cluster = CloudManAPI.from_request(self.context['request']).clusters.get(cluster_id)
90 | instance.name = valid_data.get('name') or instance.name
91 | instance.vm_type = valid_data.get('vm_type') or instance.vm_type
92 | instance.allowed_vm_type_prefixes = valid_data.get('allowed_vm_type_prefixes')
93 | instance.min_nodes = (instance.min_nodes if valid_data.get('min_nodes') is None
94 | else valid_data.get('min_nodes'))
95 | instance.max_nodes = (instance.max_nodes if valid_data.get('max_nodes') is None
96 | else valid_data.get('max_nodes'))
97 | instance.zone = valid_data.get('zone') or instance.zone
98 | return cluster.autoscalers.update(instance)
99 |
100 |
101 | # xref: https://prometheus.io/docs/alerting/configuration/#webhook_config
102 | class PrometheusAlertSerializer(serializers.Serializer):
103 | status = serializers.CharField(allow_blank=True, required=False)
104 | labels = serializers.DictField(required=False)
105 | annotations = serializers.DictField(required=False)
106 | startsAt = serializers.CharField(allow_blank=True, required=False)
107 | endsAt = serializers.CharField(allow_blank=True, required=False)
108 | generatorURL = serializers.CharField(allow_blank=True, required=False)
109 |
110 |
111 | # xref: https://prometheus.io/docs/alerting/configuration/#webhook_config
112 | class PrometheusWebHookSerializer(serializers.Serializer):
113 | version = serializers.CharField()
114 | groupKey = serializers.CharField(allow_blank=True, required=False)
115 | receiver = serializers.CharField(allow_blank=True, required=False)
116 | groupLabels = serializers.DictField(required=False)
117 | commonLabels = serializers.DictField(required=False)
118 | commonAnnotations = serializers.DictField(required=False)
119 | externalURL = serializers.CharField(allow_blank=True, required=False)
120 | alerts = serializers.ListField(child=PrometheusAlertSerializer(),
121 | allow_empty=True, required=False)
122 |
--------------------------------------------------------------------------------
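For orientation, here is the kind of payload CMClusterSerializer validates. The field names come from the serializer above, while the concrete values are made-up examples; saving would additionally require a request in the serializer context:

    # Hypothetical cluster-creation payload; only 'name' and 'cluster_type' are
    # required, 'connection_settings' is write-only and 'autoscale' defaults to True.
    from clusterman.serializers import CMClusterSerializer

    payload = {
        'name': 'example-cluster',
        'cluster_type': 'KUBE_RKE',
        'connection_settings': {},   # provider-specific settings, write-only
        'autoscale': True,
    }
    serializer = CMClusterSerializer(data=payload)
    serializer.is_valid(raise_exception=True)
    # serializer.save() would call create(), which delegates to
    # CloudManAPI.from_request(request).clusters.create(...)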
/cloudman/helmsman/clients/helm_client.py:
--------------------------------------------------------------------------------
1 | """A wrapper around the helm commandline client"""
2 | import contextlib
3 | import shutil
4 |
5 | from enum import Enum
6 |
7 | from clusterman.clients import helpers
8 |
9 | from helmsman import helpers as hm_helpers
10 |
11 |
12 | class HelmService(object):
13 |     """Marker interface for Helm services"""
14 | def __init__(self, client):
15 | self._client = client
16 |
17 | def client(self):
18 | return self._client
19 |
20 |
21 | class HelmClient(HelmService):
22 |
23 | def __init__(self):
24 | self._check_environment()
25 | super(HelmClient, self).__init__(self)
26 | self._release_svc = HelmReleaseService(self)
27 | self._repo_svc = HelmRepositoryService(self)
28 | self._repo_chart_svc = HelmRepoChartService(self)
29 |
30 | @staticmethod
31 | def _check_environment():
32 | if not shutil.which("helm"):
33 | raise Exception("Could not find helm executable in path")
34 |
35 | @property
36 | def releases(self):
37 | return self._release_svc
38 |
39 | @property
40 | def repositories(self):
41 | return self._repo_svc
42 |
43 | @property
44 | def repo_charts(self):
45 | return self._repo_chart_svc
46 |
47 |
48 | class HelmValueHandling(Enum):
49 | RESET = 0 # equivalent to --reset-values
50 | REUSE = 1 # equivalent to --reuse-values
51 | DEFAULT = 2 # uses only values passed in
52 |
53 |
54 | class HelmReleaseService(HelmService):
55 |
56 | def __init__(self, client):
57 | super(HelmReleaseService, self).__init__(client)
58 |
59 | def list(self, namespace=None):
60 | cmd = ["helm", "list"]
61 | if namespace:
62 | cmd += ["--namespace", namespace]
63 | else:
64 | cmd += ["--all-namespaces"]
65 | data = helpers.run_list_command(cmd)
66 | return data
67 |
68 | def get(self, namespace, release_name):
69 | return {}
70 |
71 | def _set_values_and_run_command(self, cmd, values_list):
72 | """
73 | Handles helm values by writing values to a temporary file,
74 | after which the command is run. The temporary file is cleaned
75 |         up on exit from this method. This allows special values, such as
76 |         braces, to be passed without the complex escaping that the helm
77 |         --set flag would otherwise require.
78 | 
79 |         The values argument can also be a list of values, in which case
80 |         each is written to its own temp file and passed to helm.
81 | """
82 | if not isinstance(values_list, list):
83 | values_list = [values_list]
84 |         # contextlib.ExitStack allows multiple temp files to be cleaned up
85 | # on exit. ref: https://stackoverflow.com/a/19412700
86 | with contextlib.ExitStack() as stack:
87 | files = [stack.enter_context(hm_helpers.TempValuesFile(values))
88 | for values in values_list]
89 | for file in files:
90 | cmd += ["-f", file.name]
91 | return helpers.run_command(cmd)
92 |
93 | def create(self, chart, namespace, release_name=None,
94 | version=None, values=None):
95 | cmd = ["helm", "install", "--namespace", namespace]
96 |
97 | if release_name:
98 | cmd += [release_name, chart]
99 | else:
100 | cmd += [chart, "--generate-name"]
101 | if version:
102 | cmd += ["--version", version]
103 | return self._set_values_and_run_command(cmd, values)
104 |
105 | def update(self, namespace, release_name, chart, values=None,
106 | value_handling=HelmValueHandling.REUSE, version=None):
107 | """
108 | The chart argument can be either: a chart reference('stable/mariadb'),
109 | a path to a chart directory, a packaged chart, or a fully qualified
110 | URL. For chart references, the latest version will be specified unless
111 | the '--version' flag is set.
112 | """
113 | cmd = ["helm", "upgrade", "--namespace", namespace,
114 | release_name, chart]
115 | if value_handling == value_handling.RESET:
116 | cmd += ["--reset-values"]
117 | elif value_handling == value_handling.REUSE:
118 | cmd += ["--reuse-values"]
119 | else: # value_handling.DEFAULT
120 | pass
121 | if version:
122 | cmd += ["--version", version]
123 | return self._set_values_and_run_command(cmd, values)
124 |
125 | def history(self, namespace, release_name):
126 | data = helpers.run_list_command(
127 | ["helm", "history", "--namespace", namespace, release_name])
128 | return data
129 |
130 | def rollback(self, namespace, release_name, revision=None):
131 | if not revision:
132 | history = self.history(namespace, release_name)
133 | if history and len(history) > 1:
134 | # Rollback to previous
135 | revision = history[-2].get('REVISION')
136 | else:
137 | return
138 | return helpers.run_command(
139 | ["helm", "rollback", "--namespace", namespace,
140 | release_name, revision])
141 |
142 | def delete(self, namespace, release_name):
143 | return helpers.run_command(
144 | ["helm", "delete", "--namespace", namespace, release_name])
145 |
146 | def get_values(self, namespace, release_name, get_all=True):
147 | """
148 | get_all=True will also dump chart default values.
149 | get_all=False will only return user overridden values.
150 | """
151 | cmd = ["helm", "get", "values", "-o", "yaml",
152 | "--namespace", namespace, release_name]
153 | if get_all:
154 | cmd += ["--all"]
155 | return helpers.run_yaml_command(cmd)
156 |
157 | @staticmethod
158 | def parse_chart_name(name):
159 | """
160 | Parses a chart name-version string such as galaxy-cvmfs-csi-1.0.0 and
161 | returns the name portion (e.g. galaxy-cvmfs-csi) only
162 | """
163 | return name.rpartition("-")[0] if name else name
164 |
165 | @staticmethod
166 | def parse_chart_version(name):
167 | """
168 | Parses a chart name-version string such as galaxy-cvmfs-csi-1.0.0 and
169 | returns the version portion (e.g. 1.0.0) only
170 | """
171 | return name.rpartition("-")[2] if name else name
172 |
173 |
174 | class HelmRepositoryService(HelmService):
175 |
176 | def __init__(self, client):
177 | super(HelmRepositoryService, self).__init__(client)
178 |
179 | def list(self):
180 | data = helpers.run_list_command(["helm", "repo", "list"])
181 | return data
182 |
183 | def update(self):
184 | return helpers.run_command(["helm", "repo", "update"])
185 |
186 | def create(self, repo_name, url):
187 | return helpers.run_command(["helm", "repo", "add", repo_name, url])
188 |
189 | def delete(self, repo_name):
190 | return helpers.run_command(["helm", "repo", "remove", repo_name])
191 |
192 |
193 | class HelmRepoChartService(HelmService):
194 |
195 | def __init__(self, client):
196 | super(HelmRepoChartService, self).__init__(client)
197 |
198 | def list(self, chart_name=None, chart_version=None, search_hub=False):
199 | # Perform exact match if chart_name specified.
200 | # https://github.com/helm/helm/issues/3890
201 | data = helpers.run_list_command(
202 | ["helm", "search", "hub" if search_hub else "repo"] +
203 |             (["--regexp", "%s\\v" % chart_name] if chart_name else []) +
204 |             (["--version", chart_version] if chart_version else []))
205 | return data
206 |
207 | def get(self, chart_name):
208 | return {}
209 |
210 | def find(self, name, version, search_hub=False):
211 | return self.list(chart_name=name, chart_version=version, search_hub=search_hub)
212 |
213 | def create(self, chart_name):
214 | raise Exception("Not implemented")
215 |
216 | def delete(self, release_name):
217 | raise Exception("Not implemented")
218 |
--------------------------------------------------------------------------------
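A minimal usage sketch of the wrapper above, assuming the helm binary is on the PATH (the constructor raises otherwise); the repository and chart names are illustrative:

    # Illustrative only: exercising HelmClient against a live cluster.
    from helmsman.clients.helm_client import HelmClient, HelmValueHandling

    client = HelmClient()  # raises if no 'helm' executable is found on the PATH

    # Register a repository and refresh the local cache.
    client.repositories.create('bitnami', 'https://charts.bitnami.com/bitnami')
    client.repositories.update()

    # Install a release; values are written to temp files and passed via '-f',
    # so braces and other special values need no escaping.
    client.releases.create('bitnami/nginx', namespace='default',
                           release_name='web', values={'replicaCount': 2})

    # Upgrade while reusing previously set values (the default handling).
    client.releases.update('default', 'web', 'bitnami/nginx',
                           values={'replicaCount': 3},
                           value_handling=HelmValueHandling.REUSE)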
/cloudman/clusterman/views.py:
--------------------------------------------------------------------------------
1 | """CloudMan Create views."""
2 | from django.contrib.auth.models import User
3 |
4 | from rest_framework.authentication import SessionAuthentication, BasicAuthentication
5 | from rest_framework.permissions import IsAuthenticated
6 | from rest_framework import viewsets, mixins
7 |
8 | from djcloudbridge import drf_helpers
9 | from . import serializers
10 | from .api import CloudManAPI
11 | from .api import CMServiceContext
12 | from .models import GlobalSettings
13 |
14 |
15 | class ClusterViewSet(drf_helpers.CustomModelViewSet):
16 | """Returns list of clusters managed by CloudMan."""
17 |
18 | permission_classes = (IsAuthenticated,)
19 | # Required for the Browsable API renderer to have a nice form.
20 | serializer_class = serializers.CMClusterSerializer
21 |
22 | def list_objects(self):
23 | """Get a list of all registered clusters."""
24 | return CloudManAPI.from_request(self.request).clusters.list()
25 |
26 | def get_object(self):
27 |         """Get info about a specific cluster."""
28 | return CloudManAPI.from_request(self.request).clusters.get(
29 | self.kwargs["pk"])
30 |
31 |
32 | class ClusterNodeViewSet(drf_helpers.CustomModelViewSet):
33 | """
34 | Returns a list of nodes currently registered with CloudMan.
35 | """
36 | permission_classes = (IsAuthenticated,)
37 | # Required for the Browsable API renderer to have a nice form.
38 | serializer_class = serializers.CMClusterNodeSerializer
39 |
40 | def list_objects(self):
41 | cluster = CloudManAPI.from_request(self.request).clusters.get(
42 | self.kwargs["cluster_pk"])
43 | if cluster:
44 | return cluster.nodes.list()
45 | else:
46 | return []
47 |
48 | def get_object(self):
49 | cluster = CloudManAPI.from_request(self.request).clusters.get(
50 | self.kwargs["cluster_pk"])
51 | if cluster:
52 | return cluster.nodes.get(self.kwargs["pk"])
53 | else:
54 | return None
55 |
56 |
57 | class ClusterAutoScalerViewSet(drf_helpers.CustomModelViewSet):
58 | """
59 | Returns a list of autoscalers currently registered with CloudMan.
60 | """
61 | permission_classes = (IsAuthenticated,)
62 | # Required for the Browsable API renderer to have a nice form.
63 | serializer_class = serializers.CMClusterAutoScalerSerializer
64 |
65 | def list_objects(self):
66 | cluster = CloudManAPI.from_request(self.request).clusters.get(
67 | self.kwargs["cluster_pk"])
68 | if cluster:
69 | return cluster.autoscalers.list()
70 | else:
71 | return []
72 |
73 | def get_object(self):
74 | cluster = CloudManAPI.from_request(self.request).clusters.get(
75 | self.kwargs["cluster_pk"])
76 | if cluster:
77 | return cluster.autoscalers.get(self.kwargs["pk"])
78 | else:
79 | return None
80 |
81 |
82 | class CustomCreateOnlyModelViewSet(drf_helpers.CustomNonModelObjectMixin,
83 | mixins.CreateModelMixin,
84 | viewsets.GenericViewSet):
85 | pass
86 |
87 |
88 | class ClusterScaleUpSignalViewSet(CustomCreateOnlyModelViewSet):
89 | """
90 |     Receives scale-up signals from the Prometheus Alertmanager webhook.
91 |     Accepts POST (create) requests only.
92 | """
93 | serializer_class = serializers.PrometheusWebHookSerializer
94 | permission_classes = (IsAuthenticated,)
95 | authentication_classes = [SessionAuthentication, BasicAuthentication]
96 |
97 | def _process_alert(self, alert):
98 | labels = {}
99 | zone_name = alert.get('labels', {}).get('availability_zone')
100 | if zone_name:
101 | labels['availability_zone'] = zone_name
102 |
103 | vcpus = float(alert.get('annotations', {}).get('cpus') or 0)
104 | if vcpus:
105 | labels['min_vcpus'] = vcpus
106 |
107 | ram = float(alert.get('annotations', {}).get('memory') or 0) / 1024 / 1024 / 1024
108 | if ram:
109 | labels['min_ram'] = ram
110 |
111 | scaling_group = alert.get('labels', {}).get(
112 | 'label_usegalaxy_org_cm_autoscaling_group')
113 | if scaling_group:
114 | labels['usegalaxy.org/cm_autoscaling_group'] = scaling_group
115 |
116 | impersonate = (User.objects.filter(
117 | username=GlobalSettings().settings.autoscale_impersonate).first()
118 | or User.objects.filter(is_superuser=True).first())
119 | cmapi = CloudManAPI(CMServiceContext(user=impersonate))
120 | cluster = cmapi.clusters.get(self.kwargs["cluster_pk"])
121 | if cluster:
122 | return cluster.scaleup(labels=labels)
123 | else:
124 | return None
125 |
126 | def perform_create(self, serializer):
127 | # first, check whether the current user has permissions to
128 | # autoscale
129 | cmapi = CloudManAPI.from_request(self.request)
130 | cmapi.check_permissions('autoscalers.can_autoscale')
131 | # If so, the remaining actions must be carried out as an impersonated user
132 | # whose profile contains the relevant cloud credentials, usually an admin
133 |
134 | alerts = serializer.validated_data.get('alerts', [])
135 |
136 | # pick only one alert per scaling group
137 | alerts_per_group = {}
138 | for alert in alerts:
139 | scaling_group = alert.get('labels', {}).get(
140 | 'label_usegalaxy_org_cm_autoscaling_group')
141 | if scaling_group not in alerts_per_group:
142 | alerts_per_group[scaling_group] = alert
143 |
144 | # dispatch scale up signal for each alert
145 | for alert in alerts_per_group.values():
146 | self._process_alert(alert)
147 |
148 |
149 | class ClusterScaleDownSignalViewSet(CustomCreateOnlyModelViewSet):
150 | """
151 |     Receives scale-down signals from the Prometheus Alertmanager webhook.
152 |     Accepts POST (create) requests only.
153 | """
154 | serializer_class = serializers.PrometheusWebHookSerializer
155 | permission_classes = (IsAuthenticated,)
156 | authentication_classes = [SessionAuthentication, BasicAuthentication]
157 |
158 | def _process_alert(self, alert):
159 | labels = {}
160 | zone_name = alert.get('labels', {}).get('availability_zone')
161 | if zone_name:
162 | labels['availability_zone'] = zone_name
163 |
164 | node_name = alert.get('labels', {}).get(
165 | 'label_usegalaxy_org_cm_node_name')
166 | if node_name:
167 | labels['usegalaxy.org/cm_node_name'] = node_name
168 |
169 | scaling_group = alert.get('labels', {}).get(
170 | 'label_usegalaxy_org_cm_autoscaling_group')
171 | if scaling_group:
172 | labels['usegalaxy.org/cm_autoscaling_group'] = scaling_group
173 |
174 | impersonate = (User.objects.filter(
175 | username=GlobalSettings().settings.autoscale_impersonate).first()
176 | or User.objects.filter(is_superuser=True).first())
177 | cmapi = CloudManAPI(CMServiceContext(user=impersonate))
178 | cluster = cmapi.clusters.get(self.kwargs["cluster_pk"])
179 | if cluster:
180 | return cluster.scaledown(labels=labels)
181 | else:
182 | return None
183 |
184 | def perform_create(self, serializer):
185 | # first, check whether the current user has permissions to
186 | # autoscale
187 |         print("Scale down signal received...")
188 | cmapi = CloudManAPI.from_request(self.request)
189 | cmapi.check_permissions('autoscalers.can_autoscale')
190 | # If so, the remaining actions must be carried out as an impersonated user
191 | # whose profile contains the relevant cloud credentials, usually an admin
192 |
193 | alerts = serializer.validated_data.get('alerts', [])
194 |
195 | # pick only one alert per scaling group
196 | alerts_per_group = {}
197 | for alert in alerts:
198 | scaling_group = alert.get('labels', {}).get(
199 | 'label_usegalaxy_org_cm_autoscaling_group')
200 | if scaling_group not in alerts_per_group:
201 | alerts_per_group[scaling_group] = alert
202 |
203 | # dispatch scale down signal for each alert
204 | for alert in alerts_per_group.values():
205 | self._process_alert(alert)
206 |
--------------------------------------------------------------------------------
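The scale-up and scale-down viewsets above consume Prometheus Alertmanager webhook payloads. The sketch below shows the shape PrometheusWebHookSerializer accepts and which labels and annotations _process_alert reads; all values are illustrative:

    # Hypothetical Alertmanager webhook body POSTed to the scale-up endpoint.
    from clusterman.serializers import PrometheusWebHookSerializer

    webhook_body = {
        'version': '4',
        'alerts': [{
            'status': 'firing',
            'labels': {
                'availability_zone': 'us-east-1a',
                'label_usegalaxy_org_cm_autoscaling_group': 'default',
            },
            'annotations': {
                'cpus': '4',                 # becomes labels['min_vcpus']
                'memory': str(8 * 1024**3),  # bytes, converted to GB for labels['min_ram']
            },
        }],
    }
    PrometheusWebHookSerializer(data=webhook_body).is_valid(raise_exception=True)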
/cloudman/clusterman/cluster_templates.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from rest_framework.exceptions import ValidationError
3 | from cloudlaunch import models as cl_models
4 |
5 | from clusterman.clients.kube_client import KubeClient
6 |
7 |
8 | class CMClusterTemplate(object):
9 |
10 | def __init__(self, context, cluster):
11 | self.context = context
12 | self.cluster = cluster
13 |
14 | @property
15 | def connection_settings(self):
16 | return self.cluster.connection_settings
17 |
18 | @abc.abstractmethod
19 | def add_node(self, name, vm_type=None, zone=None, min_vcpus=0, min_ram=0, vm_family="",
20 | autoscaling_group=None):
21 | pass
22 |
23 | @abc.abstractmethod
24 | def remove_node(self):
25 | pass
26 |
27 | @abc.abstractmethod
28 | def find_matching_node(self, labels=None):
29 | pass
30 |
31 | @abc.abstractmethod
32 | def activate_autoscaling(self, min_nodes=0, max_nodes=None, size=None):
33 | pass
34 |
35 | @abc.abstractmethod
36 | def deactivate_autoscaling(self):
37 | pass
38 |
39 | @staticmethod
40 | def get_template_for(context, cluster):
41 | if cluster.cluster_type == "KUBE_RKE":
42 | return CMRKETemplate(context, cluster)
43 | else:
44 |             raise KeyError("Cannot get cluster template for unknown cluster "
45 | "type: %s" % cluster.cluster_type)
46 |
47 |
48 | class CMRKETemplate(CMClusterTemplate):
49 |
50 | def __init__(self, context, cluster):
51 | super(CMRKETemplate, self).__init__(context, cluster)
52 | settings = cluster.connection_settings.get('rke_config')
53 | self._rke_registration_server = settings.get('rke_registration_server')
54 | self._rke_registration_token = settings.get('rke_registration_token')
55 | self._rke_cluster_id = settings.get('rke_cluster_id')
56 | self._kube_cloud_conf = settings.get('kube_cloud_conf')
57 |
58 | @property
59 | def kube_cloud_conf(self):
60 | return self._kube_cloud_conf
61 |
62 | @property
63 | def rke_registration_server(self):
64 | return self._rke_registration_server
65 |
66 | @property
67 | def rke_registration_token(self):
68 | return self._rke_registration_token
69 |
70 | @property
71 | def rke_cluster_id(self):
72 | return self._rke_cluster_id
73 |
74 | def _find_matching_vm_type(self, zone_model=None, default_vm_type=None,
75 | min_vcpus=0, min_ram=0, vm_family=""):
76 | """
77 |         Finds the vm_type that best matches the given criteria. If no
78 |         criteria are specified, returns the cluster's default vm type.
79 | 
80 |         :param zone_model: the zone (djcloudbridge model) to search for vm types in.
81 |         :param default_vm_type: vm type to prefer; defaults to the cluster default.
82 |         :param min_vcpus: minimum number of vcpus the vm type must have.
83 |         :param min_ram: minimum amount of RAM (in GB) the vm type must have.
84 |         :param vm_family: comma-separated list of allowed vm type name prefixes.
85 |         :return: the name of the best matching vm type.
86 | """
87 | vm_type = default_vm_type or self.cluster.default_vm_type
88 | vm_family = vm_family or ""
89 | if min_vcpus > 0 or min_ram > 0 or not vm_type.startswith(tuple(vm_family.split(","))):
90 | # Add some accommodation for rancher and k8s reserved resources
91 | # https://kubernetes.io/docs/tasks/administer-cluster/reserve-compute-resources/
92 | min_vcpus += 0.5
93 | min_ram *= 1.1
94 |
95 | cloud = self.context.cloudlaunch_client.infrastructure.clouds.get(
96 | zone_model.region.cloud.id)
97 | region = cloud.regions.get(zone_model.region.region_id)
98 | zone = region.zones.get(zone_model.zone_id)
99 | default_matches = zone.vm_types.list(vm_type_prefix=vm_type)
100 | if default_matches:
101 | default_match = default_matches[0]
102 | min_vcpus = min_vcpus if min_vcpus > float(default_match.vcpus) else default_match.vcpus
103 | min_ram = min_ram if min_ram > float(default_match.ram) else default_match.ram
104 | candidates = zone.vm_types.list(min_vcpus=min_vcpus, min_ram=min_ram,
105 | vm_type_prefix=vm_family)
106 | if candidates:
107 | candidate_type = sorted(candidates, key=lambda x: float(x.vcpus) * float(x.ram))[0]
108 | return candidate_type.name
109 | return vm_type
110 |
111 | def add_node(self, name, vm_type=None, zone=None, min_vcpus=0, min_ram=0, vm_family="",
112 | autoscaling_group=None):
113 | settings = self.cluster.connection_settings
114 | zone = zone or self.cluster.default_zone
115 | deployment_target = cl_models.CloudDeploymentTarget.objects.get(
116 | target_zone=zone)
117 | params = {
118 | 'name': name,
119 | 'application': 'cm_rke_kubernetes_plugin',
120 | 'deployment_target_id': deployment_target.id,
121 | 'application_version': '0.1.0',
122 | 'config_app': {
123 | 'action': 'add_node',
124 | 'config_kube_rke': {
125 | 'rke_registration_server': self.rke_registration_server,
126 | 'rke_registration_token': self.rke_registration_token,
127 | 'rke_cluster_id': self.rke_cluster_id,
128 | 'kube_cloud_conf': self.kube_cloud_conf
129 | },
130 | "config_appliance": {
131 | "sshUser": "ubuntu",
132 | "runner": "ansible",
133 | "repository": "https://github.com/CloudVE/cloudman-boot",
134 | "inventoryTemplate":
135 | "[controllers]\n\n"
136 | "[agents]\n"
137 | "${host}\n\n"
138 | "[rke_cluster:children]\n"
139 | "controllers\n"
140 | "agents\n\n"
141 | "[all:vars]\n"
142 | "ansible_ssh_port=22\n"
143 | "ansible_user='${user}'\n"
144 | "ansible_ssh_private_key_file=pk\n"
145 | "ansible_ssh_extra_args='-o StrictHostKeyChecking=no"
146 | " -o ControlMaster=no'\n"
147 | },
148 | 'config_cloudlaunch': (settings.get('app_config', {})
149 | .get('config_cloudlaunch', {})),
150 | 'config_cloudman': {
151 | 'cluster_name': self.cluster.name,
152 | 'autoscaling_group': autoscaling_group
153 | }
154 | }
155 | }
156 |
157 | params['config_app']['config_cloudlaunch']['vmType'] = \
158 | self._find_matching_vm_type(
159 | zone_model=zone, default_vm_type=vm_type, min_vcpus=min_vcpus,
160 | min_ram=min_ram, vm_family=vm_family)
161 | # params['config_app']['config_cloudlaunch']['skip_floating_ip'] = True
162 |
163 | print("Adding node: {0} of type: {1}".format(
164 | name, params['config_app']['config_cloudlaunch']['vmType']))
165 |
166 | # Don't use hostname config
167 | params['config_app']['config_cloudlaunch'].pop('hostnameConfig', None)
168 | try:
169 | print("Launching node with settings: {0}".format(params))
170 | return self.context.cloudlaunch_client.deployments.create(**params)
171 | except Exception as e:
172 | raise ValidationError("Could not launch node: " + str(e))
173 |
174 | def remove_node(self, node):
175 | print(f"Deleting deployment for node: {node.name}")
176 | return self.context.cloudlaunch_client.deployments.tasks.create(
177 | action='DELETE', deployment_pk=node.deployment.pk)
178 |
179 | def find_matching_node(self, labels=None):
180 | labels = labels.copy() if labels else {}
181 | kube_client = KubeClient()
182 | # find the k8s node which matches these labels
183 | k8s_matches = kube_client.nodes.find(labels=labels)
184 | if k8s_matches:
185 | k8s_node = k8s_matches[0]
186 | node_name = k8s_node.get('metadata', {}).get('labels', {}).get(
187 | 'usegalaxy.org/cm_node_name')
188 | # find the corresponding cloudman node
189 | for node in self.cluster.nodes.list():
190 | if node.name == node_name:
191 | return node
192 | return None
193 |
--------------------------------------------------------------------------------
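A sketch of how the template layer above is typically driven: resolve the template for a cluster, then add nodes through it. The context and cluster objects are assumed to come from the CloudMan API layer (see resources.py below); the node name and sizes are placeholders:

    # Illustrative only: driving CMClusterTemplate for a KUBE_RKE cluster.
    from clusterman.cluster_templates import CMClusterTemplate

    def scale_out(context, cluster, group_name):
        # get_template_for raises KeyError for unknown cluster types.
        template = CMClusterTemplate.get_template_for(context, cluster)
        # Launch a node sized for at least 2 vCPUs / 4 GB RAM in the default zone.
        return template.add_node('example-node-1', min_vcpus=2, min_ram=4,
                                 autoscaling_group=group_name)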
/cloudman/clusterman/plugins/rke_kubernetes_app.py:
--------------------------------------------------------------------------------
1 | """Plugin implementation for a simple web application."""
2 | import tenacity
3 |
4 | from celery.utils.log import get_task_logger
5 |
6 | from cloudlaunch.backend_plugins.base_vm_app import BaseVMAppPlugin
7 | from cloudlaunch.backend_plugins.cloudman2_app import get_iam_handler_for
8 | from cloudlaunch.configurers import AnsibleAppConfigurer
9 |
10 | from clusterman.clients.kube_client import KubeClient
11 |
12 | from rest_framework.serializers import ValidationError
13 |
14 | log = get_task_logger('cloudlaunch')
15 |
16 |
17 | def get_required_val(data, name, message):
18 | val = data.get(name)
19 | if not val:
20 | raise ValidationError({"error": message})
21 | return val
22 |
23 |
24 | class NodeNotDeleted(Exception):
25 | pass
26 |
27 |
28 | class RKEKubernetesApp(BaseVMAppPlugin):
29 | """
30 | RKE Kubernetes Appliance.
31 | """
32 | @staticmethod
33 | def validate_app_config(provider, name, cloud_config, app_config):
34 | rke_config = get_required_val(
35 | app_config, "config_kube_rke", "RKE configuration data"
36 | " must be provided. config_kube_rke entry not found in"
37 | " app_config.")
38 | assert 'rke_cluster_id' in rke_config
39 | assert 'rke_registration_server' in rke_config
40 | assert 'rke_registration_token' in rke_config
41 | return app_config
42 |
43 | def deploy(self, name, task, app_config, provider_config, **kwargs):
44 | """
45 | Handle the app launch process and wait for http.
46 |
47 | Pass boolean ``check_http`` as a ``False`` kwarg if you don't
48 | want this method to perform the app http check and prefer to handle
49 | it in the child class.
50 | """
51 | result = super().deploy(
52 | name, task, app_config, provider_config)
53 | return result
54 |
55 | @tenacity.retry(stop=tenacity.stop_after_attempt(2),
56 | wait=tenacity.wait_fixed(10),
57 | reraise=True,
58 | after=lambda *args: log.debug("Node not deleted yet, checking again..."))
59 | def check_node_no_longer_exists(self, node_name):
60 | # Newly added node should now be registered with the cluster
61 | kube_client = KubeClient()
62 | k8s_node = kube_client.nodes.find(labels={'usegalaxy.org/cm_node_name': node_name})
63 | if not k8s_node:
64 | return True
65 | else:
66 | raise NodeNotDeleted(
67 | f"Deleted node with name: {node_name} still attached to the cluster.")
68 |
69 | @tenacity.retry(stop=tenacity.stop_after_attempt(2),
70 | wait=tenacity.wait_fixed(10),
71 | reraise=True,
72 | after=lambda *args: log.debug("Node not removed, retrying......"))
73 | def delete(self, provider, deployment):
74 | """
75 | Delete resource(s) associated with the supplied deployment.
76 |
77 | This is a blocking call that will wait until the instance is marked
78 | as deleted or disappears from the provider.
79 |
80 | *Note* that this method will delete resource(s) associated with
81 | the deployment - this is an un-recoverable action.
82 | """
83 | deployment_name = deployment.get('name')
84 | try:
85 | kube_client = KubeClient()
86 | k8s_node = kube_client.nodes.find(labels={
87 | 'usegalaxy.org/cm_node_name': deployment_name})
88 | if k8s_node:
89 | k8s_node = k8s_node[0]
90 | try:
91 | # stop new jobs being scheduled on this node
92 | print(f"Cordoning node: {deployment_name}")
93 | kube_client.nodes.cordon(k8s_node)
94 | # let existing jobs finish
95 | print(f"Waiting for jobs to finish on node: {deployment_name}")
96 | kube_client.nodes.wait_till_jobs_complete(k8s_node)
97 | # drain remaining pods
98 | print(f"Draining node: {deployment_name}")
99 | kube_client.nodes.drain(k8s_node, timeout=120)
100 | finally:
101 | # delete the k8s node
102 | print(f"Deleting k8s node: {deployment_name}")
103 | kube_client.nodes.delete(k8s_node)
104 | finally:
105 | # delete the VM
106 | result = super().delete(provider, deployment)
107 | if self.check_node_no_longer_exists(deployment_name):
108 | return result
109 | else:
110 | raise NodeNotDeleted(
111 | f"Node has not been removed from the cluster")
112 |
113 | def _get_configurer(self, app_config):
114 |         # This appliance can only be configured with ansible
115 | return RKEKubernetesAnsibleAppConfigurer()
116 |
117 | def _provision_host(self, name, task, app_config, provider_config):
118 | provider = provider_config.get('cloud_provider')
119 | clust_name = app_config.get('config_cloudman', {}).get('cluster_name')
120 |
121 | handler_class = get_iam_handler_for(provider.PROVIDER_ID)
122 | if handler_class:
123 | provider = provider_config.get('cloud_provider')
124 | handler = handler_class(provider, clust_name, app_config)
125 | provider_config['extra_provider_args'] = \
126 | handler.create_iam_policy()
127 | result = super()._provision_host(name, task, app_config, provider_config)
128 | return result
129 |
130 |
131 | class NodeNotRegistered(Exception):
132 | pass
133 |
134 |
135 | class RKEKubernetesAnsibleAppConfigurer(AnsibleAppConfigurer):
136 | """Add CloudMan2 specific vars to playbook."""
137 |
138 | def _cb_provider_id_to_kube_provider_id(self, provider_id):
139 | CB_CLOUD_TO_KUBE_CLOUD_MAP = {
140 | 'aws': 'aws',
141 | 'openstack': 'openstack',
142 | 'azure': 'azure',
143 | 'gcp': 'gce'
144 | }
145 | return CB_CLOUD_TO_KUBE_CLOUD_MAP.get(provider_id)
146 |
147 | @tenacity.retry(stop=tenacity.stop_after_attempt(2),
148 | wait=tenacity.wait_fixed(10),
149 | reraise=True,
150 | after=lambda *args: log.debug("Node not registered yet, checking again..."))
151 | def has_reached_desired_state(self, provider_config):
152 | # Newly added node should now be registered with the cluster
153 | kube_client = KubeClient()
154 | node_ip = provider_config.get(
155 | 'host_config', {}).get('private_ip')
156 | k8s_node = kube_client.nodes.find(address=node_ip)
157 | if k8s_node and k8s_node[0]:
158 | return True
159 | else:
160 | raise NodeNotRegistered(
161 | f"New node with ip: {node_ip} has still not registered with k8s cluster")
162 |
163 | @tenacity.retry(stop=tenacity.stop_after_attempt(2),
164 | wait=tenacity.wait_fixed(10),
165 | reraise=True,
166 | after=lambda *args, **kwargs: log.debug("Node not registered, rerunning playbook..."))
167 | def configure(self, app_config, provider_config):
168 | playbook_vars = {
169 | 'kube_cloud_provider': self._cb_provider_id_to_kube_provider_id(
170 | provider_config.get('cloud_provider').PROVIDER_ID),
171 | 'cluster_hostname': app_config.get('config_kube_rke', {}).get(
172 | 'rke_cluster_id'),
173 | 'rke_registration_server': app_config.get('config_kube_rke', {}).get(
174 | 'rke_registration_server'),
175 | 'kube_cloud_conf': app_config.get('config_kube_rke', {}).get(
176 | 'kube_cloud_conf'),
177 | 'rke_registration_token': app_config.get('config_kube_rke', {}).get(
178 | 'rke_registration_token')
179 | }
180 | result = super().configure(app_config, provider_config,
181 | playbook_vars=playbook_vars)
182 | if self.has_reached_desired_state(provider_config):
183 | kube_client = KubeClient()
184 | node_ip = provider_config.get(
185 | 'host_config', {}).get('private_ip')
186 | k8s_node = kube_client.nodes.find(address=node_ip)[0]
187 | labels = {
188 | 'usegalaxy.org/cm_node_name': app_config.get(
189 | 'deployment_config', {}).get('name', '')
190 | }
191 | autoscaling_group = app_config.get('config_cloudman', {}).get(
192 | 'autoscaling_group', '')
193 | if autoscaling_group:
194 | labels['usegalaxy.org/cm_autoscaling_group'] = autoscaling_group
195 | kube_client.nodes.set_label(k8s_node, labels)
196 | return result
197 | else:
198 | raise NodeNotRegistered(
199 | f"Node has not been added to the cluster")
--------------------------------------------------------------------------------
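The plugin above leans on one recurring tenacity pattern: a bounded number of attempts, a fixed wait, and reraise=True so the original exception surfaces instead of a RetryError. A minimal standalone sketch of that pattern, with hypothetical names:

    # Minimal illustration of the retry pattern used throughout the plugin.
    import tenacity

    class NotReady(Exception):
        pass

    @tenacity.retry(stop=tenacity.stop_after_attempt(2),
                    wait=tenacity.wait_fixed(10),
                    reraise=True)  # reraise NotReady instead of tenacity.RetryError
    def wait_until_ready(check):
        if not check():
            raise NotReady("resource is not ready yet")
        return True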
/cloudman/clusterman/resources.py:
--------------------------------------------------------------------------------
1 | import logging as log
2 |
3 | from djcloudbridge import models as cb_models
4 |
5 | from .cluster_templates import CMClusterTemplate
6 |
7 |
8 | class Cluster(object):
9 |
10 | def __init__(self, service, db_model):
11 | self.db_model = db_model
12 | self.service = service
13 |
14 | @property
15 | def id(self):
16 | return self.db_model.id
17 |
18 | @property
19 | def added(self):
20 | return self.db_model.added
21 |
22 | @property
23 | def updated(self):
24 | return self.db_model.updated
25 |
26 | @property
27 | def name(self):
28 | return self.db_model.name
29 |
30 | @name.setter
31 | def name(self, value):
32 | self.db_model.name = value
33 |
34 | @property
35 | def cluster_type(self):
36 | return self.db_model.cluster_type
37 |
38 | @property
39 | def connection_settings(self):
40 | return self.db_model.connection_settings
41 |
42 | @property
43 | def default_vm_type(self):
44 | return self.db_model.default_vm_type
45 |
46 | @property
47 | def default_zone(self):
48 | return self.db_model.default_zone
49 |
50 | @property
51 | def autoscale(self):
52 | return self.db_model.autoscale
53 |
54 | @autoscale.setter
55 | def autoscale(self, value):
56 | self.db_model.autoscale = bool(value)
57 |
58 | def delete(self):
59 | return self.service.delete(self)
60 |
61 | def get_cluster_template(self):
62 | return CMClusterTemplate.get_template_for(self.service.context, self)
63 |
64 | def _get_default_scaler(self):
65 | return self.autoscalers.get_or_create_default()
66 |
67 | def scaleup(self, labels=None):
68 | print(f"Scale up requested. labels: {labels}")
69 |
70 | if self.autoscale:
71 | matched = False
72 | for scaler in self.autoscalers.list():
73 | if scaler.match(labels=labels):
74 | matched = True
75 | scaler.scaleup(labels=labels)
76 | break
77 | if not matched:
78 | default_scaler = self._get_default_scaler()
79 | requested_group = labels.get("usegalaxy.org/cm_autoscaling_group")
80 | if not requested_group or requested_group == default_scaler.name:
81 | labels["usegalaxy.org/cm_autoscaling_group"] = default_scaler.name
82 | default_scaler.scaleup(labels=labels)
83 | else:
84 | log.debug("Autoscale up signal received but autoscaling is disabled.")
85 |
86 | def scaledown(self, labels=None):
87 | print(f"Scale down requested. labels: {labels}")
88 |
89 | if self.autoscale:
90 | matched = False
91 | for scaler in self.autoscalers.list():
92 | if scaler.match(labels=labels):
93 | matched = True
94 | scaler.scaledown(labels=labels)
95 | break
96 | if not matched:
97 | default_scaler = self._get_default_scaler()
98 | requested_group = labels.get("usegalaxy.org/cm_autoscaling_group")
99 | if not requested_group or requested_group == default_scaler.name:
100 | labels["usegalaxy.org/cm_autoscaling_group"] = default_scaler.name
101 | default_scaler.scaledown(labels=labels)
102 | else:
103 | log.debug("Autoscale down signal received but autoscaling is disabled.")
104 |
105 |
106 | class ClusterAutoScaler(object):
107 | """
108 | This class represents an AutoScaler Group, and is
109 | analogous to a scaling group in AWS.
110 | """
111 |
112 | def __init__(self, service, db_model):
113 | self.db_model = db_model
114 | self.service = service
115 |
116 | @property
117 | def cluster(self):
118 | return self.service.cluster
119 |
120 | @property
121 | def id(self):
122 | return self.db_model.id
123 |
124 | @property
125 | def name(self):
126 | return self.db_model.name
127 |
128 | @name.setter
129 | def name(self, value):
130 | self.db_model.name = value
131 |
132 | @property
133 | def vm_type(self):
134 | return self.db_model.vm_type
135 |
136 | @vm_type.setter
137 | def vm_type(self, value):
138 | self.db_model.vm_type = value
139 |
140 | @property
141 | def allowed_vm_type_prefixes(self):
142 | return self.db_model.allowed_vm_type_prefixes
143 |
144 | @allowed_vm_type_prefixes.setter
145 | def allowed_vm_type_prefixes(self, value):
146 | self.db_model.allowed_vm_type_prefixes = value
147 |
148 | @property
149 | def zone_id(self):
150 | return self.db_model.zone.id
151 |
152 | @property
153 | def zone(self):
154 | return self.db_model.zone
155 |
156 | @zone.setter
157 | def zone(self, value):
158 | self.db_model.zone = value
159 |
160 | @property
161 | def min_nodes(self):
162 | return self.db_model.min_nodes
163 |
164 | @min_nodes.setter
165 | def min_nodes(self, value):
166 | self.db_model.min_nodes = max(int(value), 0)
167 |
168 | @property
169 | def max_nodes(self):
170 | # 5000 being the current k8s node limit
171 | return self.db_model.max_nodes or 5000
172 |
173 | @max_nodes.setter
174 | def max_nodes(self, value):
175 | self.db_model.max_nodes = max(int(value), 0)
176 |
177 | def delete(self):
178 | return self.service.delete(self)
179 |
180 | def match(self, labels=None):
181 | # Currently, a scaling group matches by zone name and node only.
182 | # In future, we could add other criteria, like the scaling group name
183 | # itself, or custom labels to determine whether this scaling group
184 | # matches a scaling signal.
185 | labels = labels.copy() if labels else {}
186 | zone = labels.pop('availability_zone', None)
187 | scaling_group = labels.get('usegalaxy.org/cm_autoscaling_group')
188 | # Ignore these keys anyway
189 | labels.pop('min_vcpus', None)
190 | labels.pop('min_ram', None)
191 | if not zone and not labels:
192 | return False
193 | match = False
194 | if zone:
195 | match = zone == self.db_model.zone.name
196 | if scaling_group:
197 | match = self.name == scaling_group
198 | if labels:
199 | node = self.cluster.nodes.find(labels=labels)
200 | if node:
201 | match = bool(self.db_model.nodegroup.filter(id=node.id)
202 | .first())
203 | return match
204 |
205 | def _filter_stable_nodes(self, nodegroup):
206 | return list(reversed(
207 | [node for node in nodegroup.all()
208 | if node.is_stable()])
209 | )
210 |
211 | def _filter_running_nodes(self, nodegroup):
212 | return list(reversed(
213 | [node for node in nodegroup.all()
214 | if node.is_running()])
215 | )
216 |
217 | def scaleup(self, labels=None):
218 | print(f"Scaling up in group {self.name} with labels: {labels}")
219 | labels = labels or {}
220 | total_node_count = self.db_model.nodegroup.count()
221 | running_nodes = self._filter_running_nodes(self.db_model.nodegroup)
222 | running_count = len(running_nodes)
223 |
224 |         # Allow the total node count (including failed nodes) to reach 5x max_nodes
225 | if running_count < self.max_nodes and total_node_count < (5 * self.max_nodes):
226 | self.cluster.nodes.create(
227 | vm_type=self.vm_type,
228 | min_vcpus=labels.get('min_vcpus', 0),
229 | min_ram=labels.get('min_ram', 0),
230 | zone=self.zone,
231 | autoscaler=self)
232 |
233 | def scaledown(self, labels=None):
234 | print(f"Scaling down in group {self.name} with labels: {labels}")
235 |         # If we've got here, we've already matched the availability zone
236 |         zone = labels.pop('availability_zone', None) if labels else None
237 | nodes = self._filter_stable_nodes(self.db_model.nodegroup)
238 | node_count = len(nodes)
239 | if node_count > self.min_nodes:
240 | if labels:
241 | matching_node = self.cluster.nodes.find(
242 | labels=labels)
243 | if matching_node and matching_node.is_stable():
244 | print(f"Performing targeted deletion of: {matching_node}")
245 | matching_node.delete()
246 | elif matching_node:
247 | print(f"Node targeted for deletion found {matching_node}"
248 | " but not deleting as another operation is already"
249 | " in progress.")
250 | else:
251 | print(f"Targeted downscale attempted, but matching node"
252 | f" not found with labels: {labels}")
253 | return
254 | else:
255 | # if no host was specified,
256 | # remove the last added node
257 | last_node = nodes[0]
258 | print(f"Non-targeted downscale deleting last launched"
259 | f" node: {last_node}")
260 | node = self.cluster.nodes.get(last_node.id)
261 | node.delete()
262 |
--------------------------------------------------------------------------------
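To make the matching rules above concrete, the sketch below walks a scale-up signal through Cluster.scaleup: the labels are matched against each registered autoscaler and fall back to the default one. The cluster object is assumed to come from CloudManAPI, and the label values are illustrative:

    # Illustrative only: the label set a Prometheus scale-up alert produces
    # (see clusterman/views.py) and how Cluster.scaleup consumes it.
    def handle_scaleup(cluster):
        labels = {
            'availability_zone': 'us-east-1a',                # matched against autoscaler.zone.name
            'usegalaxy.org/cm_autoscaling_group': 'default',  # matched against autoscaler.name
            'min_vcpus': 4,   # ignored by match(), used when sizing the new node
            'min_ram': 8,
        }
        # With autoscale enabled, this reuses a matching autoscaler or the default one.
        cluster.scaleup(labels=labels)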