├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── bin ├── README.md ├── build.sh ├── deploy.sh ├── katana ├── slice_creation_time │ ├── Pipfile │ ├── Pipfile.lock │ ├── README.md │ ├── build │ │ ├── lib │ │ │ └── sct │ │ │ │ ├── __init__.py │ │ │ │ └── sct │ │ └── scripts-3.6 │ │ │ └── sct │ ├── sct.egg-info │ │ ├── PKG-INFO │ │ ├── SOURCES.txt │ │ ├── dependency_links.txt │ │ ├── requires.txt │ │ └── top_level.txt │ ├── sct │ │ ├── __init__.py │ │ └── sct │ └── setup.py ├── stop.sh └── uninstall.sh ├── docker-compose.yaml ├── jenkins ├── cd │ ├── Jenkinsfile │ └── Jenkinsfile.kill └── ci │ ├── Jenkinsfile │ └── tests │ └── functional │ ├── initial_test.sh │ └── list_of_slices.sh ├── katana-alertmanager ├── Dockerfile ├── README.md └── alertmanager.yml ├── katana-apex ├── .classpath ├── .project ├── .pydevproject ├── .settings │ ├── org.eclipse.core.resources.prefs │ └── org.eclipse.jdt.core.prefs ├── Dockerfile ├── config │ └── config.json ├── events │ └── trigger-event.json ├── logic │ ├── SM_Alert_Policy_SSL.js │ ├── SM_Alert_TL.js │ ├── State_One_TL.js │ ├── State_Three_TL.js │ └── State_Two_TL.js ├── logs │ └── logback.xml ├── policy-apex │ └── Policy.apex ├── policy-json │ └── README.md ├── run.bat ├── scripts │ ├── REST_Tester.py │ ├── start.sh │ └── startTest.sh └── tosca-template │ └── ToscaTemplate.json ├── katana-cli ├── .dockerignore ├── Dockerfile ├── README.md ├── cli │ ├── cli.py │ └── commands │ │ ├── cmd_base_slice_des.py │ │ ├── cmd_bootstrap.py │ │ ├── cmd_ems.py │ │ ├── cmd_function.py │ │ ├── cmd_gst.py │ │ ├── cmd_location.py │ │ ├── cmd_nfvo.py │ │ ├── cmd_ns.py │ │ ├── cmd_policy.py │ │ ├── cmd_resources.py │ │ ├── cmd_slice.py │ │ ├── cmd_vim.py │ │ └── cmd_wim.py ├── requirements.txt └── setup.py ├── katana-grafana ├── .env ├── Dockerfile ├── README.md ├── prometheus.yml └── templates │ ├── new_dashboard.json │ ├── new_ns_status_panel.json │ ├── new_vm_monitoring_panel.json │ └── new_wim_panel.json ├── katana-mngr ├── .env ├── 
Dockerfile ├── README.md ├── katana │ ├── katana-mngr.py │ ├── shared_utils │ │ ├── emsUtils │ │ │ ├── amar_emsUtils.py │ │ │ ├── open5gs_emsUtils.py │ │ │ └── test_emsUtils.py │ │ ├── kafkaUtils │ │ │ └── kafkaUtils.py │ │ ├── mongoUtils │ │ │ ├── README.md │ │ │ └── mongoUtils.py │ │ ├── nfvoUtils │ │ │ └── osmUtils.py │ │ ├── policyUtils │ │ │ ├── neatUtils.py │ │ │ └── test_policyUtils.py │ │ ├── sliceUtils │ │ │ └── sliceUtils.py │ │ ├── vimUtils │ │ │ ├── kubernetesUtils.py │ │ │ ├── opennebulaUtils.py │ │ │ └── openstackUtils.py │ │ └── wimUtils │ │ │ ├── odl_wimUtils.py │ │ │ └── test_wimUtils.py │ └── utils │ │ ├── README.md │ │ └── sliceUtils │ │ └── sliceUtils.py └── requirements.txt ├── katana-nbi ├── .dockerignore ├── Dockerfile ├── README.md ├── config │ └── settings.py ├── katana │ ├── api │ │ ├── alerts.py │ │ ├── bootstrap.py │ │ ├── ems.py │ │ ├── function.py │ │ ├── gst.py │ │ ├── locations.py │ │ ├── nfvo.py │ │ ├── nslist.py │ │ ├── policy.py │ │ ├── resource.py │ │ ├── slice.py │ │ ├── slice_des.py │ │ ├── vim.py │ │ └── wim.py │ ├── app.py │ ├── shared_utils │ │ ├── emsUtils │ │ │ ├── amar_emsUtils.py │ │ │ ├── open5gs_emsUtils.py │ │ │ └── test_emsUtils.py │ │ ├── kafkaUtils │ │ │ └── kafkaUtils.py │ │ ├── mongoUtils │ │ │ ├── README.md │ │ │ └── mongoUtils.py │ │ ├── nfvoUtils │ │ │ └── osmUtils.py │ │ ├── policyUtils │ │ │ ├── neatUtils.py │ │ │ └── test_policyUtils.py │ │ ├── sliceUtils │ │ │ └── sliceUtils.py │ │ ├── vimUtils │ │ │ ├── kubernetesUtils.py │ │ │ ├── opennebulaUtils.py │ │ │ └── openstackUtils.py │ │ └── wimUtils │ │ │ ├── odl_wimUtils.py │ │ │ └── test_wimUtils.py │ ├── slice_mapping │ │ └── slice_mapping.py │ └── utils │ │ └── README.md └── requirements.txt ├── katana-nfv_mon ├── .env ├── Dockerfile ├── README.md ├── katana │ ├── dashboards │ │ └── katana.json │ ├── exporter.py │ └── utils │ │ ├── kafkaUtils │ │ └── kafkaUtils.py │ │ ├── mongoUtils │ │ ├── README.md │ │ └── mongoUtils.py │ │ ├── nfvoUtils │ │ └── 
osmUtils.py │ │ └── threadingUtis │ │ └── threadingUtils.py └── requirements.txt ├── katana-prometheus ├── Dockerfile ├── README.md ├── alerts.yml ├── prometheus.yml ├── vim_targets.json └── wim_targets.json ├── katana-swagger ├── Dockerfile ├── README.md ├── fixIP.sh ├── fixVersion.sh └── swagger.json └── templates ├── README.md ├── components ├── ems_model.json ├── nfvo_model.json ├── pdu_model.json ├── policy_model.json ├── vim_model.json └── wim_model.json ├── example_config_files ├── Functions │ ├── example_demo5gcore.json │ └── example_demo5ggnb.json ├── SB_components │ ├── example_ems.json │ ├── example_osm8.json │ ├── example_policy.json │ ├── example_vim_core.json │ ├── example_vim_edge_cosmote.json │ ├── example_vim_edge_minilab.json │ └── example_wim.json ├── bootstrap.json ├── location │ └── example_group0_edge.json └── nest │ ├── example_group0_nest.json │ └── example_group0_nest_fw.json ├── gst_model.json ├── images ├── SlicingOptions.png └── katana-logo.svg ├── nsi_model.json ├── sbi-messages ├── ems_message_model.json └── wim_message_model.json └── supported_functions_model.json /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | # build/ 12 | develop-eggs/ 13 | # dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | # lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | # var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | # *.egg-info/ 26 | .installed.cfg 27 | # *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | local_settings.py 60 | db.sqlite3 61 | db.sqlite3-journal 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # IPython 80 | profile_default/ 81 | ipython_config.py 82 | 83 | # pyenv 84 | .python-version 85 | 86 | # pipenv 87 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 88 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 89 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 90 | # install all needed dependencies. 
91 | #Pipfile.lock 92 | 93 | # celery beat schedule file 94 | celerybeat-schedule 95 | 96 | # SageMath parsed files 97 | *.sage.py 98 | 99 | # Environments 100 | # .env 101 | .venv 102 | env/ 103 | venv/ 104 | ENV/ 105 | env.bak/ 106 | venv.bak/ 107 | 108 | # Spyder project settings 109 | .spyderproject 110 | .spyproject 111 | 112 | # Rope project settings 113 | .ropeproject 114 | 115 | # mkdocs documentation 116 | /site 117 | 118 | # mypy 119 | .mypy_cache/ 120 | .dmypy.json 121 | dmypy.json 122 | 123 | # Pyre type checker 124 | .pyre/ 125 | 126 | # Zookeeper - Kafka Directory 127 | zk-kafka/ 128 | 129 | # dev specific files 130 | .vscode 131 | pyright* 132 | dev/ 133 | development/dev_shared_files 134 | development/dev_config_files 135 | katana-prometheus/wim_targets.json 136 | 137 | # log files 138 | *.log.* -------------------------------------------------------------------------------- /bin/README.md: -------------------------------------------------------------------------------- 1 | # Binary Files and Scripts 2 | 3 | This directory contains scripts that automate the deployment process of Katana Slice Manager 4 | 5 | - __build.sh:__ Build the Docker Images based on the specified Docker registry, repository, user, and release tag. Additionally, the Katana CLI tool will be installed. 6 | - __deploy.sh:__ Deploy the Katana services 7 | - __stop.sh:__ Stop all the running Katana services 8 | - __uninstall.sh:__ Remove the Docker resources and installed tool related to Katana 9 | - __slice_creation_time:__ Python based tool that can be used for measuring the slice creation time. It can be installed using pip. 
More details in the tools [README file](slice_creation_time/README.md) 10 | -------------------------------------------------------------------------------- /bin/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for help option 4 | if [[ " $* " =~ " -h " ]] || [[ " $* " =~ " --help " ]]; 5 | then 6 | printf "Usage:\n\tbuild.sh [-r | --release ] [--docker_reg ] [--docker_repo ]\n\t\t [--docker_reg_user ] [--docker_reg_passwd ] [--push] [--dev] [-h | --help]\nOptions: 7 | \t[-r | --release ] : Define the release that will match the Docker Tag of Katana Docker images (Default: :test). 8 | \t[--docker_reg ] : Define the remote Docker registry. If no docker registry is specified, Katana will try to use the public Docker hub 9 | \t[--docker_repo ] : Define the Docker repository 10 | \t[--docker_reg_user ] : Define the user of the remote Docker registry 11 | \t[--docker_reg_passwd ] : Define the password for the user of the remote Docker registry 12 | \t[--push] : Push the images to the remote Docker registry 13 | \t[--dev] : Create a dev workspace for development purposes 14 | \t[-h | --help] : Print this message and exit\n" 15 | exit 0 16 | fi 17 | 18 | # Get the project directory 19 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && cd .. 
&& pwd )" 20 | 21 | # Check if the user and release fare defined 22 | while [[ $# -gt 0 ]] 23 | do 24 | key=$1 25 | 26 | case $key in 27 | -r | --release) 28 | export DOCKER_TAG="$2" 29 | shift 30 | shift 31 | ;; 32 | --docker_reg) 33 | export DOCKER_REG="$2/" 34 | shift 35 | shift 36 | ;; 37 | --docker_repo) 38 | export DOCKER_REPO="$2/" 39 | shift 40 | shift 41 | ;; 42 | --docker_reg_user) 43 | export DOCKER_REG_USER="$2" 44 | shift 45 | shift 46 | ;; 47 | --docker_reg_passwd) 48 | export DOCKER_REG_PASSWD="$2" 49 | shift 50 | shift 51 | ;; 52 | --push) 53 | PUSH_IMAGE=true 54 | shift 55 | ;; 56 | --dev) 57 | # *** Install development environment *** 58 | echo "Installing development environment" 59 | # Copy hard links of the shared utils in katana-mngr and katana-nbi 60 | read -r -p "Any dev/dev_shared_utils will be lost. Continue? (Y/n) > " ans 61 | if [[ $ans =~ ^n.* ]]; 62 | then 63 | exit 9999 64 | fi 65 | echo "Creating dev/dev_shared_utils. They are hard-linked to both common files in katana-mngr and katana-nbi directories" 66 | mkdir -p "${DIR}/dev/dev_config_files" &> /dev/null && echo "Created dev folder" 67 | cp -r "${DIR}/templates/example_config_files/"* "${DIR}/dev/dev_config_files/" 68 | echo "Creating dev/dev_config_files. They can be used for actual testing. They won't be pushed to remote repository" 69 | rm -rf "${DIR}/katana-nbi/katana/shared_utils" &> /dev/null 70 | rm -rf "${DIR}/dev/dev_shared_utils" &> /dev/null 71 | cp -al "${DIR}/katana-mngr/katana/shared_utils" dev/dev_shared_utils &> /dev/null 72 | cp -al "${DIR}/katana-mngr/katana/shared_utils" katana-nbi/katana/ &> /dev/null 73 | exit 74 | ;; 75 | *) 76 | printf "Wrong option %s\nUse the --help option\n--------\n" "${key}" 77 | exit 1 78 | ;; 79 | esac 80 | done 81 | 82 | # Check if the Docker user and passwd are set. If yes, login to docker registry 83 | if [[ ! -z ${DOCKER_REG_USER+x} && ! 
-z ${DOCKER_REG_PASSWD+x} ]]; then 84 | docker login -u ${DOCKER_REG_USER} -p ${DOCKER_REG_PASSWD} ${DOCKER_REG} 85 | fi 86 | 87 | # Check if the docker tag is set. Otherwise set it to test 88 | if [[ -z ${DOCKER_TAG+x} ]]; then 89 | export DOCKER_TAG="test" 90 | fi 91 | 92 | # Build the images 93 | printf "********************************\n" 94 | printf "**** Building Katana images ****\n" 95 | printf "********************************\n" 96 | docker-compose -f ${DIR}/docker-compose.yaml build 97 | if [[ "${PUSH_IMAGE}" == true ]]; then 98 | printf "*************************\n" 99 | printf "**** Pushing Images ****\n" 100 | printf "*************************\n" 101 | docker-compose -f ${DIR}/docker-compose.yaml push 102 | fi 103 | 104 | # Install the katana command 105 | printf "***********************************\n" 106 | printf "**** Installing Katana Command ****\n" 107 | printf "***********************************\n" 108 | command -v katana &> /dev/null || sudo cp ${DIR}/bin/katana /usr/local/bin/ -------------------------------------------------------------------------------- /bin/katana: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | POSITIONAL=() 4 | while [[ $# -gt 0 ]] 5 | do 6 | key="$1" 7 | dock_copy=false 8 | 9 | case $key in 10 | -f|--file) 11 | FILE="$2" 12 | POSITIONAL+=($1) # save it in an array for later 13 | shift # past argument 14 | shift # past value 15 | dock_copy=true 16 | ;; 17 | logs) 18 | if [[ $2 == "-l" ]] || [[ $2 == "--limit" ]]; 19 | then 20 | limit=$3 21 | else 22 | limit="all" 23 | fi 24 | printf "\n\t\t\t%s\n\t\t\t%s\n\t\t\t%s\n" "+----------------------------+" "| katana-mngr logs |" "+----------------------------+" 25 | docker logs --tail $limit katana-mngr 26 | printf "\n\t\t\t%s\n\t\t\t%s\n\t\t\t%s\n" "+---------------------------+" "| katana-nbi logs |" "+---------------------------+" 27 | docker logs --tail $limit katana-nbi 28 | printf "\n" 29 | exit 0 30 | ;; 31 | *) 
# unknown option 32 | POSITIONAL+=($1) # save it in an array for later 33 | shift # past argument 34 | ;; 35 | esac 36 | done 37 | set -- "${POSITIONAL[@]}" # restore positional parameters 38 | 39 | if [ "$dock_copy" = true ]; then 40 | docker cp "${FILE}" katana-cli:/katana-cli/"$file_name" 41 | file_name=${FILE##*/} 42 | docker container exec -it katana-cli katana "${POSITIONAL[@]}" "$file_name" 43 | else 44 | docker container exec -it katana-cli katana "${POSITIONAL[@]}" 45 | fi -------------------------------------------------------------------------------- /bin/slice_creation_time/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | name = "pypi" 3 | url = "https://pypi.org/simple" 4 | verify_ssl = true 5 | 6 | [dev-packages] 7 | pycodestyle = "*" 8 | black = "*" 9 | 10 | [packages] 11 | requests = "*" 12 | influxdb = "*" 13 | 14 | [requires] 15 | python_version = "3.6" 16 | 17 | [pipenv] 18 | allow_prereleases = true 19 | -------------------------------------------------------------------------------- /bin/slice_creation_time/README.md: -------------------------------------------------------------------------------- 1 | # Service Creation Time 2 | 3 | Script that creates slice based on the provided NEST, gets the Service Creation Time measurements and 4 | exports that to an InfluxDB and to a CSV file 5 | 6 | ## Installation 7 | 8 | ```bash 9 | pip3 install . 10 | ``` 11 | 12 | ## Usage 13 | 14 | ```bash 15 | sct --url URL --nest NEST --iterations I --database DB_URL --csv CSV 16 | ``` 17 | 18 | ### Example 19 | 20 | ```bash 21 | sct --url 10.30.0.180:8000 \ 22 | --iterations 25 \ 23 | --csv output.csv \ 24 | --nest ~/nest_4g_embb.json \ 25 | --database user:password@10.30.0.238:8086/service_creation_time 26 | ``` 27 | 28 | ### Options 29 | 30 | #### --url | -u (Required) 31 | 32 | The url of Katana Slice Manaer that will be used. 
It must be in the form: `host_ip:port` 33 | 34 | #### --nest | -n (Required) 35 | 36 | The NEST JSON file that will be used for the created slice 37 | 38 | #### --iterations | -i (Optional) 39 | 40 | The number of the iterations. By default it is 25 41 | 42 | #### --database | -d (Optional) 43 | 44 | The URL of the Influx DB where the results will be exported. It must be in the form: `username:password@host:port/db` 45 | 46 | > If no db url is provided, the results will be exported by default on a CSV file 47 | 48 | #### --csv | -c (Optional) 49 | 50 | The CSV file where the results will be exported 51 | 52 | > If neither csv file nor database URL is provided, the results will be exported by default at ./output.csv 53 | -------------------------------------------------------------------------------- /bin/slice_creation_time/build/lib/sct/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/medianetlab/katana-slice_manager/2e7a14a41fc85bd7188d71ef9beaf51acc94015c/bin/slice_creation_time/build/lib/sct/__init__.py -------------------------------------------------------------------------------- /bin/slice_creation_time/sct.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: sct 3 | Version: 0.1.0 4 | Summary: Automation script for Service Creation Time measurements 5 | Home-page: https://github.com/medianetlab/5genesis_experimentation/tree/master/slice_creation_time 6 | Author: Themis Anagnostopoulos 7 | Author-email: them.anagno@gmail.com 8 | License: MIT 9 | Description: 10 | # Service Creation Time 11 | 12 | Script that creates slice based on the provided NEST, gets the Service Creation Time measurements and 13 | exports that to an InfluxDB and to a CSV file 14 | 15 | ## Installation 16 | 17 | ```bash 18 | pip3 install . 
19 | ``` 20 | 21 | ## Usage 22 | 23 | ```bash 24 | sct --url URL --nest NEST --iterations I --database DB_URL --csv CSV 25 | ``` 26 | 27 | ### Options 28 | 29 | #### --url | -u (Required) 30 | 31 | The url of Katana Slice Manaer that will be used. It must be in the form: `host_ip:port` 32 | 33 | #### --nest | -n (Required) 34 | 35 | The NEST JSON file that will be used for the created slice 36 | 37 | #### --iterations | -i (Optional) 38 | 39 | The number of the iterations. By default it is 25 40 | 41 | #### --database | -d (Optional) 42 | 43 | The URL of the Influx DB where the results will be exported. It must be in the form: `username:password@host:port/db` 44 | 45 | > If no db url is provided, the results will be exported by default on a CSV file 46 | 47 | #### --csv | -c (Optional) 48 | 49 | The CSV file where the results will be exported 50 | 51 | > If neither csv file nor database URL is provided, the results will be exported by default at ./output.csv 52 | 53 | Platform: UNKNOWN 54 | Classifier: License :: OSI Approved :: MIT License 55 | Classifier: Programming Language :: Python 56 | Classifier: Programming Language :: Python :: 3 57 | Classifier: Programming Language :: Python :: 3.6 58 | Classifier: Programming Language :: Python :: Implementation :: CPython 59 | Classifier: Programming Language :: Python :: Implementation :: PyPy 60 | Requires-Python: >=3.6.0 61 | Description-Content-Type: text/markdown 62 | -------------------------------------------------------------------------------- /bin/slice_creation_time/sct.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | README.md 2 | setup.py 3 | sct/__init__.py 4 | sct/sct 5 | sct.egg-info/PKG-INFO 6 | sct.egg-info/SOURCES.txt 7 | sct.egg-info/dependency_links.txt 8 | sct.egg-info/requires.txt 9 | sct.egg-info/top_level.txt -------------------------------------------------------------------------------- 
/bin/slice_creation_time/sct.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /bin/slice_creation_time/sct.egg-info/requires.txt: -------------------------------------------------------------------------------- 1 | requests 2 | influxdb 3 | -------------------------------------------------------------------------------- /bin/slice_creation_time/sct.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | sct 2 | -------------------------------------------------------------------------------- /bin/slice_creation_time/sct/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/medianetlab/katana-slice_manager/2e7a14a41fc85bd7188d71ef9beaf51acc94015c/bin/slice_creation_time/sct/__init__.py -------------------------------------------------------------------------------- /bin/slice_creation_time/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | # Note: To use the 'upload' functionality of this file, you must: 5 | # $ pipenv install twine --dev 6 | 7 | import io 8 | import os 9 | import sys 10 | from shutil import rmtree 11 | 12 | from setuptools import find_packages, setup, Command 13 | 14 | # Package meta-data. 15 | NAME = "sct" 16 | DESCRIPTION = "Automation script for Service Creation Time measurements" 17 | URL = "https://github.com/medianetlab/5genesis_experimentation/tree/master/slice_creation_time" 18 | EMAIL = "them.anagno@gmail.com" 19 | AUTHOR = "Themis Anagnostopoulos" 20 | REQUIRES_PYTHON = ">=3.6.0" 21 | VERSION = "0.1.0" 22 | 23 | # What packages are required for this module to be executed? 24 | REQUIRED = ["requests", "influxdb"] 25 | 26 | # What packages are optional? 
27 | EXTRAS = {} 28 | 29 | # The rest you shouldn't have to touch too much :) 30 | # ------------------------------------------------ 31 | # Except, perhaps the License and Trove Classifiers! 32 | # If you do change the License, remember to change the Trove Classifier for that! 33 | 34 | here = os.path.abspath(os.path.dirname(__file__)) 35 | 36 | # Import the README and use it as the long-description. 37 | # Note: this will only work if 'README.md' is present in your MANIFEST.in file! 38 | try: 39 | with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f: 40 | long_description = "\n" + f.read() 41 | except FileNotFoundError: 42 | long_description = DESCRIPTION 43 | 44 | # Load the package's __version__.py module as a dictionary. 45 | about = {} 46 | if not VERSION: 47 | project_slug = NAME.lower().replace("-", "_").replace(" ", "_") 48 | with open(os.path.join(here, project_slug, "__version__.py")) as f: 49 | exec(f.read(), about) 50 | else: 51 | about["__version__"] = VERSION 52 | 53 | 54 | class UploadCommand(Command): 55 | """Support setup.py upload.""" 56 | 57 | description = "Build and publish the package." 
58 | user_options = [] 59 | 60 | @staticmethod 61 | def status(s): 62 | """Prints things in bold.""" 63 | print(f"\033[1m{s}\033[0m") 64 | 65 | def initialize_options(self): 66 | pass 67 | 68 | def finalize_options(self): 69 | pass 70 | 71 | def run(self): 72 | try: 73 | self.status("Removing previous builds…") 74 | rmtree(os.path.join(here, "dist")) 75 | except OSError: 76 | pass 77 | 78 | self.status("Building Source and Wheel (universal) distribution…") 79 | os.system(f"{sys.executable} setup.py sdist bdist_wheel --universal") 80 | 81 | self.status("Uploading the package to PyPI via Twine…") 82 | os.system("twine upload dist/*") 83 | 84 | self.status("Pushing git tags…") 85 | os.system("git tag v{0}".format(about["__version__"])) 86 | os.system("git push --tags") 87 | 88 | sys.exit() 89 | 90 | 91 | # Where the magic happens: 92 | setup( 93 | name=NAME, 94 | version=about["__version__"], 95 | description=DESCRIPTION, 96 | long_description=long_description, 97 | long_description_content_type="text/markdown", 98 | author=AUTHOR, 99 | author_email=EMAIL, 100 | python_requires=REQUIRES_PYTHON, 101 | url=URL, 102 | packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]), 103 | # If your package is a single module, use this instead of 'packages': 104 | # py_modules=['mypackage'], 105 | # entry_points={ 106 | # 'console_scripts': ['mycli=mymodule:cli'], 107 | # }, 108 | scripts=["sct/sct"], 109 | install_requires=REQUIRED, 110 | extras_require=EXTRAS, 111 | include_package_data=True, 112 | license="MIT", 113 | classifiers=[ 114 | # Trove classifiers 115 | # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers 116 | "License :: OSI Approved :: MIT License", 117 | "Programming Language :: Python", 118 | "Programming Language :: Python :: 3", 119 | "Programming Language :: Python :: 3.6", 120 | "Programming Language :: Python :: Implementation :: CPython", 121 | "Programming Language :: Python :: Implementation :: PyPy", 122 | ], 123 | # 
$ setup.py publish support. 124 | cmdclass={"upload": UploadCommand}, 125 | ) 126 | -------------------------------------------------------------------------------- /bin/stop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check for help option 4 | if [[ " $* " =~ " -h " ]] || [[ " $* " =~ " --help " ]]; 5 | then 6 | printf "Usage:\n\tstop.sh [-c | --clear] [-h | --help]\nOptions: 7 | \t[-c | --clear] : Remove the container volumes 8 | \t[-h | --help] : Print this message and quit\n" 9 | exit 0 10 | fi 11 | 12 | # Get the options 13 | while [[ $# -gt 0 ]] 14 | do 15 | key=$1 16 | case $key in 17 | -c | --clear) 18 | options="$options -v" 19 | shift 20 | ;; 21 | *) 22 | printf "Wrong option %s\n--------\n" "${key}" 23 | printf "Usage:\n\tstop.sh [-c | --clear] [-h | --help]\nOptions: 24 | \t[-c | --clear] : Remove the container volumes 25 | \t[-h | --help] : Print this message and quit\n" 26 | exit 0 27 | ;; 28 | esac 29 | done 30 | 31 | # Get the project directory 32 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && cd .. && pwd )" 33 | 34 | # Avoid warning messages 35 | export DOCKER_TAG="" 36 | export DOCKER_REG="" 37 | export DOCKER_REG_USER="" 38 | export DOCKER_REG_PASSWD="" 39 | export DOCKER_REPO="" 40 | export KATANA_MONITORING="" 41 | export KATANA_HOST="" 42 | export APEX="" 43 | 44 | # Check if there are any running slices 45 | running_slices=$(curl -s http://localhost:8000/api/slice) 46 | if [[ ${running_slices} != "[]" && ${running_slices} != "" ]]; then 47 | read -rp "There are running slices. Are you sure you want to stop the SM? 
[y/N] > " ans 48 | if [[ ${ans} != "y" ]]; then 49 | exit 1 50 | fi 51 | fi 52 | 53 | # Stop the containers 54 | docker-compose -f ${DIR}/docker-compose.yaml down ${options} 55 | 56 | # Remove the katana-log files 57 | rm -f katana-nbi/katana.log* katana-mngr/katana.log* 58 | -------------------------------------------------------------------------------- /bin/uninstall.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Uninstall katana processes 4 | 5 | # Get the project directory 6 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && cd .. && pwd )" 7 | 8 | # Stop all the containers and remove the db 9 | bash ${DIR}/bin/stop.sh -c 10 | 11 | # Remove the docker images 12 | docker rmi -f $(docker images | grep katana | awk '{print $3}') &> /dev/null 13 | 14 | # Remove katana commands 15 | sudo rm /usr/local/bin/katana || true 16 | -------------------------------------------------------------------------------- /jenkins/cd/Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | node { 4 | label "${prod_server}" 5 | } 6 | } 7 | 8 | environment { 9 | MAJOR_RELEASE="${sh(script:'git fetch --tags && git tag --sort version:refname | tail -1', returnStdout: true).trim()}" 10 | } 11 | 12 | stages { 13 | // ******************************** 14 | // *** Deploy The Latest Images *** 15 | // ******************************** 16 | stage("deploy_latest_in_production") { 17 | steps{ 18 | withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'katana-reg-creds', usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]){ 19 | echo "***** Deploying new version ${MAJOR_RELEASE} *****" 20 | sh 'echo ${prod_server_ip} | bash bin/deploy.sh -m -p --release latest --docker_reg "${katana_reg}" --docker_repo "${katana_repo}" --docker_reg_user ${USERNAME} --docker_reg_passwd ${PASSWORD} --no_build' 21 | } 22 | } 23 | } 24 | } 25 | 
26 | post{ 27 | failure{ 28 | slackSend (color: "#FF0000", message: "Failed to deploy Katana version ${MAJOR_RELEASE} on production environment: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})") 29 | } 30 | 31 | success{ 32 | slackSend (color: "#008000", message: "Successfully deployed Katana version ${MAJOR_RELEASE} on production environment : '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})") 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /jenkins/cd/Jenkinsfile.kill: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | node { 4 | label "${prod_server}" 5 | } 6 | } 7 | 8 | stages { 9 | // ****************************** 10 | // *** Kill Katana Containers *** 11 | // ****************************** 12 | stage("kill_katana_containers") { 13 | steps{ 14 | sh 'docker container rm -f $(docker ps --filter "name=katana" -q)' 15 | } 16 | } 17 | 18 | // ***************************** 19 | // *** Remove Katana Volumes *** 20 | // ***************************** 21 | stage("remove_katana_volumes") { 22 | steps{ 23 | sh 'docker volume rm $(docker volume ls --filter name=katana -q)' 24 | } 25 | } 26 | } 27 | } -------------------------------------------------------------------------------- /jenkins/ci/Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | node { 4 | label "${test_server}" 5 | } 6 | } 7 | 8 | environment { 9 | MAJOR_RELEASE="${sh(script:'git fetch --tags && git tag --sort version:refname | tail -1', returnStdout: true).trim()}" 10 | TAG_NUMBER="${MAJOR_RELEASE}.${env.BUILD_NUMBER}" 11 | } 12 | 13 | stages { 14 | // ************************* 15 | // *** Build Test Images *** 16 | // ************************* 17 | stage("build_test_images") { 18 | steps{ 19 | echo "**** Building Test Katana Images ****" 20 | sh 'bash bin/build.sh' 21 | } 22 | } 23 | 24 | // 
****************************** 25 | // *** Test Katana Deployment *** 26 | // ****************************** 27 | stage("test_deployment") { 28 | steps{ 29 | echo "**** Test Katana Deployment ****" 30 | sh 'bash bin/deploy.sh' 31 | } 32 | } 33 | 34 | // ************************ 35 | // *** Functional Tests *** 36 | // ************************ 37 | stage("functional_tests"){ 38 | steps{ 39 | echo "**** Running Functional Test ****" 40 | sh 'bash jenkins/ci/tests/functional/list_of_slices.sh' 41 | } 42 | } 43 | 44 | // ****************************** 45 | // *** Remove Test Deployment *** 46 | // ****************************** 47 | stage("remove_test_deployment") { 48 | steps{ 49 | echo "**** Remove Test Katana Deployment ****" 50 | sh 'bash bin/stop.sh' 51 | } 52 | } 53 | 54 | // ************************ 55 | // *** Clean Everything *** 56 | // ************************ 57 | stage("clean_everything") { 58 | steps{ 59 | echo "**** Clean Everything ****" 60 | sh 'bash bin/uninstall.sh' 61 | } 62 | } 63 | 64 | // ***************************** 65 | // *** Push Images If Master *** 66 | // ***************************** 67 | stage("push_images") { 68 | when { 69 | anyOf { 70 | allOf { 71 | changeset "**/katana-**/**" 72 | environment name: "GIT_BRANCH", value: "origin/master" 73 | } 74 | expression { return "${force_push_images}" == "true" } 75 | } 76 | } 77 | steps{ 78 | withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'katana-reg-creds', usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]){ 79 | echo "**** Build & Push Images ****" 80 | sh 'bash bin/build.sh --release ${TAG_NUMBER} --docker_reg "${katana_reg}" --docker_repo "${katana_repo}" --docker_reg_user ${USERNAME} --docker_reg_passwd ${PASSWORD} --push' 81 | sh 'bash bin/build.sh --release latest --docker_reg "${katana_reg}" --docker_repo "${katana_repo}" --docker_reg_user ${USERNAME} --docker_reg_passwd ${PASSWORD} --push' 82 | } 83 | } 84 | } 85 | 86 | // 
******************************** 87 | // *** Deploy Staging If Master *** 88 | // ******************************** 89 | stage("deploy_latest") { 90 | when { 91 | anyOf { 92 | allOf { 93 | changeset "**/katana-**/**" 94 | environment name: "GIT_BRANCH", value: "origin/master" 95 | } 96 | expression { return "${force_push_images}" == "true" } 97 | } 98 | } 99 | agent { 100 | node { 101 | label "${stg_server}" 102 | } 103 | } 104 | steps{ 105 | withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: 'katana-reg-creds', usernameVariable: 'USERNAME', passwordVariable: 'PASSWORD']]){ 106 | echo "***** Deploying new version ${MAJOR_RELEASE} *****" 107 | sh 'bash bin/deploy.sh -m -p --release latest --docker_reg "${katana_reg}" --docker_repo "${katana_repo}" --docker_reg_user ${USERNAME} --docker_reg_passwd ${PASSWORD} --no_build' 108 | } 109 | } 110 | } 111 | } 112 | 113 | 114 | post{ 115 | success{ 116 | script { 117 | if (env.BRANCH_NAME == 'origin/master') 118 | slackSend (color: "#008000", message: "Successfully deployed Katana version ${MAJOR_RELEASE} on staging environment : '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})") 119 | } 120 | } 121 | failure{ 122 | slackSend (color: "#FF0000", message: "Katana CI Failed: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})") 123 | } 124 | fixed{ 125 | slackSend (color: "#008000", message: "Katana CI Back to normal: '${env.JOB_NAME} [${env.BUILD_NUMBER}]' (${env.BUILD_URL})") 126 | } 127 | } 128 | } -------------------------------------------------------------------------------- /jenkins/ci/tests/functional/initial_test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # *** Initial Test *** 4 | 5 | echo """ 6 | # ********************************* 7 | # **** Katana Integration Test **** 8 | # ********************************* 9 | """ 10 | 11 | # Get the katana logs to check if the 12 | response=$(docker container run --rm 
--network container:katana-nbi nicolaka/netshoot curl --write-out '%{http_code}' --silent --output /dev/null http://katana-nbi:8000/api/slice) 13 | 14 | if [[ $response != 200 ]]; then 15 | echo "**** TEST FAILED ****" 16 | echo "**** RESPONSE = $response *****" 17 | exit 1 18 | fi 19 | 20 | echo "**** TEST PASSED ****" -------------------------------------------------------------------------------- /jenkins/ci/tests/functional/list_of_slices.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # *** Initial Test *** 4 | 5 | echo """ 6 | # ********************************* 7 | # **** Katana Integration Test **** 8 | # ********************************* 9 | """ 10 | 11 | # Get the katana logs to check if the 12 | response=$(docker container run --rm --network container:katana-nbi nicolaka/netshoot curl --write-out '%{http_code}' --silent --output /dev/null http://katana-nbi:8000/api/slice) 13 | 14 | if [[ $response != 200 ]]; then 15 | echo "**** TEST FAILED ****" 16 | echo "**** RESPONSE = $response *****" 17 | exit 1 18 | fi 19 | 20 | echo "**** TEST PASSED ****" -------------------------------------------------------------------------------- /katana-alertmanager/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM prom/alertmanager:v0.23.0 2 | 3 | COPY katana-alertmanager/alertmanager.yml /etc/alertmanager/alertmanager.yml 4 | -------------------------------------------------------------------------------- /katana-alertmanager/README.md: -------------------------------------------------------------------------------- 1 | # Katana Prometheus Alertmanager 2 | 3 | Service that hosts the Prometheus Alertmanager server. 
Container name: katana-alertmanager 4 | 5 | > Visit the Wiki Page for documentation 6 | -------------------------------------------------------------------------------- /katana-alertmanager/alertmanager.yml: -------------------------------------------------------------------------------- 1 | global: 2 | resolve_timeout: 30s 3 | 4 | route: 5 | receiver: katana_rest 6 | repeat_interval: 30s 7 | group_wait: 30s 8 | group_interval: 30s 9 | 10 | receivers: 11 | - name: katana_rest 12 | webhook_configs: 13 | - send_resolved: false 14 | url: http://katana-nbi:8000/api/alert 15 | -------------------------------------------------------------------------------- /katana-apex/.classpath: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /katana-apex/.project: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5genesis-athens 4 | 5 | 6 | 7 | 8 | 9 | org.python.pydev.PyDevBuilder 10 | 11 | 12 | 13 | 14 | org.eclipse.jdt.core.javabuilder 15 | 16 | 17 | 18 | 19 | 20 | org.eclipse.jdt.core.javanature 21 | org.python.pydev.pythonNature 22 | 23 | 24 | -------------------------------------------------------------------------------- /katana-apex/.pydevproject: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Default 6 | 7 | 8 | python interpreter 9 | 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /katana-apex/.settings/org.eclipse.core.resources.prefs: -------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | encoding//scripts/REST_Tester.py=utf-8 3 | -------------------------------------------------------------------------------- /katana-apex/.settings/org.eclipse.jdt.core.prefs: 
-------------------------------------------------------------------------------- 1 | eclipse.preferences.version=1 2 | org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled 3 | org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8 4 | org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve 5 | org.eclipse.jdt.core.compiler.compliance=1.8 6 | org.eclipse.jdt.core.compiler.debug.lineNumber=generate 7 | org.eclipse.jdt.core.compiler.debug.localVariable=generate 8 | org.eclipse.jdt.core.compiler.debug.sourceFile=generate 9 | org.eclipse.jdt.core.compiler.problem.assertIdentifier=error 10 | org.eclipse.jdt.core.compiler.problem.enumIdentifier=error 11 | org.eclipse.jdt.core.compiler.source=1.8 12 | -------------------------------------------------------------------------------- /katana-apex/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM onap/policy-apex-pdp:2.6.0 2 | WORKDIR /home/apexuser 3 | 4 | COPY katana-apex/policy-apex/. ./policy-apex/ 5 | RUN mkdir -p ./policy-json 6 | COPY katana-apex/policy-json/. ./policy-json/ 7 | COPY katana-apex/logic/. ./logic/ 8 | COPY katana-apex/config/. ./config/ 9 | COPY katana-apex/logs/logback.xml /opt/app/policy/apex-pdp/etc/ 10 | COPY katana-apex/tosca-template/. ./tosca-template/ 11 | 12 | COPY katana-apex/scripts/. 
./ 13 | 14 | EXPOSE 12345 18989 23324 8080 5000 9092 15 | 16 | RUN apexCLIToscaEditor.sh -c policy-apex/Policy.apex -ot policy-json/Policy.json -l ./test.log -ac config/config.json -t ./tosca-template/ToscaTemplate.json 17 | CMD ["./start.sh"] 18 | 19 | #CMD ["./startTest.sh"] 20 | -------------------------------------------------------------------------------- /katana-apex/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "engineServiceParameters": { 3 | "name": "APEX-Engine", 4 | "version": "0.0.1", 5 | "id": 45, 6 | "instanceCount": 4, 7 | "deploymentPort": 12345, 8 | "policyModelFileName": "policy-json/Policy.json", 9 | "engineParameters": { 10 | "executorParameters": { 11 | "JAVASCRIPT": { 12 | "parameterClassName": "org.onap.policy.apex.plugins.executor.javascript.JavascriptExecutorParameters" 13 | } 14 | } 15 | } 16 | }, 17 | "eventInputParameters": { 18 | "aConsumer": { 19 | "carrierTechnologyParameters": { 20 | "carrierTechnology": "KAFKA", 21 | "parameterClassName": "org.onap.policy.apex.plugins.event.carrier.kafka.KafkaCarrierTechnologyParameters", 22 | "parameters": { 23 | "bootstrapServers": "katana-kafka:19092", 24 | "groupId": "apex-group-id", 25 | "enableAutoCommit": true, 26 | "autoCommitTime": 1000, 27 | "sessionTimeout": 30000, 28 | "consumerPollTime": 100, 29 | "consumerTopicList": [ 30 | "apex-in-0", 31 | "apex-in-1" 32 | ], 33 | "keyDeserializer": "org.apache.kafka.common.serialization.StringDeserializer", 34 | "valueDeserializer": "org.apache.kafka.common.serialization.StringDeserializer" 35 | } 36 | }, 37 | "eventProtocolParameters": { 38 | "eventProtocol": "JSON" 39 | } 40 | }, 41 | "RestServerConsumer": { 42 | "carrierTechnologyParameters": { 43 | "carrierTechnology": "RESTSERVER", 44 | "parameterClassName": "org.onap.policy.apex.plugins.event.carrier.restserver.RestServerCarrierTechnologyParameters", 45 | "parameters": { 46 | "standalone": true, 47 | "host": "0.0.0.0", 48 | 
"port": 23324 49 | } 50 | }, 51 | "eventProtocolParameters": { 52 | "eventProtocol": "JSON" 53 | }, 54 | "synchronousMode": true, 55 | "synchronousPeer": "RestServerProducer", 56 | "synchronousTimeout": 1000 57 | } 58 | }, 59 | "eventOutputParameters": { 60 | "IOProducer": { 61 | "carrierTechnologyParameters": { 62 | "carrierTechnology": "FILE", 63 | "parameters": { 64 | "standardIo": true 65 | } 66 | }, 67 | "eventProtocolParameters": { 68 | "eventProtocol": "JSON" 69 | } 70 | }, 71 | "RestServerProducer": { 72 | "carrierTechnologyParameters": { 73 | "carrierTechnology": "RESTSERVER", 74 | "parameterClassName": "org.onap.policy.apex.plugins.event.carrier.restserver.RestServerCarrierTechnologyParameters" 75 | }, 76 | "eventProtocolParameters": { 77 | "eventProtocol": "JSON" 78 | }, 79 | "synchronousMode": true, 80 | "synchronousPeer": "RestServerConsumer", 81 | "synchronousTimeout": 1000 82 | }, 83 | "APEX2SMRestClientProducer": { 84 | "carrierTechnologyParameters": { 85 | "carrierTechnology": "RESTCLIENT", 86 | "parameterClassName": "org.onap.policy.apex.plugins.event.carrier.restclient.RestClientCarrierTechnologyParameters", 87 | "parameters": { 88 | "url": "http://katana-nbi:8000/api/policy/apex/action", 89 | "httpMethod": "POST", 90 | "httpHeaders": [ 91 | [ 92 | "Keep-Alive", 93 | "300" 94 | ], 95 | [ 96 | "Cache-Control", 97 | "no-cache" 98 | ] 99 | ] 100 | } 101 | }, 102 | "eventProtocolParameters": { 103 | "eventProtocol": "JSON" 104 | }, 105 | "eventNameFilter": "APEX_Wrapped_Event" 106 | } 107 | } 108 | } -------------------------------------------------------------------------------- /katana-apex/events/trigger-event.json: -------------------------------------------------------------------------------- 1 | { 2 | "name" : "SMAlert", 3 | "nameSpace" : "sm.alert.manager.events", 4 | "version" : "0.0.1", 5 | "source" : "SMAlertManager", 6 | "target" : "APEX", 7 | "sliceId" : "1234", 8 | "alertType": "failingNS", 9 | "alertMessage": { 10 | "NS_ID" : "1234", 
11 | "NSD_ID" : "1234", 12 | "status" : "up" 13 | } 14 | } -------------------------------------------------------------------------------- /katana-apex/logic/SM_Alert_Policy_SSL.js: -------------------------------------------------------------------------------- 1 | var logger = executor.logger; 2 | 3 | logger.info("##START## SM_ALERT_POLICY_SSL"); 4 | 5 | logger.info("Possible Outputs: " + executor.stateOutputNames); 6 | 7 | var alertAlbum = executor.getContextAlbum("Alerts_Album"); 8 | var alertAlbumSize = alertAlbum.size(); 9 | 10 | var lastAlertData = alertAlbum.get(String(alertAlbumSize-1)); 11 | logger.info("~~Last Alert" + lastAlertData); 12 | 13 | var lastAlertSliceId = lastAlertData.sliceId; 14 | logger.info("~~Last Alert is on slice" + lastAlertData.sliceId); 15 | 16 | var alertInstance = 0; 17 | for (var i = 0; i < alertAlbum.size(); i++) { 18 | logger.info("~~Check if : " +alertAlbum.get(String(i)).sliceId + " equals " + lastAlertData.sliceId); 19 | if(alertAlbum.get(String(i)).sliceId == lastAlertData.sliceId) 20 | { 21 | alertInstance++; 22 | logger.info("~~Alert has been matched"); 23 | 24 | } 25 | } 26 | 27 | logger.info("~~Number of alertInstance: " + alertInstance); 28 | 29 | if(alertInstance === 1) 30 | { 31 | //step 1 First Occurance 32 | executor.setSelectedStateOutputName("firstCase"); 33 | 34 | } 35 | else if(alertInstance === 2) 36 | { 37 | //step 2, Happened before 38 | executor.setSelectedStateOutputName("secondCase"); 39 | 40 | } 41 | else if(alertInstance > 2) 42 | { 43 | //step 3, report fault 44 | executor.setSelectedStateOutputName("thirdCase"); 45 | 46 | } 47 | 48 | logger.info("Selected Output: " + executor.selectedStateOutputName); 49 | 50 | logger.info("##END## SM_ALERT_POLICY_SSL"); 51 | 52 | var returnValue = true; 53 | returnValue; -------------------------------------------------------------------------------- /katana-apex/logic/SM_Alert_TL.js: -------------------------------------------------------------------------------- 1 
| var logger = executor.logger; 2 | var time = new Date(); 3 | 4 | logger.info("##START## SM_Alert_TL"); 5 | 6 | var sliceId = executor.inFields.get("sliceId"); 7 | logger.info("~~sliceId: " + sliceId); 8 | 9 | var alertType = executor.inFields.get("alertType"); 10 | logger.info("~~alertType: " + alertType); 11 | 12 | var alertMessage = executor.inFields.get("alertMessage"); 13 | logger.info("~~alertMessage: " + alertMessage); 14 | 15 | var timestamp = time; 16 | logger.info("~~timestamp: " + timestamp); 17 | 18 | var contextAlert = {"sliceId":sliceId, "alertType":alertType, "alertMessage":alertMessage, "timestamp":timestamp}; 19 | 20 | var alertAlbumSize = executor.getContextAlbum("Alerts_Album").size(); 21 | logger.info("~~Alert Album has: " + alertAlbumSize + " elements"); 22 | 23 | executor.getContextAlbum("Alerts_Album").put(String(alertAlbumSize), contextAlert); 24 | logger.info("~~Alert Message stored in location: " + alertAlbumSize + " in Context_Album"); 25 | 26 | var contextAlbumValues = executor.getContextAlbum("Alerts_Album").values(); 27 | logger.info("~~Alerts Album Data: " + contextAlbumValues); 28 | 29 | var contextAlbumKeys = executor.getContextAlbum("Alerts_Album").keySet(); 30 | logger.info("~~Alerts Album Keys: " + contextAlbumKeys); 31 | 32 | var alertAlbumData = executor.getContextAlbum("Alerts_Album").get(String(alertAlbumSize)); 33 | logger.info("~~Got alert message from context with this timestamp: " + alertAlbumData.timestamp); 34 | 35 | executor.outFields.put("report", "Alert is stored"); 36 | 37 | logger.info("##END## SM_Alert_TL"); 38 | 39 | var returnValue = true; 40 | returnValue; 41 | -------------------------------------------------------------------------------- /katana-apex/logic/State_One_TL.js: -------------------------------------------------------------------------------- 1 | var logger = executor.logger; 2 | var time = new Date(); 3 | 4 | logger.info("##START## State_One_TL"); 5 | 6 | var report = 
executor.inFields.get("report"); 7 | logger.info("~~report: " + report); 8 | 9 | var alertAlbumSize = executor.getContextAlbum("Alerts_Album").size(); 10 | logger.info("~~Alert Album has: " + alertAlbumSize + " elements"); 11 | 12 | var alertAlbumData = executor.getContextAlbum("Alerts_Album").get(String(alertAlbumSize-1)); 13 | logger.info("~~Got alert message from context with this timestamp: " + alertAlbumData.timestamp); 14 | 15 | var policyType = alertAlbumData.alertType; 16 | 17 | var action = "restart_ns"; 18 | var slice_id = alertAlbumData.sliceId; 19 | var ns_id = alertAlbumData.alertMessage.get("NS_ID"); 20 | var nsd_id = alertAlbumData.alertMessage.get("NSD_ID"); 21 | var restrictions = {}; 22 | var extra_actions = {}; 23 | 24 | var policy = { 25 | "action": action, 26 | "slice_id": slice_id, 27 | "ns_id": ns_id, 28 | "nsd_id": nsd_id, 29 | "restrictions": restrictions, 30 | "extra_actions": extra_actions 31 | }; 32 | 33 | executor.outFields.put("policyType", policyType); 34 | executor.outFields.put("policy", policy); 35 | 36 | logger.info("##END## State_One_TL"); 37 | 38 | var returnValue = true; 39 | returnValue; 40 | -------------------------------------------------------------------------------- /katana-apex/logic/State_Three_TL.js: -------------------------------------------------------------------------------- 1 | var logger = executor.logger; 2 | var time = new Date(); 3 | 4 | logger.info("##START## State_Three_TL"); 5 | 6 | var report = executor.inFields.get("report"); 7 | logger.info("~~report: " + report); 8 | 9 | var alertAlbumSize = executor.getContextAlbum("Alerts_Album").size(); 10 | logger.info("~~Alert Album has: " + alertAlbumSize + " elements"); 11 | 12 | var alertAlbumData = executor.getContextAlbum("Alerts_Album").get(String(alertAlbumSize-1)); 13 | logger.info("~~Got alert message from context with this timestamp: " + alertAlbumData.timestamp); 14 | 15 | var policyType = alertAlbumData.alertType; 16 | 17 | var action = "stop_slice"; 
18 | var slice_id = alertAlbumData.sliceId; 19 | var ns_id = alertAlbumData.alertMessage.get("NS_ID"); 20 | var nsd_id = alertAlbumData.alertMessage.get("NSD_ID"); 21 | var restrictions = {}; 22 | var extra_actions = { "notify_neat": true, "notify_admin": true }; 23 | 24 | var policy = { 25 | "action": action, 26 | "slice_id": slice_id, 27 | "ns_id": ns_id, 28 | "nsd_id": nsd_id, 29 | "restrictions": restrictions, 30 | "extra_actions": extra_actions 31 | }; 32 | 33 | executor.outFields.put("policyType", policyType); 34 | executor.outFields.put("policy", policy); 35 | 36 | logger.info("##END## State_Three_TL"); 37 | 38 | var returnValue = true; 39 | returnValue; 40 | -------------------------------------------------------------------------------- /katana-apex/logic/State_Two_TL.js: -------------------------------------------------------------------------------- 1 | var logger = executor.logger; 2 | var time = new Date(); 3 | 4 | logger.info("##START## State_Two_TL"); 5 | 6 | var report = executor.inFields.get("report"); 7 | logger.info("~~report: " + report); 8 | 9 | var alertAlbumSize = executor.getContextAlbum("Alerts_Album").size(); 10 | logger.info("~~Alert Album has: " + alertAlbumSize + " elements"); 11 | 12 | var alertAlbumData = executor.getContextAlbum("Alerts_Album").get(String(alertAlbumSize-1)); 13 | logger.info("~~Got alert message from context with this timestamp: " + alertAlbumData.timestamp); 14 | 15 | var policyType = alertAlbumData.alertType; 16 | 17 | var action = "restart_slice"; 18 | var slice_id = alertAlbumData.sliceId; 19 | var ns_id = alertAlbumData.alertMessage.get("NS_ID"); 20 | var nsd_id = alertAlbumData.alertMessage.get("NSD_ID"); 21 | var restrictions = { "relocate_ns" : true }; 22 | var extra_actions = { "notify_NEAT": true }; 23 | 24 | var policy = { 25 | "action": action, 26 | "slice_id": slice_id, 27 | "ns_id": ns_id, 28 | "nsd_id": nsd_id, 29 | "restrictions": restrictions, 30 | "extra_actions": extra_actions 31 | }; 32 | 33 | 
executor.outFields.put("policyType", policyType); 34 | executor.outFields.put("policy", policy); 35 | 36 | logger.info("##END## State_Two_TL"); 37 | 38 | var returnValue = true; 39 | returnValue; 40 | -------------------------------------------------------------------------------- /katana-apex/logs/logback.xml: -------------------------------------------------------------------------------- 1 | 17 | 18 | 19 | 20 | 21 | /var/log/onap/policy/apex-pdp/error.log 22 | 23 | /var/log/onap/policy/apex-pdp/error.%d{yyyy-MM-dd}.%i.log.zip 24 | 25 | 50MB 26 | 30 27 | 10GB 28 | 29 | 30 | WARN 31 | 32 | 33 | [%d{yyyy-MM-dd'T'HH:mm:ss.SSS+00:00, UTC}|%level|%logger{0}|%thread] %msg%n 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | /var/log/onap/policy/apex-pdp/debug.log 43 | 44 | /var/log/onap/policy/apex-pdp/debug.%d{yyyy-MM-dd}.%i.log.zip 45 | 46 | 50MB 47 | 30 48 | 10GB 49 | 50 | 51 | [%d{yyyy-MM-dd'T'HH:mm:ss.SSS+00:00, UTC}|%level|%logger{0}|%thread] %msg%n 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | /var/log/onap/policy/apex-pdp/network.log 61 | 62 | /var/log/onap/policy/apex-pdp/network.%d{yyyy-MM-dd}.%i.log.zip 63 | 64 | 50MB 65 | 30 66 | 10GB 67 | 68 | 69 | [%d{yyyy-MM-dd'T'HH:mm:ss.SSS+00:00, UTC}|%t]%m%n 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | -------------------------------------------------------------------------------- /katana-apex/policy-json/README.md: -------------------------------------------------------------------------------- 1 | # APEX Policy JSON Files 2 | -------------------------------------------------------------------------------- /katana-apex/run.bat: -------------------------------------------------------------------------------- 1 | docker build --no-cache -t 5genesis-athens-apex . 
2 | 3 | docker run -it -p 23324:23324 -p 5000:5000 -p 9092:9092 --name apex-engine --rm 5genesis-athens-apex 4 | 5 | pause 6 | -------------------------------------------------------------------------------- /katana-apex/scripts/REST_Tester.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # encoding: utf-8 3 | import json 4 | import time 5 | import requests 6 | 7 | from flask import Flask 8 | from flask import request 9 | from flask import jsonify 10 | 11 | app = Flask(__name__) 12 | @app.route('/') 13 | def index(): 14 | time.sleep(5) 15 | return json.dumps({ 16 | "nameSpace": "apex.event.context", 17 | "name": "Context_Trigger", 18 | "version": "0.0.1", 19 | "source": "External", 20 | "target": "APEX", 21 | "report": "This is a Context Event" 22 | } 23 | ) 24 | 25 | @app.route('/api/slice/1234', methods=['PUT']) 26 | def testStateOne(): 27 | time.sleep(5) 28 | print("State One has been Triggered") 29 | return json.dumps({"response" : "State One success"}) 30 | 31 | @app.route('/api/slice/1234/restart', methods=['POST']) 32 | def testStateTwo(): 33 | time.sleep(5) 34 | print("State Two has been Triggered") 35 | return json.dumps({"response" : "State Two success"}) 36 | 37 | @app.route('/api/slice/1234', methods=['POST']) 38 | def testStateThree(): 39 | time.sleep(5) 40 | print("State Three has been Triggered") 41 | return json.dumps({"response" : "State Three success"}) 42 | 43 | @app.route('/RESTIssuer', methods=['GET', 'POST', 'PUT', 'DELETE']) 44 | def RESTIssuer(): 45 | message = request.get_json() 46 | 47 | path = "http://" + message["path"] 48 | method = message["method"] 49 | body = message["body"] 50 | 51 | print("Path = " , path) 52 | print("Method = " , method) 53 | print("Body = " , body) 54 | headers = {'Content-Type': 'application/json'} 55 | 56 | if(method == "get"): 57 | response = requests.get(path, data=json.dumps(body)) 58 | print("GET Called") 59 | if(method == "post"): 60 | 
response = requests.post(path, data=json.dumps(body)) 61 | print("POST Called") 62 | if(method == "put"): 63 | response = requests.put(path, body) 64 | print("PUT Called") 65 | if(method == "delete"): 66 | response = requests.post(path) 67 | print("DELETE Called") 68 | 69 | print("Response = " , response) 70 | print("Response = " , response.text) 71 | 72 | return 'POST RECEIVED' 73 | 74 | app.run(debug=True,host="0.0.0.0") 75 | -------------------------------------------------------------------------------- /katana-apex/scripts/start.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | apexApps.sh engine -p policy-json/Policy.json -------------------------------------------------------------------------------- /katana-apex/scripts/startTest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | apexCLIEditor.sh -c ./policy-apex/Policy.apex -o ./examples/policy-json/Policy.json -------------------------------------------------------------------------------- /katana-apex/tosca-template/ToscaTemplate.json: -------------------------------------------------------------------------------- 1 | { 2 | "tosca_definitions_version": "tosca_simple_yaml_1_1_0", 3 | "topology_template": { 4 | "policies": [ 5 | { 6 | "native.sampledomain": { 7 | "type": "onap.policies.native.Apex", 8 | "type_version": "1.0.0", 9 | "name": "native.sampledomain", 10 | "version": "1.0.0", 11 | "properties": { 12 | } 13 | } 14 | } 15 | ] 16 | } 17 | } -------------------------------------------------------------------------------- /katana-cli/.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .dockerignore -------------------------------------------------------------------------------- /katana-cli/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7.4-slim 2 | 3 | ENV INSTALL_PATH 
/katana-cli 4 | RUN mkdir -p $INSTALL_PATH 5 | 6 | WORKDIR $INSTALL_PATH 7 | 8 | COPY katana-cli/. . 9 | RUN pip install --upgrade pip 10 | RUN pip install -r requirements.txt 11 | 12 | RUN pip install --editable . 13 | 14 | CMD /bin/bash -------------------------------------------------------------------------------- /katana-cli/README.md: -------------------------------------------------------------------------------- 1 | # Katana CLI Tool 2 | 3 | CLI tool for 5GENESIS/katana. Name of the container: katana-cli 4 | 5 | > Visit the Wiki Page for documentation 6 | -------------------------------------------------------------------------------- /katana-cli/cli/cli.py: -------------------------------------------------------------------------------- 1 | """ Katana Command Line Tool """ 2 | 3 | import os 4 | import click 5 | 6 | cmd_folder = os.path.join(os.path.dirname(__file__), "commands") 7 | cmd_prefix = "cmd_" 8 | 9 | 10 | class CLI(click.MultiCommand): 11 | def list_commands(self, ctx): 12 | """ 13 | Obtain a list of all available commands. 14 | 15 | :param ctx: Click context 16 | :return: List of sorted commands 17 | """ 18 | commands = [] 19 | 20 | for filename in os.listdir(cmd_folder): 21 | if filename.endswith(".py") and filename.startswith(cmd_prefix): 22 | commands.append(filename[4:-3]) 23 | 24 | commands.sort() 25 | 26 | return commands 27 | 28 | def get_command(self, ctx, name): 29 | """ 30 | Get a specific command by looking up the module. 
31 | 32 | :param ctx: Click context 33 | :param name: Command name 34 | :return: Module's cli function 35 | """ 36 | ns = {} 37 | 38 | filename = os.path.join(cmd_folder, cmd_prefix + name + ".py") 39 | 40 | try: 41 | f = open(filename) 42 | except FileNotFoundError: 43 | raise click.ClickException( 44 | f"Wrong command: {name} \nAvailable commands: {self.list_commands(ctx)}" 45 | ) 46 | with f: 47 | code = compile(f.read(), filename, "exec") 48 | eval(code, ns, ns) 49 | 50 | return ns["cli"] 51 | 52 | 53 | @click.command(cls=CLI) 54 | def cli(): 55 | """ 56 | katana-cli is a command-line tool that interacts with the katana 57 | slice manager 58 | """ 59 | pass 60 | -------------------------------------------------------------------------------- /katana-cli/cli/commands/cmd_base_slice_des.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import yaml 4 | import click 5 | 6 | 7 | @click.group() 8 | def cli(): 9 | """Query Slice Descriptors""" 10 | pass 11 | 12 | 13 | @click.command() 14 | def ls(): 15 | """ 16 | List Slice Descriptors 17 | """ 18 | 19 | url = "http://localhost:8000/api/base_slice_des" 20 | r = None 21 | try: 22 | r = requests.get(url, timeout=30) 23 | r.raise_for_status() 24 | json_data = json.loads(r.content) 25 | print(console_formatter("DB_ID", "Base SD ID")) 26 | for i in range(len(json_data)): 27 | try: 28 | print(console_formatter(json_data[i]["_id"], json_data[i]["base_slice_des_id"])) 29 | except KeyError: 30 | print(console_formatter(json_data[i]["_id"], json_data[i]["base_slice_des_ref"])) 31 | except requests.exceptions.HTTPError as errh: 32 | print("Http Error:", errh) 33 | click.echo(r.content) 34 | except requests.exceptions.ConnectionError as errc: 35 | print("Error Connecting:", errc) 36 | except requests.exceptions.Timeout as errt: 37 | print("Timeout Error:", errt) 38 | except requests.exceptions.RequestException as err: 39 | print("Error:", err) 40 | 41 | 42 
| @click.command() 43 | @click.argument("id") 44 | def inspect(id): 45 | """ 46 | Display detailed information of Slice Descriptor 47 | """ 48 | url = "http://localhost:8000/api/base_slice_des/" + id 49 | r = None 50 | try: 51 | r = requests.get(url, timeout=30) 52 | r.raise_for_status() 53 | json_data = json.loads(r.content) 54 | click.echo(json.dumps(json_data, indent=2)) 55 | if not json_data: 56 | click.echo("Error: No such Slice Descriptor: {}".format(id)) 57 | except requests.exceptions.HTTPError as errh: 58 | print("Http Error:", errh) 59 | click.echo(r.content) 60 | except requests.exceptions.ConnectionError as errc: 61 | print("Error Connecting:", errc) 62 | except requests.exceptions.Timeout as errt: 63 | print("Timeout Error:", errt) 64 | except requests.exceptions.RequestException as err: 65 | print("Error:", err) 66 | 67 | 68 | @click.command() 69 | @click.option( 70 | "-f", "--file", required=True, type=str, help="yaml file with Base Slice Descriptor details" 71 | ) 72 | def add(file): 73 | """ 74 | Add new Base Slice Descriptor 75 | """ 76 | try: 77 | stream = open(file, mode="r") 78 | except FileNotFoundError: 79 | raise click.ClickException(f"File {file} not found") 80 | 81 | with stream: 82 | data = yaml.safe_load(stream) 83 | 84 | url = "http://localhost:8000/api/base_slice_des" 85 | r = None 86 | try: 87 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=30) 88 | r.raise_for_status() 89 | 90 | click.echo(r.content) 91 | except requests.exceptions.HTTPError as errh: 92 | print("Http Error:", errh) 93 | click.echo(r.content) 94 | except requests.exceptions.ConnectionError as errc: 95 | print("Error Connecting:", errc) 96 | except requests.exceptions.Timeout as errt: 97 | print("Timeout Error:", errt) 98 | except requests.exceptions.RequestException as err: 99 | print("Error:", err) 100 | 101 | 102 | @click.command() 103 | @click.argument("id") 104 | def rm(id): 105 | """ 106 | Remove a Base Slice Descriptor 107 | """ 108 | url = 
"http://localhost:8000/api/base_slice_des/" + id 109 | r = None 110 | try: 111 | r = requests.delete(url, timeout=30) 112 | r.raise_for_status() 113 | click.echo(r.content) 114 | except requests.exceptions.HTTPError as errh: 115 | print("Http Error:", errh) 116 | click.echo(r.content) 117 | except requests.exceptions.ConnectionError as errc: 118 | print("Error Connecting:", errc) 119 | except requests.exceptions.Timeout as errt: 120 | print("Timeout Error:", errt) 121 | except requests.exceptions.RequestException as err: 122 | print("Error:", err) 123 | 124 | 125 | @click.command() 126 | @click.option( 127 | "-f", "--file", required=True, type=str, help="yaml file with Base Slice Descriptor details" 128 | ) 129 | @click.argument("id") 130 | def update(file, id): 131 | """ 132 | Update Base Slice Descriptor 133 | """ 134 | with open(file, "r") as stream: 135 | data = yaml.safe_load(stream) 136 | 137 | url = "http://localhost:8000/api/base_slice_des/" + id 138 | r = None 139 | try: 140 | r = requests.put(url, json=json.loads(json.dumps(data)), timeout=30) 141 | r.raise_for_status() 142 | 143 | click.echo(r.content) 144 | except requests.exceptions.HTTPError as errh: 145 | print("Http Error:", errh) 146 | click.echo(r.content) 147 | except requests.exceptions.ConnectionError as errc: 148 | print("Error Connecting:", errc) 149 | except requests.exceptions.Timeout as errt: 150 | print("Timeout Error:", errt) 151 | except requests.exceptions.RequestException as err: 152 | print("Error:", err) 153 | 154 | 155 | cli.add_command(ls) 156 | cli.add_command(inspect) 157 | cli.add_command(add) 158 | cli.add_command(rm) 159 | cli.add_command(update) 160 | 161 | 162 | def console_formatter(uuid, slice_des_id): 163 | return "{0: <40}{1: <25}".format(uuid, slice_des_id) 164 | -------------------------------------------------------------------------------- /katana-cli/cli/commands/cmd_bootstrap.py: -------------------------------------------------------------------------------- 1 | 
@click.command()
@click.option("-f", "--file", required=True, type=str, help="file with config details")
def cli(file):
    """Bootstrap Katana"""
    # Fail with a clean CLI error when the config file does not exist
    try:
        config_stream = open(file, mode="r")
    except FileNotFoundError:
        raise click.ClickException(f"File {file} not found")

    with config_stream:
        payload = yaml.safe_load(config_stream)

    url = "http://localhost:8000/api/bootstrap"
    response = None
    try:
        # Round-trip through json to normalize the YAML payload before POSTing
        response = requests.post(url, json=json.loads(json.dumps(payload)), timeout=30)
        response.raise_for_status()
        click.echo(response.content)
    except requests.exceptions.HTTPError as errh:
        print("Http Error:", errh)
        click.echo(response.content)
    except requests.exceptions.ConnectionError as errc:
        print("Error Connecting:", errc)
    except requests.exceptions.Timeout as errt:
        print("Timeout Error:", errt)
    except requests.exceptions.RequestException as err:
        print("Error:", err)
Error:", errt) 33 | except requests.exceptions.RequestException as err: 34 | print("Error:", err) 35 | 36 | 37 | @click.command() 38 | @click.argument("id") 39 | def inspect(id): 40 | """ 41 | Display detailed information of GST 42 | """ 43 | url = "http://localhost:8000/api/gst/" + id 44 | r = None 45 | try: 46 | r = requests.get(url, timeout=30) 47 | r.raise_for_status() 48 | json_data = json.loads(r.content) 49 | click.echo(json.dumps(json_data, indent=2)) 50 | if not json_data: 51 | click.echo("Error: No such GST: {}".format(id)) 52 | except requests.exceptions.HTTPError as errh: 53 | print("Http Error:", errh) 54 | click.echo(r.content) 55 | except requests.exceptions.ConnectionError as errc: 56 | print("Error Connecting:", errc) 57 | except requests.exceptions.Timeout as errt: 58 | print("Timeout Error:", errt) 59 | except requests.exceptions.RequestException as err: 60 | print("Error:", err) 61 | 62 | 63 | cli.add_command(ls) 64 | cli.add_command(inspect) 65 | 66 | 67 | def console_formatter(uuid): 68 | return "{0: <40}".format(uuid) 69 | -------------------------------------------------------------------------------- /katana-cli/cli/commands/cmd_ns.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import click 4 | 5 | 6 | @click.group() 7 | def cli(): 8 | """Query Network Service Descriptors""" 9 | pass 10 | 11 | 12 | @click.command() 13 | def ls(): 14 | """ 15 | List all network services 16 | """ 17 | url = "http://localhost:8000/api/nslist" 18 | r = None 19 | try: 20 | r = requests.get(url, timeout=30) 21 | r.raise_for_status() 22 | json_data = json.loads(r.content) 23 | click.echo(json.dumps(json_data, indent=2)) 24 | except requests.exceptions.HTTPError as errh: 25 | print("Http Error:", errh) 26 | click.echo(r.content) 27 | except requests.exceptions.ConnectionError as errc: 28 | print("Error Connecting:", errc) 29 | except requests.exceptions.Timeout as errt: 30 | 
print("Timeout Error:", errt) 31 | except requests.exceptions.RequestException as err: 32 | print("Error:", err) 33 | 34 | 35 | cli.add_command(ls) 36 | -------------------------------------------------------------------------------- /katana-cli/cli/commands/cmd_resources.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import click 4 | 5 | 6 | @click.group() 7 | def cli(): 8 | """Query Resources""" 9 | pass 10 | 11 | 12 | @click.command() 13 | def ls(): 14 | """ 15 | List all resources 16 | """ 17 | url = "http://localhost:8000/api/resources" 18 | r = None 19 | try: 20 | r = requests.get(url, timeout=30) 21 | r.raise_for_status() 22 | json_data = json.loads(r.content) 23 | click.echo(json.dumps(json_data, indent=2)) 24 | except requests.exceptions.HTTPError as errh: 25 | print("Http Error:", errh) 26 | click.echo(r.content) 27 | except requests.exceptions.ConnectionError as errc: 28 | print("Error Connecting:", errc) 29 | except requests.exceptions.Timeout as errt: 30 | print("Timeout Error:", errt) 31 | except requests.exceptions.RequestException as err: 32 | print("Error:", err) 33 | 34 | 35 | @click.command() 36 | @click.argument("location") 37 | def location(location): 38 | """ 39 | List all resources in the specific location 40 | """ 41 | url = "http://localhost:8000/api/resources/" + location 42 | r = None 43 | try: 44 | r = requests.get(url, timeout=30) 45 | r.raise_for_status() 46 | json_data = json.loads(r.content) 47 | click.echo(json.dumps(json_data, indent=2)) 48 | except requests.exceptions.HTTPError as errh: 49 | print("Http Error:", errh) 50 | click.echo(r.content) 51 | except requests.exceptions.ConnectionError as errc: 52 | print("Error Connecting:", errc) 53 | except requests.exceptions.Timeout as errt: 54 | print("Timeout Error:", errt) 55 | except requests.exceptions.RequestException as err: 56 | print("Error:", err) 57 | 58 | 59 | @click.command() 60 | def updatedb(): 
61 | """ 62 | Update the resource database 63 | """ 64 | url = "http://localhost:8000/api/resources/update" 65 | r = None 66 | try: 67 | r = requests.get(url, timeout=30) 68 | r.raise_for_status() 69 | click.echo(r.content) 70 | except requests.exceptions.HTTPError as errh: 71 | print("Http Error:", errh) 72 | click.echo(r.content) 73 | except requests.exceptions.ConnectionError as errc: 74 | print("Error Connecting:", errc) 75 | except requests.exceptions.Timeout as errt: 76 | print("Timeout Error:", errt) 77 | except requests.exceptions.RequestException as err: 78 | print("Error:", err) 79 | 80 | 81 | cli.add_command(updatedb) 82 | cli.add_command(ls) 83 | cli.add_command(location) 84 | -------------------------------------------------------------------------------- /katana-cli/cli/commands/cmd_vim.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import json 3 | import yaml 4 | 5 | import click 6 | import datetime 7 | 8 | 9 | @click.group() 10 | def cli(): 11 | """Manage VIMs""" 12 | pass 13 | 14 | 15 | @click.command() 16 | def ls(): 17 | """ 18 | List vims 19 | """ 20 | 21 | url = "http://localhost:8000/api/vim" 22 | r = None 23 | try: 24 | r = requests.get(url, timeout=30) 25 | r.raise_for_status() 26 | json_data = json.loads(r.content) 27 | # indent=2 "beautifies" json 28 | # click.echo(json.dumps(json_data, indent=2)) 29 | print(console_formatter("DB_ID", "VIM_ID", "TYPE", "CREATED AT")) 30 | for i in range(len(json_data)): 31 | print(console_formatter(json_data[i]["_id"], json_data[i]["vim_id"], json_data[i]["type"], datetime.datetime.fromtimestamp(json_data[i]["created_at"]).strftime("%Y-%m-%d %H:%M:%S"),)) 32 | except requests.exceptions.HTTPError as errh: 33 | print("Http Error:", errh) 34 | click.echo(r.content) 35 | except requests.exceptions.ConnectionError as errc: 36 | print("Error Connecting:", errc) 37 | except requests.exceptions.Timeout as errt: 38 | print("Timeout Error:", errt) 39 | 
except requests.exceptions.RequestException as err: 40 | print("Error:", err) 41 | 42 | 43 | @click.command() 44 | @click.argument("id") 45 | def inspect(id): 46 | """ 47 | Display detailed information of VIM 48 | """ 49 | url = "http://localhost:8000/api/vim/" + id 50 | r = None 51 | try: 52 | r = requests.get(url, timeout=30) 53 | r.raise_for_status() 54 | json_data = json.loads(r.content) 55 | # indent=2 "beautifies" json 56 | click.echo(json.dumps(json_data, indent=2)) 57 | if not json_data: 58 | click.echo(f"Error: No such vim: {id}") 59 | except requests.exceptions.HTTPError as errh: 60 | print("Http Error:", errh) 61 | click.echo(r.content) 62 | except requests.exceptions.ConnectionError as errc: 63 | print("Error Connecting:", errc) 64 | except requests.exceptions.Timeout as errt: 65 | print("Timeout Error:", errt) 66 | except requests.exceptions.RequestException as err: 67 | print("Error:", err) 68 | 69 | 70 | @click.command() 71 | @click.option("-f", "--file", required=True, type=str, help="yaml file with VIM details") 72 | def add(file): 73 | """ 74 | Add new VIM 75 | """ 76 | try: 77 | stream = open(file, mode="r") 78 | except FileNotFoundError: 79 | raise click.ClickException(f"File {file} not found") 80 | 81 | with stream: 82 | data = yaml.safe_load(stream) 83 | 84 | url = "http://localhost:8000/api/vim" 85 | r = None 86 | try: 87 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=30) 88 | r.raise_for_status() 89 | 90 | click.echo(r.content) 91 | except requests.exceptions.HTTPError as errh: 92 | print("Http Error:", errh) 93 | click.echo(r.content) 94 | except requests.exceptions.ConnectionError as errc: 95 | print("Error Connecting:", errc) 96 | except requests.exceptions.Timeout as errt: 97 | print("Timeout Error:", errt) 98 | except requests.exceptions.RequestException as err: 99 | print("Error:", err) 100 | 101 | 102 | @click.command() 103 | @click.argument("id") 104 | def rm(id): 105 | """ 106 | Remove VIM 107 | """ 108 | url = 
"http://localhost:8000/api/vim/" + id 109 | r = None 110 | try: 111 | r = requests.delete(url, timeout=30) 112 | r.raise_for_status() 113 | click.echo(r.content) 114 | except requests.exceptions.HTTPError as errh: 115 | print("Http Error:", errh) 116 | click.echo(r.content) 117 | except requests.exceptions.ConnectionError as errc: 118 | print("Error Connecting:", errc) 119 | except requests.exceptions.Timeout as errt: 120 | print("Timeout Error:", errt) 121 | except requests.exceptions.RequestException as err: 122 | print("Error:", err) 123 | 124 | 125 | @click.command() 126 | @click.option("-f", "--file", required=True, type=str, help="yaml file with VIM details") 127 | @click.argument("id") 128 | def update(file, id): 129 | """ 130 | Update VIM 131 | """ 132 | try: 133 | stream = open(file, mode="r") 134 | except FileNotFoundError: 135 | raise click.ClickException(f"File {file} not found") 136 | 137 | with stream: 138 | data = yaml.safe_load(stream) 139 | 140 | url = "http://localhost:8000/api/vim/" + id 141 | r = None 142 | try: 143 | r = requests.put(url, json=json.loads(json.dumps(data)), timeout=30) 144 | r.raise_for_status() 145 | 146 | click.echo(r.content) 147 | except requests.exceptions.HTTPError as errh: 148 | print("Http Error:", errh) 149 | click.echo(r.content) 150 | except requests.exceptions.ConnectionError as errc: 151 | print("Error Connecting:", errc) 152 | except requests.exceptions.Timeout as errt: 153 | print("Timeout Error:", errt) 154 | except requests.exceptions.RequestException as err: 155 | print("Error:", err) 156 | 157 | 158 | cli.add_command(ls) 159 | cli.add_command(inspect) 160 | cli.add_command(add) 161 | cli.add_command(rm) 162 | cli.add_command(update) 163 | 164 | 165 | def console_formatter(uuid, vim_id, vimtype, created_at): 166 | return "{0: <40}{1: <20}{2: <20}{3: <25}".format(uuid, vim_id, vimtype, created_at) 167 | -------------------------------------------------------------------------------- /katana-cli/requirements.txt: 
from setuptools import setup

setup(
    name='KATANA-CLI',
    version='1.0',
    packages=['cli', 'cli.commands'],
    include_package_data=True,
    install_requires=[
        'click',
        # BUG FIX: the CLI commands import requests and yaml at runtime, but
        # these were only listed in requirements.txt, so a plain
        # `pip install .` produced a broken `katana` entry point.
        'requests',
        'PyYAML',
    ],
    entry_points="""
        [console_scripts]
        katana=cli.cli:cli
    """,
)
Container name: katana-grafana 4 | 5 | > Visit the Wiki Page for documentation 6 | -------------------------------------------------------------------------------- /katana-grafana/prometheus.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: katana-prometheus 5 | type: prometheus 6 | access: proxy 7 | url: http://katana-prometheus:9090 8 | -------------------------------------------------------------------------------- /katana-grafana/templates/new_dashboard.json: -------------------------------------------------------------------------------- 1 | { 2 | "meta": { 3 | "type": "db", 4 | "canSave": true, 5 | "canEdit": true, 6 | "canAdmin": true, 7 | "canStar": true, 8 | "version": 1, 9 | "hasAcl": false, 10 | "isFolder": false, 11 | "folderId": 0, 12 | "folderTitle": "General", 13 | "folderUrl": "", 14 | "provisioned": false, 15 | "provisionedExternalId": "" 16 | }, 17 | "dashboard": { 18 | "annotations": { 19 | "list": [ 20 | { 21 | "builtIn": 1, 22 | "datasource": "-- Grafana --", 23 | "enable": true, 24 | "hide": true, 25 | "iconColor": "rgba(0, 211, 255, 1)", 26 | "name": "Annotations & Alerts", 27 | "type": "dashboard" 28 | } 29 | ] 30 | }, 31 | "editable": true, 32 | "gnetId": null, 33 | "graphTooltip": 0, 34 | "hideControls": false, 35 | "id": "katana", 36 | "links": [], 37 | "panels": [], 38 | "schemaVersion": 25, 39 | "style": "dark", 40 | "tags": [ 41 | "slice" 42 | ], 43 | "templating": { 44 | "list": [] 45 | }, 46 | "time": { 47 | "from": "now-5m", 48 | "to": "now" 49 | }, 50 | "timepicker": {}, 51 | "timezone": "", 52 | "title": "SLICE_ID", 53 | "uid": "SLICE_ID", 54 | "version": 1 55 | } 56 | } -------------------------------------------------------------------------------- /katana-grafana/templates/new_ns_status_panel.json: -------------------------------------------------------------------------------- 1 | { 2 | "datasource": "katana-prometheus", 3 | "fieldConfig": { 
4 | "defaults": { 5 | "custom": { 6 | "align": null, 7 | "displayMode": "color-background" 8 | }, 9 | "thresholds": { 10 | "mode": "absolute", 11 | "steps": [ 12 | { 13 | "color": "purple", 14 | "value": null 15 | }, 16 | { 17 | "color": "green", 18 | "value": 1 19 | }, 20 | { 21 | "color": "red", 22 | "value": 2 23 | }, 24 | { 25 | "color": "red", 26 | "value": 3 27 | }, 28 | { 29 | "color": "yellow", 30 | "value": 4 31 | } 32 | ] 33 | }, 34 | "mappings": [ 35 | { 36 | "id": 0, 37 | "text": "Running", 38 | "type": 1, 39 | "value": "1" 40 | }, 41 | { 42 | "id": 1, 43 | "text": "Terminated", 44 | "type": 1, 45 | "value": "2" 46 | }, 47 | { 48 | "id": 2, 49 | "text": "Error", 50 | "type": 1, 51 | "value": "3" 52 | }, 53 | { 54 | "id": 3, 55 | "text": "Terminating", 56 | "type": 1, 57 | "value": "4" 58 | }, 59 | { 60 | "id": 4, 61 | "text": "AdminStopped", 62 | "type": 1, 63 | "value": "5" 64 | } 65 | ] 66 | }, 67 | "overrides": [] 68 | }, 69 | "gridPos": { 70 | "h": 8, 71 | "w": 12, 72 | "x": 0, 73 | "y": 0 74 | }, 75 | "id": 1, 76 | "options": { 77 | "showHeader": true 78 | }, 79 | "pluginVersion": "7.0.4", 80 | "targets": [], 81 | "timeFrom": null, 82 | "timeShift": null, 83 | "title": "Network Service Status", 84 | "transformations": [ 85 | { 86 | "id": "organize", 87 | "options": { 88 | "excludeByName": { 89 | "Time": true, 90 | "__name__": true, 91 | "instance": true, 92 | "job": true, 93 | "slice_id": true 94 | }, 95 | "indexByName": {}, 96 | "renameByName": { 97 | "Value": "Status", 98 | "ns_name": "NS Name" 99 | } 100 | } 101 | } 102 | ], 103 | "type": "table" 104 | } -------------------------------------------------------------------------------- /katana-grafana/templates/new_vm_monitoring_panel.json: -------------------------------------------------------------------------------- 1 | { 2 | "aliasColors": {}, 3 | "bars": false, 4 | "dashLength": 10, 5 | "dashes": false, 6 | "datasource": "katana-prometheus", 7 | "fieldConfig": { 8 | "defaults": { 9 | 
"custom": {} 10 | }, 11 | "overrides": [] 12 | }, 13 | "fill": 1, 14 | "fillGradient": 0, 15 | "gridPos": { 16 | "h": 8, 17 | "w": 12, 18 | "x": 0, 19 | "y": 9 20 | }, 21 | "hiddenSeries": false, 22 | "id": 0, 23 | "legend": { 24 | "avg": false, 25 | "current": false, 26 | "max": false, 27 | "min": false, 28 | "show": true, 29 | "total": false, 30 | "values": false 31 | }, 32 | "lines": true, 33 | "linewidth": 1, 34 | "nullPointMode": "null", 35 | "options": { 36 | "dataLinks": [] 37 | }, 38 | "percentage": false, 39 | "pointradius": 2, 40 | "points": false, 41 | "renderer": "flot", 42 | "seriesOverrides": [], 43 | "spaceLength": 10, 44 | "stack": false, 45 | "steppedLine": false, 46 | "targets": [], 47 | "thresholds": [], 48 | "timeFrom": null, 49 | "timeRegions": [], 50 | "timeShift": null, 51 | "title": "WIM Flows Metrics", 52 | "tooltip": { 53 | "shared": true, 54 | "sort": 0, 55 | "value_type": "individual" 56 | }, 57 | "type": "graph", 58 | "xaxis": { 59 | "buckets": null, 60 | "mode": "time", 61 | "name": null, 62 | "show": true, 63 | "values": [] 64 | }, 65 | "yaxes": [ 66 | { 67 | "format": "short", 68 | "label": null, 69 | "logBase": 1, 70 | "max": null, 71 | "min": null, 72 | "show": true 73 | }, 74 | { 75 | "format": "short", 76 | "label": null, 77 | "logBase": 1, 78 | "max": null, 79 | "min": null, 80 | "show": true 81 | } 82 | ], 83 | "yaxis": { 84 | "align": false, 85 | "alignLevel": null 86 | } 87 | } -------------------------------------------------------------------------------- /katana-grafana/templates/new_wim_panel.json: -------------------------------------------------------------------------------- 1 | { 2 | "aliasColors": {}, 3 | "bars": false, 4 | "dashLength": 10, 5 | "dashes": false, 6 | "datasource": "katana-prometheus", 7 | "fieldConfig": { 8 | "defaults": { 9 | "custom": {} 10 | }, 11 | "overrides": [] 12 | }, 13 | "fill": 1, 14 | "fillGradient": 0, 15 | "gridPos": { 16 | "h": 8, 17 | "w": 12, 18 | "x": 0, 19 | "y": 9 20 | }, 21 | 
"hiddenSeries": false, 22 | "id": 2, 23 | "legend": { 24 | "avg": false, 25 | "current": false, 26 | "max": false, 27 | "min": false, 28 | "show": true, 29 | "total": false, 30 | "values": false 31 | }, 32 | "lines": true, 33 | "linewidth": 1, 34 | "nullPointMode": "null", 35 | "options": { 36 | "dataLinks": [] 37 | }, 38 | "percentage": false, 39 | "pointradius": 2, 40 | "points": false, 41 | "renderer": "flot", 42 | "seriesOverrides": [], 43 | "spaceLength": 10, 44 | "stack": false, 45 | "steppedLine": false, 46 | "targets": [], 47 | "thresholds": [], 48 | "timeFrom": null, 49 | "timeRegions": [], 50 | "timeShift": null, 51 | "title": "WIM Flows Metrics", 52 | "tooltip": { 53 | "shared": true, 54 | "sort": 0, 55 | "value_type": "individual" 56 | }, 57 | "type": "graph", 58 | "xaxis": { 59 | "buckets": null, 60 | "mode": "time", 61 | "name": null, 62 | "show": true, 63 | "values": [] 64 | }, 65 | "yaxes": [ 66 | { 67 | "format": "short", 68 | "label": null, 69 | "logBase": 1, 70 | "max": null, 71 | "min": null, 72 | "show": true 73 | }, 74 | { 75 | "format": "short", 76 | "label": null, 77 | "logBase": 1, 78 | "max": null, 79 | "min": null, 80 | "show": true 81 | } 82 | ], 83 | "yaxis": { 84 | "align": false, 85 | "alignLevel": null 86 | } 87 | } -------------------------------------------------------------------------------- /katana-mngr/.env: -------------------------------------------------------------------------------- 1 | PYTHONWARNINGS=ignore:Unverified HTTPS request -------------------------------------------------------------------------------- /katana-mngr/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7.4-slim 2 | 3 | RUN apt-get update && apt-get install -qq -y \ 4 | build-essential libpq-dev --no-install-recommends 5 | 6 | ENV INSTALL_PATH /katana-mngr 7 | ENV PYTHONPATH ${INSTALL_PATH} 8 | RUN mkdir -p $INSTALL_PATH 9 | 10 | WORKDIR $INSTALL_PATH 11 | 12 | COPY katana-grafana/templates 
/katana-grafana/templates 13 | 14 | COPY katana-mngr/. . 15 | RUN pip install --upgrade pip 16 | RUN pip install -r requirements.txt 17 | 18 | CMD python3 katana/katana-mngr.py -------------------------------------------------------------------------------- /katana-mngr/README.md: -------------------------------------------------------------------------------- 1 | # Katana Manager Service 2 | 3 | The core service of Katana SM, implements the Slice LCM operations. Container name: katana-mngr 4 | 5 | > Visit the Wiki Page for documentation 6 | -------------------------------------------------------------------------------- /katana-mngr/katana/katana-mngr.py: -------------------------------------------------------------------------------- 1 | """ Katana Manager Base Application """ 2 | 3 | import logging 4 | import logging.handlers 5 | import uuid 6 | import time 7 | import pymongo 8 | 9 | from katana.shared_utils.kafkaUtils import kafkaUtils 10 | from katana.shared_utils.mongoUtils import mongoUtils 11 | from katana.utils.sliceUtils import sliceUtils 12 | 13 | 14 | # Logging Parameters 15 | logger = logging.getLogger(__name__) 16 | file_handler = logging.handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 17 | stream_handler = logging.StreamHandler() 18 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 19 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 20 | file_handler.setFormatter(formatter) 21 | stream_handler.setFormatter(stream_formatter) 22 | logger.setLevel(logging.DEBUG) 23 | logger.addHandler(file_handler) 24 | logger.addHandler(stream_handler) 25 | 26 | # Create Kafka topic 27 | kafkaUtils.create_topic("slice") 28 | 29 | # Create the Kafka Consumer 30 | consumer = kafkaUtils.create_consumer("slice") 31 | 32 | # Create the initial core location 33 | try: 34 | new_uuid = str(uuid.uuid4()) 35 | core_location_data = { 36 | "_id": new_uuid, 37 | "id": "core", 38 | 
"created_at": time.time(), 39 | "description": "The default Core location", 40 | "vims": [], 41 | "functions": [], 42 | } 43 | mongoUtils.add("location", core_location_data) 44 | except pymongo.errors.DuplicateKeyError: 45 | pass 46 | 47 | # Check for new messages 48 | if consumer: 49 | for message in consumer: 50 | logger.info("--- New Message ---") 51 | logger.info(f"Topic: {message.topic} | Partition: {message.partition} | Offset: {message.offset}") 52 | # Commit the latest received message 53 | consumer.commit() 54 | action = message.value["action"] 55 | # Add slice 56 | if action == "add": 57 | payload = message.value["message"] 58 | sliceUtils.add_slice(payload) 59 | # Delete slice 60 | elif action == "delete": 61 | payload = message.value["message"] 62 | force = message.value["force"] 63 | sliceUtils.delete_slice(slice_id=payload, force=force) 64 | # Update slice 65 | elif action == "update": 66 | slice_id = message.value["slice_id"] 67 | updates = message.value["updates"] 68 | sliceUtils.update_slice(nest_id=slice_id, updates=updates) 69 | -------------------------------------------------------------------------------- /katana-mngr/katana/shared_utils/emsUtils/amar_emsUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 12 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | file_handler.setFormatter(formatter) 14 | stream_handler.setFormatter(stream_formatter) 15 | logger.setLevel(logging.DEBUG) 16 | logger.addHandler(file_handler) 17 | logger.addHandler(stream_handler) 18 | 19 | 20 | class Ems: 21 | 
""" 22 | Class implementing the communication API with EMS 23 | """ 24 | 25 | def __init__(self, url): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def conf_radio(self, emsd): 32 | """ 33 | Configure radio components for the newly created slice 34 | """ 35 | ems_url = self.url 36 | api_prefix = "/slice" 37 | url = ems_url + api_prefix 38 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 39 | data = emsd 40 | r = None 41 | try: 42 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 43 | logger.info(r.json()) 44 | r.raise_for_status() 45 | except requests.exceptions.HTTPError as errh: 46 | logger.exception("Http Error:", errh) 47 | except requests.exceptions.ConnectionError as errc: 48 | logger.exception("Error Connecting:", errc) 49 | except requests.exceptions.Timeout as errt: 50 | logger.exception("Timeout Error:", errt) 51 | except requests.exceptions.RequestException as err: 52 | logger.exception("Error:", err) 53 | 54 | def del_slice(self, emsd): 55 | """ 56 | Delete a configured radio slice 57 | """ 58 | logger.info("Deleting Radio Slice Configuration") 59 | -------------------------------------------------------------------------------- /katana-mngr/katana/shared_utils/emsUtils/open5gs_emsUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 12 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | file_handler.setFormatter(formatter) 14 | stream_handler.setFormatter(stream_formatter) 15 | 
logger.setLevel(logging.DEBUG) 16 | logger.addHandler(file_handler) 17 | logger.addHandler(stream_handler) 18 | 19 | 20 | class Ems: 21 | """ 22 | Class implementing the communication API with EMS 23 | """ 24 | 25 | def __init__(self, url): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def conf_radio(self, emsd): 32 | """ 33 | Configure radio components for the newly created slice 34 | """ 35 | ems_url = self.url 36 | api_prefix = "/slice/" 37 | slice_id = emsd["slice_id"] 38 | url = ems_url + api_prefix + slice_id 39 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 40 | data = emsd 41 | r = None 42 | try: 43 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 44 | logger.info(r.json()) 45 | r.raise_for_status() 46 | except requests.exceptions.HTTPError as errh: 47 | logger.exception("Http Error:", errh) 48 | except requests.exceptions.ConnectionError as errc: 49 | logger.exception("Error Connecting:", errc) 50 | except requests.exceptions.Timeout as errt: 51 | logger.exception("Timeout Error:", errt) 52 | except requests.exceptions.RequestException as err: 53 | logger.exception("Error:", err) 54 | 55 | def del_slice(self, emsd): 56 | """ 57 | Delete a configured radio slice 58 | """ 59 | logger.info("Deleting Radio Slice Configuration") 60 | ems_url = self.url 61 | api_prefix = "/slice/" 62 | for iemsd in emsd: 63 | slice_id = iemsd["slice_id"] 64 | url = ems_url + api_prefix + slice_id 65 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 66 | r = None 67 | try: 68 | r = requests.delete(url, timeout=360, headers=headers) 69 | logger.info(r.json()) 70 | r.raise_for_status() 71 | except requests.exceptions.HTTPError as errh: 72 | logger.exception("Http Error:", errh) 73 | except requests.exceptions.ConnectionError as errc: 74 | logger.exception("Error Connecting:", errc) 75 | except requests.exceptions.Timeout as errt: 76 | 
logger.exception("Timeout Error:", errt) 77 | except requests.exceptions.RequestException as err: 78 | logger.exception("Error:", err) 79 | -------------------------------------------------------------------------------- /katana-mngr/katana/shared_utils/emsUtils/test_emsUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 12 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | file_handler.setFormatter(formatter) 14 | stream_handler.setFormatter(stream_formatter) 15 | logger.setLevel(logging.DEBUG) 16 | logger.addHandler(file_handler) 17 | logger.addHandler(stream_handler) 18 | 19 | 20 | class Ems: 21 | """ 22 | Class implementing the communication API with EMS 23 | """ 24 | 25 | def __init__(self, url): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def conf_radio(self, emsd): 32 | """ 33 | Configure radio components for the newly created slice 34 | """ 35 | ems_url = self.url 36 | api_prefix = "" 37 | url = ems_url + api_prefix 38 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 39 | data = emsd 40 | r = None 41 | try: 42 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 43 | logger.info(r.json()) 44 | r.raise_for_status() 45 | except requests.exceptions.HTTPError as errh: 46 | logger.exception("Http Error:", errh) 47 | except requests.exceptions.ConnectionError as errc: 48 | logger.exception("Error Connecting:", errc) 49 | except requests.exceptions.Timeout as errt: 50 | logger.exception("Timeout 
Error:", errt) 51 | except requests.exceptions.RequestException as err: 52 | logger.exception("Error:", err) 53 | 54 | def del_slice(self, emsd): 55 | """ 56 | Delete a configured radio slice 57 | """ 58 | logger.info("Deleting Radio Slice Configuration") 59 | -------------------------------------------------------------------------------- /katana-mngr/katana/shared_utils/kafkaUtils/kafkaUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | import time 5 | 6 | from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer, admin, errors 7 | 8 | # Logging Parameters 9 | logger = logging.getLogger(__name__) 10 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 11 | stream_handler = logging.StreamHandler() 12 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 14 | file_handler.setFormatter(formatter) 15 | stream_handler.setFormatter(stream_formatter) 16 | logger.setLevel(logging.DEBUG) 17 | logger.addHandler(file_handler) 18 | logger.addHandler(stream_handler) 19 | 20 | # NOTE: It is required to have global parameters for kafka objects 21 | consumer, producer, topic = None, None, None 22 | 23 | 24 | def create_consumer(topic_name, bootstrap_servers=None): 25 | global consumer 26 | 27 | bootstrap_servers = bootstrap_servers or ["kafka:19092"] 28 | # Create the kafka consumer 29 | tries = 30 30 | exit = False 31 | while not exit: 32 | try: 33 | consumer = KafkaConsumer( 34 | topic_name, 35 | bootstrap_servers=bootstrap_servers, 36 | auto_offset_reset="earliest", 37 | enable_auto_commit=True, 38 | auto_commit_interval_ms=10000, 39 | group_id="katana-mngr-group", 40 | value_deserializer=lambda m: json.loads(m.decode("utf-8")), 41 | ) 42 | except errors.NoBrokersAvailable as KafkaError: 43 | if tries > 
def create_producer(bootstrap_servers=None):
    """
    Create (and store in the module-global `producer`) a KafkaProducer whose
    values are JSON-encoded UTF-8 bytes.

    bootstrap_servers: list of broker addresses; defaults to ["kafka:19092"].
    Retries every 5 seconds while no broker is available, up to 30 tries.

    Returns the created KafkaProducer.

    BUG FIX: the original looped forever once the retry budget was exhausted -
    the failure branch logged the error but never left the loop. The last
    NoBrokersAvailable error is now re-raised so callers can fail fast.
    """
    global producer

    bootstrap_servers = bootstrap_servers or ["kafka:19092"]
    # Create the kafka producer, retrying while the broker comes up
    tries = 30
    while True:
        try:
            producer = KafkaProducer(
                bootstrap_servers=bootstrap_servers,
                value_serializer=lambda m: json.dumps(m).encode("utf-8"),
            )
        except errors.NoBrokersAvailable as kafka_error:
            if tries > 0:
                tries -= 1
                logger.warning(f"Kafka not ready yet. Tries remaining: {tries}")
                time.sleep(5)
            else:
                logger.error(kafka_error)
                raise
        else:
            logger.info("New producer")
            return producer
from pymongo import MongoClient, ASCENDING


client = MongoClient("mongodb://mongo")
db = client.katana

# Initialize all collections and create unique indexes on the human-readable
# "id" fields so duplicate registrations fail fast at insert time.
db.vim.create_index([("id", ASCENDING)], unique=True)
db.nfvo.create_index([("id", ASCENDING)], unique=True)
db.wim.create_index([("id", ASCENDING)], unique=True)
db.ems.create_index([("id", ASCENDING)], unique=True)
db.policy.create_index([("id", ASCENDING)], unique=True)
db.nsd.create_index([("nsd-id", ASCENDING)], unique=True)
db.vnfd.create_index([("vnfd-id", ASCENDING)], unique=True)
db.func.create_index([("id", ASCENDING)], unique=True)
db.location.create_index([("id", ASCENDING)], unique=True)


def index(collection_name):
    """Return a cursor over every document in the collection."""
    collection = db[collection_name]
    return collection.find({})


def get(collection_name, uuid):
    """Return the document with the given _id, or None when absent."""
    collection = db[collection_name]
    return collection.find_one({"_id": uuid})


def add(collection_name, json_data):
    """Insert one document and return its generated _id."""
    collection = db[collection_name]
    return collection.insert_one(json_data).inserted_id


def add_many(collection_name, list_data):
    """Insert several documents and return the list of generated _ids."""
    collection = db[collection_name]
    return collection.insert_many(list_data).inserted_ids


def delete(collection_name, uuid):
    """Delete the document with the given _id; return the deleted count (0 or 1)."""
    result = db[collection_name].delete_one({"_id": uuid}).deleted_count
    return result


def update(collection_name, uuid, json_data):
    """Replace the document with the given _id; return the modified count (0 or 1)."""
    collection = db[collection_name]
    return collection.replace_one({"_id": uuid}, json_data).modified_count


def count(collection_name):
    """Return the number of documents in the collection."""
    collection = db[collection_name]
    return collection.count_documents({})


def find(collection_name, data=None):
    """Return the first document matching `data` (any document when omitted)."""
    # IDIOM FIX: `data={}` was a shared mutable default argument; use None
    # as the sentinel and substitute an empty filter at call time.
    collection = db[collection_name]
    return collection.find_one(data or {})


def find_all(collection_name, data=None):
    """Return a cursor over all documents matching `data` (all when omitted)."""
    collection = db[collection_name]
    return collection.find(data or {})


def delete_all(collection_name, data=None):
    """Delete every document matching `data` (the whole collection when omitted)."""
    collection = db[collection_name]
    return collection.delete_many(data or {})
23 | """ 24 | 25 | def __init__(self, url, id): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def send_nest(self, data): 32 | """ 33 | Send the nest parameters 34 | """ 35 | api_prefix = "" 36 | url = self.url + api_prefix 37 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 38 | r = None 39 | try: 40 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 41 | logger.info(r.json()) 42 | r.raise_for_status() 43 | except requests.exceptions.HTTPError as errh: 44 | logger.exception("Http Error:", errh) 45 | except requests.exceptions.ConnectionError as errc: 46 | logger.exception("Error Connecting:", errc) 47 | except requests.exceptions.Timeout as errt: 48 | logger.exception("Timeout Error:", errt) 49 | except requests.exceptions.RequestException as err: 50 | logger.exception("Error:", err) 51 | 52 | def notify(self, alert_type, slice_id, status): 53 | """ 54 | Notify NEAT policy engine 55 | """ 56 | neat_message = { 57 | "slice_id": slice_id, 58 | "type": alert_type, 59 | "value": status, 60 | "ttl": -1, 61 | } 62 | neat_url = self.url + "/event" 63 | neat_headers = {"Content-type": "Application/JSON"} 64 | r = requests.put(neat_url, json=json.loads(json.dumps(neat_message)), headers=neat_headers) 65 | logger.info(f"Notifying NEAT, message: {neat_message}") 66 | -------------------------------------------------------------------------------- /katana-mngr/katana/shared_utils/policyUtils/test_policyUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s 
class Policy:
    """
    Class implementing the communication API with Policy System
    """

    def __init__(self, url, id):
        """
        Initialize an object of the class.

        url: base URL of the policy system.
        id: accepted for interface compatibility; not stored.
        """
        self.url = url

    def send_nest(self, data):
        """
        Send the nest parameters to the policy system.

        All request errors are logged with traceback instead of being raised.
        """
        api_prefix = ""
        url = self.url + api_prefix
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
        r = None
        try:
            r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers)
            logger.info(r.json())
            r.raise_for_status()
        # BUG FIX: logger.exception("...:", err) supplied an argument with no
        # %-style placeholder, producing a logging formatting error. Use %s.
        except requests.exceptions.HTTPError as errh:
            logger.exception("Http Error: %s", errh)
        except requests.exceptions.ConnectionError as errc:
            logger.exception("Error Connecting: %s", errc)
        except requests.exceptions.Timeout as errt:
            logger.exception("Timeout Error: %s", errt)
        except requests.exceptions.RequestException as err:
            logger.exception("Error: %s", err)

    def del_slice(self, data):
        """
        Delete a configured radio slice.

        NOTE(review): placeholder implementation - only logs the request.
        """
        logger.info("Deleting Radio Slice Configuration")
def check_runtime_errors(nest):
    """
    Re-evaluate a slice's status from its recorded runtime errors, persist it,
    and fan the change out to NEAT policy engines and the monitoring topic.
    """
    slice_id = nest["_id"]
    new_status = "runtime_error" if nest["runtime_errors"] else "Running"

    # When APEX integration is enabled, notify every registered NEAT policy.
    if os.getenv("APEX", None):
        for neat_entry in mongoUtils.find_all("policy", {"type": "neat"}):
            # Rehydrate the stored NEAT client object from the policy_obj collection
            neat_client = pickle.loads(mongoUtils.get("policy_obj", neat_entry["_id"])["obj"])
            neat_client.notify(alert_type="FailingNS", slice_id=slice_id, status=False)

    # Persist the recomputed status on the slice document
    nest["status"] = new_status
    mongoUtils.update("slice", slice_id, nest)

    # Push the status change to the monitoring pipeline, if enabled for this slice
    if nest["slice_monitoring"]:
        mon_producer = create_producer()
        mon_producer.send(
            "nfv_mon",
            value={
                "action": "katana_mon",
                "slice_info": {"slice_id": nest["_id"], "status": new_status},
            },
        )
def timeout(func, seconds=5):
    """
    Wrap `func` so it runs in a separate process and is forcibly terminated
    when it exceeds `seconds` (default 5, the original hard-coded limit, now
    a backward-compatible parameter).

    Note: the wrapped call's return value is discarded (a Process cannot hand
    a value back); the wrapper returns None on success.

    Raises:
        TimeoutError: if the call had to be terminated.
        AttributeError: if the call exited with a non-zero status (the most
            probable underlying failure, per the original design).
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        action = Process(target=func, args=args, kwargs=kwargs)
        action.start()
        action.join(timeout=seconds)
        if action.is_alive():
            # Still running past the deadline: terminate and reap the process.
            action.terminate()
            action.join()
            raise TimeoutError(f"{func.__name__} did not finish within {seconds} seconds")
        # A non-zero exit code means the function raised or the process crashed.
        if action.exitcode != 0:
            raise AttributeError(f"{func.__name__} exited with code {action.exitcode}")

    return wrapper


class Kubernetes:
    """
    Class implementing the communication API with Kubernetes
    """
class Wim:
    """
    Class implementing the communication API with WIM.

    Slice lifecycle requests are published on the WIM's "wan-slice" Kafka topic.
    """

    def __init__(self, url):
        """
        Initialize an object of the class
        """
        self.url = url

    def _publish(self, wim_message, log_line):
        # Producer pointed at the WIM's own Kafka broker (port 9092)
        servers = [f"{self.url}:9092"]
        kafka_producer = kafkaUtils.create_producer(bootstrap_servers=servers)
        kafka_producer.send("wan-slice", value=wim_message)
        logger.info(log_line)

    def create_slice(self, wsd):
        """
        Create the transport network slice
        """
        self._publish(
            {"action": "create", "data": wsd},
            "Sent WAN Slice Creation request to WIM",
        )

    def del_slice(self, slice_id):
        """
        Delete the transport network slice
        """
        self._publish(
            {"action": "terminate", "data": slice_id},
            "Sent WAN Slice Termination request to WIM",
        )
class Wim:
    """
    Class implementing the communication API with WIM
    """

    def __init__(self, url):
        """
        Initialize an object of the class

        url: base URL of the WIM endpoint.
        """
        self.url = url

    def create_slice(self, wsd):
        """
        Create the transport network slice.

        wsd: JSON-serializable WAN slice descriptor POSTed to the WIM.
        All request errors are logged with traceback instead of being raised.
        """
        wim_url = self.url
        api_prefix = ""
        url = wim_url + api_prefix
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
        data = wsd
        r = None
        try:
            r = requests.post(url, headers=headers, json=json.loads(json.dumps(data)), timeout=10)
            logger.info(r.json())
            r.raise_for_status()
        # BUG FIX: logger.exception("...:", err) supplied an argument with no
        # %-style placeholder, producing a logging formatting error. Use %s.
        except requests.exceptions.HTTPError as errh:
            logger.exception("Http Error: %s", errh)
        except requests.exceptions.ConnectionError as errc:
            logger.exception("Error Connecting: %s", errc)
        except requests.exceptions.Timeout as errt:
            logger.exception("Timeout Error: %s", errt)
        except requests.exceptions.RequestException as err:
            logger.exception("Error: %s", err)

    def del_slice(self, wsd):
        """
        Delete the transport network slice.

        NOTE(review): placeholder implementation - only logs the request.
        """
        logger.info("Deleting Transport Network Slice")
-------------------------------------------------------------------------------- /katana-nbi/.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .dockerignore -------------------------------------------------------------------------------- /katana-nbi/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7.4-slim 2 | 3 | RUN apt-get update && apt-get install -qq -y \ 4 | build-essential libpq-dev --no-install-recommends 5 | 6 | ENV INSTALL_PATH /katana-nbi 7 | RUN mkdir -p $INSTALL_PATH 8 | 9 | WORKDIR $INSTALL_PATH 10 | 11 | COPY katana-prometheus/wim_targets.json /targets/wim_targets.json 12 | COPY katana-prometheus/vim_targets.json /targets/vim_targets.json 13 | 14 | COPY katana-nbi/. . 15 | RUN pip install --upgrade pip 16 | RUN pip install -r requirements.txt 17 | 18 | CMD gunicorn -b 0.0.0.0:8000 --access-logfile - --reload "katana.app:create_app()" -------------------------------------------------------------------------------- /katana-nbi/README.md: -------------------------------------------------------------------------------- 1 | # Katana NBI 2 | 3 | Service that implements the NBI of Katana Slice Manager. 
class AlertView(FlaskView):
    route_prefix = "/api/"

    def post(self):
        """
        Receive an alert batch from the alert manager. For every "NSFailing"
        alert: mark the failing NS instance on the stored NEST, record the
        runtime error, and - when APEX is enabled - forward the alert to APEX.
        """
        alert_message = request.json
        for alert in alert_message["alerts"]:
            labels = alert["labels"]
            # Only "NSFailing" alerts are handled here
            if labels["alertname"] != "NSFailing":
                continue
            ns_name_parts = labels["ns_name"].split("__")
            ns_id = ns_name_parts[1].replace("_", "-")
            location = ns_name_parts[2]
            slice_id = labels["slice_id"]
            logger.warning(f"Failing Network Service {ns_id} in {location} for slice {slice_id}")
            # Mark the NS instance as failed on the stored NEST
            nest = mongoUtils.get("slice", slice_id)
            try:
                nest["ns_inst_info"][ns_id][location]["status"] = "Error"
            except KeyError:
                pass
            # Record the failing NS among the slice's runtime errors
            nest["runtime_errors"].setdefault("ns", []).append(ns_id)
            check_runtime_errors(nest)
            # Forward the alert to APEX when the policy engine is enabled
            if os.getenv("APEX", None):
                apex_message = {
                    "name": "SMAlert",
                    "nameSpace": "sm.alert.manager.events",
                    "version": "0.0.1",
                    "source": "SMAlertManager",
                    "target": "APEX",
                    "sliceId": slice_id,
                    "alertType": "FailingNS",
                    "alertMessage": {"NS_ID": ns_id, "NSD_ID": location, "status": "down"},
                }
                sleep(10)
                apex_producer = create_producer()
                logger.info(f"Sending alert to APEX {apex_message}")
                apex_producer.send("apex-in-0", value=apex_message)
        return "Alert received", 200
class BootstrapView(FlaskView):
    route_prefix = "/api/"

    def post(self):
        """
        Add a new configuration file to the SM.
        used by: `katana bootstrap -f [file]`

        Replays each component section (vim/nfvo/ems/wim/function) of the
        uploaded file against the corresponding local NBI endpoint.
        """
        data_fields = ["vim", "nfvo", "ems", "wim", "function"]
        data = request.json
        for field in data_fields:
            component_list = data.get(field, [])
            for component_data in component_list:
                url = f"http://localhost:8000/api/{field}"
                r = None
                try:
                    r = requests.post(url, json=component_data, timeout=30)
                    r.raise_for_status()
                    logger.info(r.text)
                # BUG FIX: error paths used print(), so failures never reached
                # the configured katana.log; route them through the module
                # logger (with traceback via logger.exception) instead.
                except requests.exceptions.HTTPError as errh:
                    logger.exception("Http Error: %s", errh)
                    logger.info(r.text)
                except requests.exceptions.ConnectionError as errc:
                    logger.exception("Error Connecting: %s", errc)
                except requests.exceptions.Timeout as errt:
                    logger.exception("Timeout Error: %s", errt)
                except requests.exceptions.RequestException as err:
                    logger.exception("Error: %s", err)
        return "Succesfully configured SM", 201
class GstView(FlaskView):
    route_prefix = "/api/"

    def index(self):
        """
        Returns a list of GST and their details,
        used by: `katana gst ls`
        """
        summaries = [{"_id": entry["_id"]} for entry in mongoUtils.index("gst")]
        return dumps(summaries), 200

    def get(self, uuid):
        """
        Returns the details of specific GST,
        used by: `katana gst inspect [uuid]`
        """
        data = mongoUtils.get("gst", uuid)
        if not data:
            return "Not Found", 404
        return dumps(data), 200
""" 35 | location_data = mongoUtils.index("location") 36 | return dumps(location_data), 200 37 | 38 | def get(self, uuid): 39 | """ 40 | Returns the details of a specific platform location 41 | used by: `katana location inspect [uuid]` 42 | """ 43 | data = mongoUtils.get("location", uuid) 44 | if data: 45 | return dumps(data), 200 46 | else: 47 | return f"Location {uuid} not found", 404 48 | 49 | def post(self): 50 | """ 51 | Register a new platform location 52 | used by: `katana location add -f [file]` 53 | """ 54 | # Generate a new uuid 55 | new_uuid = str(uuid.uuid4()) 56 | request.json["_id"] = new_uuid 57 | request.json["created_at"] = time.time() # unix epoch 58 | request.json["vims"] = [] 59 | request.json["functions"] = [] 60 | for ifield in self.req_fields: 61 | if not request.json.get(ifield, None): 62 | return f"Field {ifield} is missing" 63 | else: 64 | # Lowercase the location 65 | request.json["id"] = request.json["id"].lower() 66 | try: 67 | new_uuid = mongoUtils.add("location", request.json) 68 | except pymongo.errors.DuplicateKeyError: 69 | return (f"Location {request.json['id']} is already registered", 400) 70 | return new_uuid, 201 71 | 72 | def delete(self, uuid): 73 | """ 74 | Delete a registered platform location 75 | used by: `katana location rm [uuid] 76 | """ 77 | del_location = mongoUtils.get("location", uuid) 78 | if del_location["id"] == "core": 79 | return "You cannot delete core location", 400 80 | if del_location: 81 | if del_location["vims"] or del_location["functions"]: 82 | return ( 83 | f"Location {uuid} is in use by another component, cannot update it", 84 | 400, 85 | ) 86 | del_location = mongoUtils.delete("location", uuid) 87 | return f"Deleted location {uuid}", 200 88 | else: 89 | return f"Error: No such location {uuid}", 404 90 | 91 | def put(self, uuid): 92 | """ 93 | Update a registered platform location 94 | used by: `katana location update [uuid] -f [file]` 95 | """ 96 | for ifield in self.req_fields: 97 | if not 
request.json.get(ifield, None): 98 | return f"Field {ifield} is missing" 99 | else: 100 | # Lowercase the location 101 | request.json["id"] = request.json["id"].lower() 102 | data = request.json 103 | data["_id"] = uuid 104 | old_data = mongoUtils.get("location", uuid) 105 | if old_data: 106 | if old_data["vims"] or old_data["functions"]: 107 | return ( 108 | f"Location {data['_id']} is in use by another component, cannot update it", 109 | 400, 110 | ) 111 | data["created_at"] = old_data["created_at"] 112 | data["vims"] = [] 113 | data["functions"] = [] 114 | mongoUtils.update("location", uuid, data) 115 | return f"Modified location {data['id']}", 200 116 | else: 117 | data["created_at"] = time.time() # unix epoch 118 | data["vims"] = [] 119 | data["functions"] = [] 120 | new_uuid = mongoUtils.add("location", request.json) 121 | return new_uuid, 201 122 | -------------------------------------------------------------------------------- /katana-nbi/katana/api/nslist.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import logging 3 | from logging import handlers 4 | import pickle 5 | 6 | from bson.json_util import dumps 7 | from flask_classful import FlaskView 8 | from flask import request 9 | 10 | from katana.shared_utils.mongoUtils import mongoUtils 11 | from katana.shared_utils.nfvoUtils import osmUtils 12 | 13 | # Logging Parameters 14 | logger = logging.getLogger(__name__) 15 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 16 | stream_handler = logging.StreamHandler() 17 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 18 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 19 | file_handler.setFormatter(formatter) 20 | stream_handler.setFormatter(stream_formatter) 21 | logger.setLevel(logging.DEBUG) 22 | logger.addHandler(file_handler) 23 | logger.addHandler(stream_handler) 24 | 25 | 
class NslistView(FlaskView):
    route_prefix = "/api/"

    def get(self):
        """
        Returns a list with all the onboarded nsds,
        used by: `katana ns ls`
        """
        # Refresh the NSD catalogue: bootstrap every registered NFVO first
        nfvo_obj_list = list(mongoUtils.find_all("nfvo_obj"))
        for stored_nfvo in nfvo_obj_list:
            pickle.loads(stored_nfvo["obj"]).bootstrapNfvo()

        # Optional filters supplied as query parameters
        search_params = {}
        nsd_id = request.args.get("nsd-id", None)
        if nsd_id:
            search_params["nsd-id"] = nsd_id
        nfvo_id = request.args.get("nfvo-id", None)
        if nfvo_id:
            search_params["nfvo_id"] = nfvo_id

        # Return the (possibly filtered) list
        return dumps(mongoUtils.find_all("nsd", search_params)), 200
def get_func(filter_data=None):
    """
    Return the list of available Network Functions.

    Args:
        filter_data: optional Mongo filter dict; a "type": 1 constraint is
            always added on top of it. Defaults to no extra filtering.

    Returns:
        list of dicts summarizing each stored network function.
    """
    # Copy before adding the type constraint: the previous signature used a
    # mutable default ({}) and mutated it — and any caller-supplied dict —
    # by writing filter_data["type"] = 1 into it.
    query = dict(filter_data or {})
    query["type"] = 1
    functions = []
    for iserv in mongoUtils.find_all("func", data=query):
        functions.append(
            dict(
                DB_ID=iserv["_id"],
                # gen is stored as an int generation number (4 -> "4G")
                gen="4G" if iserv["gen"] == 4 else "5G",
                # func flag: 0 -> Core, otherwise Radio
                functionality="Core" if iserv["func"] == 0 else "Radio",
                pnf_list=iserv.get("pnf_list", []),
                function_id=iserv["id"],
                location=iserv["location"],
                tenants=iserv["tenants"],
                shared=iserv["shared"],
                created_at=iserv["created_at"],
            )
        )
    return functions
exists 106 | if not mongoUtils.find("location", {"id": location_id}): 107 | return f"Location {uuid} not found", 404 108 | # Get VIMs 109 | filter_data = {"location": location_id} 110 | vims = get_vims(filter_data) 111 | # Get Functions 112 | functions = get_func(filter_data) 113 | resources = {"VIMs": vims, "Functions": functions} 114 | return dumps(resources), 200 115 | 116 | @route("/update", methods=["GET", "POST"]) 117 | def update(self): 118 | """ 119 | Update the resource database for the stored VIMs 120 | """ 121 | thread = Thread(target=vim_update) 122 | thread.start() 123 | return "Updating resource database", 200 124 | -------------------------------------------------------------------------------- /katana-nbi/katana/api/slice.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from logging import handlers 3 | import uuid 4 | 5 | from bson.json_util import dumps 6 | from flask import request 7 | from flask_classful import FlaskView, route 8 | import urllib3 9 | 10 | from katana.shared_utils.kafkaUtils import kafkaUtils 11 | from katana.shared_utils.mongoUtils import mongoUtils 12 | from katana.slice_mapping import slice_mapping 13 | 14 | # Logging Parameters 15 | logger = logging.getLogger(__name__) 16 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 17 | stream_handler = logging.StreamHandler() 18 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 19 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 20 | file_handler.setFormatter(formatter) 21 | stream_handler.setFormatter(stream_formatter) 22 | logger.setLevel(logging.DEBUG) 23 | logger.addHandler(file_handler) 24 | logger.addHandler(stream_handler) 25 | 26 | 27 | class SliceView(FlaskView): 28 | """ 29 | Returns a list of slices and their details, 30 | used by: `katana slice ls` 31 | """ 32 | 33 | urllib3.disable_warnings() 34 | 
route_prefix = "/api/" 35 | 36 | def index(self): 37 | """ 38 | Returns a list of slices and their details, 39 | used by: `katana slice ls` 40 | """ 41 | slice_data = mongoUtils.index("slice") 42 | return_data = [] 43 | for islice in slice_data: 44 | return_data.append(dict(_id=islice["_id"], name=islice["slice_name"], created_at=islice["created_at"], status=islice["status"],)) 45 | return dumps(return_data), 200 46 | 47 | def get(self, uuid): 48 | """ 49 | Returns the details of specific slice, 50 | used by: `katana slice inspect [uuid]` 51 | """ 52 | data = mongoUtils.get("slice", uuid) 53 | if data: 54 | return dumps(data), 200 55 | else: 56 | return "Not Found", 404 57 | 58 | @route("//time") 59 | def show_time(self, uuid): 60 | """ 61 | Returns deployment time of a slice 62 | """ 63 | islice = mongoUtils.get("slice", uuid) 64 | if islice: 65 | return dumps(islice["deployment_time"]), 200 66 | else: 67 | return "Not Found", 404 68 | 69 | @route("//modify", methods=["POST"]) 70 | def modify(self, uuid): 71 | """ 72 | Update the details of a specific slice. 73 | used by: `katana slice modify -f [file] [uuid]` 74 | """ 75 | result = mongoUtils.get("slice", uuid) 76 | if not result: 77 | return f"Error: No such slice: {uuid}", 404 78 | # Send the message to katana-mngr 79 | producer = kafkaUtils.create_producer() 80 | slice_message = {"action": "update", "slice_id": uuid, "updates": request.json} 81 | producer.send("slice", value=slice_message) 82 | return f"Updating {uuid}", 200 83 | 84 | def post(self): 85 | """ 86 | Add a new slice. The request must provide the slice details. 
87 | used by: `katana slice add -f [file]` 88 | """ 89 | new_uuid = str(uuid.uuid4()) 90 | request.json["_id"] = new_uuid 91 | # Get the NEST from the Slice Mapping process 92 | nest, error_code = slice_mapping.nest_mapping(request.json) 93 | 94 | if error_code: 95 | return nest, error_code 96 | 97 | # Send the message to katana-mngr 98 | producer = kafkaUtils.create_producer() 99 | slice_message = {"action": "add", "message": nest} 100 | producer.send("slice", value=slice_message) 101 | 102 | return new_uuid, 201 103 | 104 | def delete(self, uuid): 105 | """ 106 | Delete a specific slice. 107 | used by: `katana slice rm [uuid]` 108 | """ 109 | 110 | # Check if slice uuid exists 111 | delete_json = mongoUtils.get("slice", uuid) 112 | try: 113 | force = request.args["force"] 114 | except KeyError: 115 | force = None 116 | else: 117 | force = force if force == "true" else None 118 | 119 | if not delete_json: 120 | return f"Error: No such slice: {uuid}", 404 121 | else: 122 | # Send the message to katana-mngr 123 | producer = kafkaUtils.create_producer() 124 | slice_message = {"action": "delete", "message": uuid, "force": force} 125 | producer.send("slice", value=slice_message) 126 | return f"Deleting {uuid}", 200 127 | 128 | @route("/errors") 129 | def show_errors(self, uuid): 130 | """ 131 | Display the runitime errors of a slice 132 | """ 133 | data = mongoUtils.get("slice", uuid) 134 | if data: 135 | runtime_errors = data["runtime_errors"] 136 | return dumps(runtime_errors), 200 137 | else: 138 | return "Slice not found", 404 139 | -------------------------------------------------------------------------------- /katana-nbi/katana/api/slice_des.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import logging 3 | from logging import handlers 4 | import uuid 5 | 6 | from bson.json_util import dumps 7 | from flask import request 8 | from flask_classful import FlaskView 9 | 10 | from 
katana.shared_utils.mongoUtils import mongoUtils 11 | 12 | # Logging Parameters 13 | logger = logging.getLogger(__name__) 14 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 15 | stream_handler = logging.StreamHandler() 16 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 17 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 18 | file_handler.setFormatter(formatter) 19 | stream_handler.setFormatter(stream_formatter) 20 | logger.setLevel(logging.DEBUG) 21 | logger.addHandler(file_handler) 22 | logger.addHandler(stream_handler) 23 | 24 | 25 | class Base_slice_desView(FlaskView): 26 | route_prefix = "/api/" 27 | 28 | def index(self): 29 | """ 30 | Returns a list of Slice Descriptors and their details, 31 | used by: `katana slice_des ls` 32 | """ 33 | slice_des_data = mongoUtils.index("base_slice_des_ref") 34 | return_data = [] 35 | for islicedes in slice_des_data: 36 | return_data.append(dict(_id=islicedes["_id"], base_slice_des_id=islicedes["base_slice_des_id"])) 37 | return dumps(return_data), 200 38 | 39 | def post(self): 40 | """ 41 | Add a new base slice descriptor. The request must provide the base 42 | slice descriptor details. Used by: `katana slice_des add -f [file]` 43 | """ 44 | new_uuid = str(uuid.uuid4()) 45 | data = request.json 46 | data["_id"] = new_uuid 47 | return str(mongoUtils.add("base_slice_des_ref", data)), 201 48 | 49 | def get(self, uuid): 50 | """ 51 | Returns the details of specific Slice Descriptor, 52 | used by: `katana slice_des inspect [uuid]` 53 | """ 54 | data = mongoUtils.get("base_slice_des_ref", uuid) 55 | if data: 56 | return dumps(data), 200 57 | else: 58 | return "Not Found", 404 59 | 60 | def put(self, uuid): 61 | """ 62 | Add or update a new base slice descriptor. 63 | The request must provide the service details. 
64 | used by: `katana slice_des update -f [file]` 65 | """ 66 | data = request.json 67 | data["_id"] = uuid 68 | old_data = mongoUtils.get("base_slice_des_ref", uuid) 69 | 70 | if old_data: 71 | mongoUtils.update("base_slice_des_ref", uuid, data) 72 | return f"Modified {uuid}", 200 73 | else: 74 | new_uuid = uuid 75 | data = request.json 76 | data["_id"] = new_uuid 77 | return str(mongoUtils.add("base_slice_des_ref", data)), 201 78 | 79 | def delete(self, uuid): 80 | """ 81 | Delete a specific Slice Descriptor. 82 | used by: `katana slice_des rm [uuid]` 83 | """ 84 | result = mongoUtils.delete("base_slice_des_ref", uuid) 85 | if result: 86 | return f"Deleted Slice Descriptor {uuid}", 200 87 | else: 88 | # if uuid is not found, return error 89 | return f"Error: No such Slice Descriptor: {uuid}", 404 90 | -------------------------------------------------------------------------------- /katana-nbi/katana/app.py: -------------------------------------------------------------------------------- 1 | """ Katana North-Bound Interface - Implemented with Flask""" 2 | 3 | # -*- coding: utf-8 -*- 4 | from flask import Flask 5 | from flask_cors import CORS 6 | 7 | from katana.api.ems import EmsView 8 | from katana.api.function import FunctionView 9 | from katana.api.gst import GstView 10 | from katana.api.nfvo import NFVOView 11 | from katana.api.nslist import NslistView 12 | from katana.api.policy import PolicyView 13 | from katana.api.resource import ResourcesView 14 | from katana.api.slice import SliceView 15 | from katana.api.slice_des import Base_slice_desView 16 | from katana.api.vim import VimView 17 | from katana.api.wim import WimView 18 | from katana.api.bootstrap import BootstrapView 19 | from katana.api.locations import LocationView 20 | from katana.api.alerts import AlertView 21 | 22 | 23 | def create_app(): 24 | """ 25 | Create a Flask application using the app factory pattern. 
26 | 27 | :return: Flask app 28 | """ 29 | app = Flask(__name__, instance_relative_config=True) 30 | 31 | # Enable CORS for the app 32 | CORS(app) 33 | 34 | app.config.from_object("config.settings") 35 | app.config.from_pyfile("settings.py", silent=True) 36 | 37 | VimView.register(app, trailing_slash=False) 38 | WimView.register(app, trailing_slash=False) 39 | EmsView.register(app, trailing_slash=False) 40 | NFVOView.register(app, trailing_slash=False) 41 | SliceView.register(app, trailing_slash=False) 42 | FunctionView.register(app, trailing_slash=False) 43 | Base_slice_desView.register(app, trailing_slash=False) 44 | GstView.register(app, trailing_slash=False) 45 | ResourcesView.register(app, trailing_slash=False) 46 | PolicyView.register(app, trailing_slash=False) 47 | NslistView.register(app, trailing_slash=False) 48 | BootstrapView.register(app, trailing_slash=False) 49 | LocationView.register(app, trailing_slash=False) 50 | AlertView.register(app, trailing_slash=False) 51 | 52 | return app 53 | -------------------------------------------------------------------------------- /katana-nbi/katana/shared_utils/emsUtils/amar_emsUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 12 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | file_handler.setFormatter(formatter) 14 | stream_handler.setFormatter(stream_formatter) 15 | logger.setLevel(logging.DEBUG) 16 | logger.addHandler(file_handler) 17 | logger.addHandler(stream_handler) 18 | 19 | 20 | class Ems: 21 | """ 22 | Class implementing the communication 
API with EMS 23 | """ 24 | 25 | def __init__(self, url): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def conf_radio(self, emsd): 32 | """ 33 | Configure radio components for the newly created slice 34 | """ 35 | ems_url = self.url 36 | api_prefix = "/slice" 37 | url = ems_url + api_prefix 38 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 39 | data = emsd 40 | r = None 41 | try: 42 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 43 | logger.info(r.json()) 44 | r.raise_for_status() 45 | except requests.exceptions.HTTPError as errh: 46 | logger.exception("Http Error:", errh) 47 | except requests.exceptions.ConnectionError as errc: 48 | logger.exception("Error Connecting:", errc) 49 | except requests.exceptions.Timeout as errt: 50 | logger.exception("Timeout Error:", errt) 51 | except requests.exceptions.RequestException as err: 52 | logger.exception("Error:", err) 53 | 54 | def del_slice(self, emsd): 55 | """ 56 | Delete a configured radio slice 57 | """ 58 | logger.info("Deleting Radio Slice Configuration") 59 | -------------------------------------------------------------------------------- /katana-nbi/katana/shared_utils/emsUtils/open5gs_emsUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 12 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | file_handler.setFormatter(formatter) 14 | stream_handler.setFormatter(stream_formatter) 15 | logger.setLevel(logging.DEBUG) 16 | 
logger.addHandler(file_handler) 17 | logger.addHandler(stream_handler) 18 | 19 | 20 | class Ems: 21 | """ 22 | Class implementing the communication API with EMS 23 | """ 24 | 25 | def __init__(self, url): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def conf_radio(self, emsd): 32 | """ 33 | Configure radio components for the newly created slice 34 | """ 35 | ems_url = self.url 36 | api_prefix = "/slice/" 37 | slice_id = emsd["slice_id"] 38 | url = ems_url + api_prefix + slice_id 39 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 40 | data = emsd 41 | r = None 42 | try: 43 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 44 | logger.info(r.json()) 45 | r.raise_for_status() 46 | except requests.exceptions.HTTPError as errh: 47 | logger.exception("Http Error:", errh) 48 | except requests.exceptions.ConnectionError as errc: 49 | logger.exception("Error Connecting:", errc) 50 | except requests.exceptions.Timeout as errt: 51 | logger.exception("Timeout Error:", errt) 52 | except requests.exceptions.RequestException as err: 53 | logger.exception("Error:", err) 54 | 55 | def del_slice(self, emsd): 56 | """ 57 | Delete a configured radio slice 58 | """ 59 | logger.info("Deleting Radio Slice Configuration") 60 | ems_url = self.url 61 | api_prefix = "/slice/" 62 | for iemsd in emsd: 63 | slice_id = iemsd["slice_id"] 64 | url = ems_url + api_prefix + slice_id 65 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 66 | r = None 67 | try: 68 | r = requests.delete(url, timeout=360, headers=headers) 69 | logger.info(r.json()) 70 | r.raise_for_status() 71 | except requests.exceptions.HTTPError as errh: 72 | logger.exception("Http Error:", errh) 73 | except requests.exceptions.ConnectionError as errc: 74 | logger.exception("Error Connecting:", errc) 75 | except requests.exceptions.Timeout as errt: 76 | logger.exception("Timeout Error:", errt) 
77 | except requests.exceptions.RequestException as err: 78 | logger.exception("Error:", err) 79 | -------------------------------------------------------------------------------- /katana-nbi/katana/shared_utils/emsUtils/test_emsUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 12 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | file_handler.setFormatter(formatter) 14 | stream_handler.setFormatter(stream_formatter) 15 | logger.setLevel(logging.DEBUG) 16 | logger.addHandler(file_handler) 17 | logger.addHandler(stream_handler) 18 | 19 | 20 | class Ems: 21 | """ 22 | Class implementing the communication API with EMS 23 | """ 24 | 25 | def __init__(self, url): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def conf_radio(self, emsd): 32 | """ 33 | Configure radio components for the newly created slice 34 | """ 35 | ems_url = self.url 36 | api_prefix = "" 37 | url = ems_url + api_prefix 38 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 39 | data = emsd 40 | r = None 41 | try: 42 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 43 | logger.info(r.json()) 44 | r.raise_for_status() 45 | except requests.exceptions.HTTPError as errh: 46 | logger.exception("Http Error:", errh) 47 | except requests.exceptions.ConnectionError as errc: 48 | logger.exception("Error Connecting:", errc) 49 | except requests.exceptions.Timeout as errt: 50 | logger.exception("Timeout Error:", errt) 51 | except 
requests.exceptions.RequestException as err: 52 | logger.exception("Error:", err) 53 | 54 | def del_slice(self, emsd): 55 | """ 56 | Delete a configured radio slice 57 | """ 58 | logger.info("Deleting Radio Slice Configuration") 59 | -------------------------------------------------------------------------------- /katana-nbi/katana/shared_utils/kafkaUtils/kafkaUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | import time 5 | 6 | from kafka import KafkaAdminClient, KafkaConsumer, KafkaProducer, admin, errors 7 | 8 | # Logging Parameters 9 | logger = logging.getLogger(__name__) 10 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 11 | stream_handler = logging.StreamHandler() 12 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 13 | stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 14 | file_handler.setFormatter(formatter) 15 | stream_handler.setFormatter(stream_formatter) 16 | logger.setLevel(logging.DEBUG) 17 | logger.addHandler(file_handler) 18 | logger.addHandler(stream_handler) 19 | 20 | # NOTE: It is required to have global parameters for kafka objects 21 | consumer, producer, topic = None, None, None 22 | 23 | 24 | def create_consumer(topic_name, bootstrap_servers=None): 25 | global consumer 26 | 27 | bootstrap_servers = bootstrap_servers or ["kafka:19092"] 28 | # Create the kafka consumer 29 | tries = 30 30 | exit = False 31 | while not exit: 32 | try: 33 | consumer = KafkaConsumer( 34 | topic_name, 35 | bootstrap_servers=bootstrap_servers, 36 | auto_offset_reset="earliest", 37 | enable_auto_commit=True, 38 | auto_commit_interval_ms=10000, 39 | group_id="katana-mngr-group", 40 | value_deserializer=lambda m: json.loads(m.decode("utf-8")), 41 | ) 42 | except errors.NoBrokersAvailable as KafkaError: 43 | if tries > 0: 44 | tries -= 1 45 | 
def create_producer(bootstrap_servers=None):
    """
    Create (and cache in the module-global `producer`) a KafkaProducer whose
    message values are JSON-encoded.

    Retries up to 30 times, 5 seconds apart, while no broker is reachable.
    Returns None if no broker ever becomes available — the previous version
    kept retrying forever in a tight loop once the retries were exhausted.
    """
    global producer

    bootstrap_servers = bootstrap_servers or ["kafka:19092"]
    tries = 30
    while True:
        try:
            producer = KafkaProducer(
                bootstrap_servers=bootstrap_servers,
                value_serializer=lambda m: json.dumps(m).encode("utf-8"),
            )
        except errors.NoBrokersAvailable as kafka_error:
            if tries > 0:
                tries -= 1
                logger.warning(f"Kafka not ready yet. Tries remaining: {tries}")
                time.sleep(5)
            else:
                # Give up instead of spinning forever without a sleep
                logger.error(kafka_error)
                break
        else:
            logger.info("New producer")
            break
    return producer
Tries remaining: {tries}") 101 | time.sleep(5) 102 | else: 103 | logger.error(KafkaError) 104 | else: 105 | exit = True 106 | tries = 30 107 | -------------------------------------------------------------------------------- /katana-nbi/katana/shared_utils/mongoUtils/README.md: -------------------------------------------------------------------------------- 1 | # Mongo Collections 2 | 3 | ## Platform Components 4 | * nfvo 5 | * vim 6 | * wim 7 | * ems 8 | 9 | ## Platform Components Binary Objects 10 | * nfvo 11 | * vim 12 | * wim 13 | * ems 14 | 15 | ## Slice Related 16 | * func 17 | * slice 18 | 19 | ## Network Services Related 20 | * nsd 21 | * vnfd -------------------------------------------------------------------------------- /katana-nbi/katana/shared_utils/mongoUtils/mongoUtils.py: -------------------------------------------------------------------------------- 1 | from pymongo import MongoClient, ASCENDING 2 | 3 | 4 | client = MongoClient("mongodb://mongo") 5 | db = client.katana 6 | 7 | # Initialize all collections and create indexes 8 | db.vim.create_index([("id", ASCENDING)], unique=True) 9 | db.nfvo.create_index([("id", ASCENDING)], unique=True) 10 | db.wim.create_index([("id", ASCENDING)], unique=True) 11 | db.ems.create_index([("id", ASCENDING)], unique=True) 12 | db.policy.create_index([("id", ASCENDING)], unique=True) 13 | db.nsd.create_index([("nsd-id", ASCENDING)], unique=True) 14 | db.vnfd.create_index([("vnfd-id", ASCENDING)], unique=True) 15 | db.func.create_index([("id", ASCENDING)], unique=True) 16 | db.location.create_index([("id", ASCENDING)], unique=True) 17 | 18 | 19 | def index(collection_name): 20 | collection = db[collection_name] 21 | return collection.find({}) 22 | 23 | 24 | def get(collection_name, uuid): 25 | collection = db[collection_name] 26 | return collection.find_one({"_id": uuid}) 27 | 28 | 29 | def add(collection_name, json_data): 30 | collection = db[collection_name] 31 | return collection.insert_one(json_data).inserted_id 
def add_many(collection_name, list_data):
    """Insert many documents into the collection; return the inserted _ids."""
    collection = db[collection_name]
    return collection.insert_many(list_data).inserted_ids


def delete(collection_name, uuid):
    """Delete the document whose _id equals uuid; return the deleted count (0 or 1)."""
    result = db[collection_name].delete_one({"_id": uuid}).deleted_count
    return result


def update(collection_name, uuid, json_data):
    """Replace the document with _id == uuid by json_data; return the modified count."""
    collection = db[collection_name]
    return collection.replace_one({"_id": uuid}, json_data).modified_count


def count(collection_name):
    """Return the number of documents in the collection."""
    collection = db[collection_name]
    return collection.count_documents({})


def find(collection_name, data=None):
    """Return the first document matching the filter `data` (any document when omitted)."""
    # None default instead of a shared mutable {} literal
    collection = db[collection_name]
    return collection.find_one(data if data is not None else {})


def find_all(collection_name, data=None):
    """Return a cursor over all documents matching the filter `data`."""
    collection = db[collection_name]
    return collection.find(data if data is not None else {})


def delete_all(collection_name, data=None):
    """Delete every document matching the filter `data`; return the DeleteResult."""
    collection = db[collection_name]
    return collection.delete_many(data if data is not None else {})
| """ 24 | 25 | def __init__(self, url, id): 26 | """ 27 | Initialize an object of the class 28 | """ 29 | self.url = url 30 | 31 | def send_nest(self, data): 32 | """ 33 | Send the nest parameters 34 | """ 35 | api_prefix = "" 36 | url = self.url + api_prefix 37 | headers = {"Content-Type": "application/json", "Accept": "application/json"} 38 | r = None 39 | try: 40 | r = requests.post(url, json=json.loads(json.dumps(data)), timeout=360, headers=headers) 41 | logger.info(r.json()) 42 | r.raise_for_status() 43 | except requests.exceptions.HTTPError as errh: 44 | logger.exception("Http Error:", errh) 45 | except requests.exceptions.ConnectionError as errc: 46 | logger.exception("Error Connecting:", errc) 47 | except requests.exceptions.Timeout as errt: 48 | logger.exception("Timeout Error:", errt) 49 | except requests.exceptions.RequestException as err: 50 | logger.exception("Error:", err) 51 | 52 | def notify(self, alert_type, slice_id, status): 53 | """ 54 | Notify NEAT policy engine 55 | """ 56 | neat_message = { 57 | "slice_id": slice_id, 58 | "type": alert_type, 59 | "value": status, 60 | "ttl": -1, 61 | } 62 | neat_url = self.url + "/event" 63 | neat_headers = {"Content-type": "Application/JSON"} 64 | r = requests.put(neat_url, json=json.loads(json.dumps(neat_message)), headers=neat_headers) 65 | logger.info(f"Notifying NEAT, message: {neat_message}") 66 | -------------------------------------------------------------------------------- /katana-nbi/katana/shared_utils/policyUtils/test_policyUtils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from logging import handlers 4 | 5 | import requests 6 | 7 | # Logging Parameters 8 | logger = logging.getLogger(__name__) 9 | file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5) 10 | stream_handler = logging.StreamHandler() 11 | formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s") 12 
class Policy:
    """
    Class implementing the communication API with Policy System
    """

    def __init__(self, url, id):
        """
        Initialize an object of the class.

        The `id` argument is accepted for interface compatibility but is
        currently unused.
        """
        self.url = url

    def send_nest(self, data):
        """
        Send the nest parameters to the Policy System via HTTP POST.
        Request failures are logged and swallowed (best effort).
        """
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
        try:
            r = requests.post(self.url, json=data, timeout=360, headers=headers)
            logger.info(r.json())
            r.raise_for_status()
        except requests.exceptions.HTTPError as errh:
            # %s-style formatting: the old bare extra argument broke the
            # logging call's message formatting
            logger.exception("Http Error: %s", errh)
        except requests.exceptions.ConnectionError as errc:
            logger.exception("Error Connecting: %s", errc)
        except requests.exceptions.Timeout as errt:
            logger.exception("Timeout Error: %s", errt)
        except requests.exceptions.RequestException as err:
            logger.exception("Error: %s", err)

    def del_slice(self, data):
        """
        Delete a configured radio slice (placeholder: currently only logs).
        """
        logger.info("Deleting Radio Slice Configuration")
def check_runtime_errors(nest):
    """
    Function that checks about slice runtime errors and updates the slice status

    Derives the slice status from nest["runtime_errors"], optionally notifies
    the NEAT policy engine (when the APEX env var is set), persists the status
    back to the "slice" collection, and pushes the status to the monitoring
    topic when slice monitoring is enabled.
    """

    slice_id = nest["_id"]
    # Any recorded runtime error flips the slice out of "Running"
    if nest["runtime_errors"]:
        nest_status = "runtime_error"
    else:
        nest_status = "Running"
    # Notify NEAT
    # NOTE(review): this notifies FailingNS with status=False for every
    # NEAT policy whenever APEX is set, regardless of nest_status — confirm
    # whether it should only fire when runtime errors exist.
    isapex = os.getenv("APEX", None)
    if isapex:
        neat_list = mongoUtils.find_all("policy", {"type": "neat"})
        for ineat in neat_list:
            # Get the NEAT object
            # (unpickles the stored policy driver from the policy_obj collection)
            neat_obj = pickle.loads(mongoUtils.get("policy_obj", ineat["_id"])["obj"])
            neat_obj.notify(alert_type="FailingNS", slice_id=slice_id, status=False)
    nest["status"] = nest_status
    mongoUtils.update("slice", slice_id, nest)
    # Update monitoring status
    if nest["slice_monitoring"]:
        mon_producer = create_producer()
        mon_producer.send(
            "nfv_mon",
            value={
                "action": "katana_mon",
                "slice_info": {"slice_id": nest["_id"], "status": nest_status},
            },
        )
def timeout(func):
    """
    Decorator: run *func* in a separate process and abort after 5 seconds.

    Raises:
        TimeoutError   -- the call did not finish within 5 seconds
        AttributeError -- the subprocess exited with a non-zero code
                          (the wrapped call raised or crashed)

    Note: the wrapped function's return value is discarded, since it runs
    in a child process.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        action = Process(target=func, args=args, kwargs=kwargs)
        action.start()
        action.join(timeout=5)
        if action.is_alive():
            # terminate function
            action.terminate()
            # clean up
            action.join()
            # BUG FIX: raise an instance with context instead of the bare class
            raise TimeoutError(f"{func.__name__} did not finish within 5 seconds")
        # A non-zero exit code means the child raised or died.
        if action.exitcode != 0:
            # NOTE(review): the original deemed AttributeError "the most
            # probable" cause; the exception type is kept for compatibility.
            raise AttributeError(
                f"{func.__name__} failed in subprocess (exit code {action.exitcode})"
            )

    return wrapper
class Wim:
    """
    Class implementing the communication API with WIM
    """

    def __init__(self, url):
        """
        Store the WIM endpoint used to reach its Kafka broker.
        """
        self.url = url

    def _publish(self, message, log_text):
        # The WIM exposes a Kafka broker on port 9092 of its host.
        servers = [f"{self.url}:9092"]
        producer = kafkaUtils.create_producer(bootstrap_servers=servers)
        producer.send("wan-slice", value=message)
        logger.info(log_text)

    def create_slice(self, wsd):
        """
        Create the transport network slice
        """
        self._publish(
            {"action": "create", "data": wsd},
            "Sent WAN Slice Creation request to WIM",
        )

    def del_slice(self, slice_id):
        """
        Delete the transport network slice
        """
        self._publish(
            {"action": "terminate", "data": slice_id},
            "Sent WAN Slice Termination request to WIM",
        )
class Wim:
    """
    Test client implementing the communication API with WIM over plain HTTP.
    """

    def __init__(self, url):
        """
        Store the base URL of the WIM service.
        """
        self.url = url

    def create_slice(self, wsd):
        """
        Create the transport network slice by POSTing the WIM Slice Descriptor.

        Errors are logged (with traceback) rather than propagated.
        """
        wim_url = self.url
        api_prefix = ""
        url = wim_url + api_prefix
        headers = {"Content-Type": "application/json", "Accept": "application/json"}
        data = wsd
        r = None
        try:
            # json= serializes the dict directly; the previous
            # json.loads(json.dumps(data)) round trip was redundant.
            r = requests.post(url, headers=headers, json=data, timeout=10)
            # NOTE(review): r.json() will raise on a non-JSON body — confirm
            # the WIM always replies with JSON.
            logger.info(r.json())
            r.raise_for_status()
        except requests.exceptions.HTTPError as errh:
            # BUG FIX: logger.exception needs %-style placeholders for extra
            # args; the old calls lost the message with a logging error.
            logger.exception("Http Error: %s", errh)
        except requests.exceptions.ConnectionError as errc:
            logger.exception("Error Connecting: %s", errc)
        except requests.exceptions.Timeout as errt:
            logger.exception("Timeout Error: %s", errt)
        except requests.exceptions.RequestException as err:
            logger.exception("Error: %s", err)

    def del_slice(self, wsd):
        """
        Delete the transport network slice.

        Currently only logs the action; no request is sent to the WIM.
        """
        logger.info("Deleting Transport Network Slice")
CMD [ "python", "katana/exporter.py" ]
def create_consumer(topic_name, bootstrap_servers=None):
    """
    Create a Kafka consumer for *topic_name*, retrying while Kafka boots.

    Retries up to 30 times, 5 seconds apart, while no broker is available.

    Returns:
        The KafkaConsumer, or None if Kafka never became reachable.
    """
    global consumer

    bootstrap_servers = bootstrap_servers or ["kafka:19092"]
    # Create the kafka consumer
    tries = 30
    while True:
        try:
            consumer = KafkaConsumer(
                topic_name,
                bootstrap_servers=bootstrap_servers,
                auto_offset_reset="earliest",
                enable_auto_commit=True,
                auto_commit_interval_ms=10000,
                group_id="katana-mngr-group",
                value_deserializer=lambda m: json.loads(m.decode("utf-8")),
            )
        except errors.NoBrokersAvailable as kafka_error:
            if tries > 0:
                tries -= 1
                logger.warning(f"Kafka not ready yet. Tries remaining: {tries}")
                time.sleep(5)
            else:
                # BUG FIX: previously the loop never exited after the retries
                # were exhausted, busy-looping and logging the error forever.
                logger.error(kafka_error)
                return None
        else:
            logger.info("New consumer")
            return consumer
Tries remaining: {tries}") 70 | time.sleep(5) 71 | else: 72 | logger.error(KafkaError) 73 | else: 74 | logger.info("New producer") 75 | exit = True 76 | tries = 30 77 | return producer 78 | 79 | 80 | def create_topic(topic_name, bootstrap_servers=None): 81 | global topic 82 | 83 | bootstrap_servers = bootstrap_servers or ["kafka:19092"] 84 | # Create the kafka topic 85 | tries = 30 86 | exit = False 87 | while not exit: 88 | try: 89 | try: 90 | topic = admin.NewTopic(name=topic_name, num_partitions=1, replication_factor=1) 91 | broker = KafkaAdminClient(bootstrap_servers=bootstrap_servers) 92 | broker.create_topics([topic]) 93 | except errors.TopicAlreadyExistsError: 94 | logger.warning("Topic exists already") 95 | else: 96 | logger.info("New topic") 97 | except errors.NoBrokersAvailable as KafkaError: 98 | if tries > 0: 99 | tries -= 1 100 | logger.warning(f"Kafka not ready yet. Tries remaining: {tries}") 101 | time.sleep(5) 102 | else: 103 | logger.error(KafkaError) 104 | else: 105 | exit = True 106 | tries = 30 107 | -------------------------------------------------------------------------------- /katana-nfv_mon/katana/utils/mongoUtils/README.md: -------------------------------------------------------------------------------- 1 | # Mongo Collections 2 | 3 | ## Platform Components 4 | * nfvo 5 | * vim 6 | * wim 7 | * ems 8 | 9 | ## Platform Components Binary Objects 10 | * nfvo 11 | * vim 12 | * wim 13 | * ems 14 | 15 | ## Slice Related 16 | * func 17 | * slice 18 | 19 | ## Network Services Related 20 | * nsd 21 | * vnfd -------------------------------------------------------------------------------- /katana-nfv_mon/katana/utils/mongoUtils/mongoUtils.py: -------------------------------------------------------------------------------- 1 | from pymongo import MongoClient, ASCENDING 2 | 3 | 4 | client = MongoClient("mongodb://mongo") 5 | db = client.katana 6 | 7 | # Initialize all collections and create indexes 8 | db.vim.create_index([('id', ASCENDING)], 
def index(collection_name):
    """Return a cursor over every document in the collection."""
    collection = db[collection_name]
    return collection.find({})


def get(collection_name, uuid):
    """Return the document whose _id equals uuid, or None."""
    collection = db[collection_name]
    return collection.find_one({"_id": uuid})


def add(collection_name, json_data):
    """Insert one document and return its _id."""
    collection = db[collection_name]
    return collection.insert_one(json_data).inserted_id


def add_many(collection_name, list_data):
    """Insert many documents and return the list of their _ids."""
    collection = db[collection_name]
    return collection.insert_many(list_data).inserted_ids


def delete(collection_name, uuid):
    """Delete the document with the given _id; return the deleted count (0 or 1)."""
    result = db[collection_name].delete_one({"_id": uuid}).deleted_count
    return result


def update(collection_name, uuid, json_data):
    """Replace the document with the given _id; return the modified count."""
    collection = db[collection_name]
    return collection.replace_one({"_id": uuid}, json_data).modified_count


def count(collection_name):
    """Return the number of documents in the collection."""
    collection = db[collection_name]
    return collection.count_documents({})


def find(collection_name, data=None):
    """Return the first document matching *data* (any document when omitted).

    BUG FIX: the filter previously used a mutable default argument ({}).
    """
    collection = db[collection_name]
    return collection.find_one(data if data is not None else {})


def find_all(collection_name, data=None):
    """Return a cursor over the documents matching *data* (all when omitted)."""
    collection = db[collection_name]
    return collection.find(data if data is not None else {})


def delete_all(collection_name, data=None):
    """Delete every document matching *data* (all when omitted)."""
    collection = db[collection_name]
    return collection.delete_many(data if data is not None else {})
class MonThread(threading.Thread):
    """
    Per Network Service monitoring thread: polls the NFVO every 10 seconds
    and publishes the NS status to a Prometheus gauge (self.ns_status).

    Gauge values observed in this code: 2 = NSR missing, 3 = not running,
    4 = terminating, 5 = admin stopped.
    """

    def __init__(self, ns, ns_status, ns_name, slice_id):
        """
        ns        -- network-service dict (must carry "nfvo-id" and "nfvo_inst_ns")
        ns_status -- Prometheus gauge labelled by (slice_id, ns_name)
        ns_name   -- name used as the gauge label
        slice_id  -- owning slice id used as the gauge label
        """
        super().__init__()
        self.ns = ns
        self.ns_status = ns_status
        self.ns_name = ns_name
        self.slice_id = slice_id
        # BUG FIX: previously stored as self._stop, shadowing CPython's
        # internal Thread._stop() method and breaking Thread.join() with
        # "TypeError: 'Event' object is not callable".
        self._stop_event = threading.Event()

    def run(self):
        """
        Poll the NFVO for the NS record until stop() is called, updating the
        gauge on every iteration (10 s period).
        """
        while not self.stopped():
            target_nfvo = mongoUtils.find("nfvo", {"id": self.ns["nfvo-id"]})
            if target_nfvo["type"] == "OSM":
                target_nfvo_obj = osmUtils.Osm(
                    target_nfvo["id"],
                    target_nfvo["nfvoip"],
                    target_nfvo["nfvousername"],
                    target_nfvo["nfvopassword"],
                )
            else:
                logger.error("Not supported NFVO type")
                return
            insr = target_nfvo_obj.getNsr(self.ns["nfvo_inst_ns"])
            if not insr:
                self.ns_status.labels(self.slice_id, self.ns_name).set(2)
            elif insr["operational-status"] == "terminating":
                self.ns_status.labels(self.slice_id, self.ns_name).set(4)
            elif insr["operational-status"] != "running":
                self.ns_status.labels(self.slice_id, self.ns_name).set(3)
            # Wait with the event so stop() interrupts the sleep immediately.
            self._stop_event.wait(timeout=10)

    def ns_stop(self):
        """
        Sets the status of the metric to 5 (admin stopped)
        """
        self.ns_status.labels(self.slice_id, self.ns_name).set(5)

    def stopped(self):
        """
        Checks if the thread has stopped
        """
        return self._stop_event.is_set()

    def stop(self):
        """
        Stops the thread
        """
        self._stop_event.set()
Container name: katana-prometheus 4 | 5 | > Visit the Wiki Page for documentation 6 | -------------------------------------------------------------------------------- /katana-prometheus/alerts.yml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: BuiltInAlets 3 | rules: 4 | - alert: NSFailing 5 | expr: (ns_status == 2 OR ns_status == 3) AND ON(slice_id) katana_status != 12 -------------------------------------------------------------------------------- /katana-prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | evaluation_interval: 10s 3 | scrape_interval: 10s 4 | 5 | # Alertmanager configuration 6 | alerting: 7 | alertmanagers: 8 | - static_configs: 9 | - targets: 10 | - katana-alertmanager:9093 11 | 12 | # Load alerts 13 | rule_files: 14 | - /etc/prometheus/rules/alerts.yml 15 | 16 | # Define the targets 17 | scrape_configs: 18 | - job_name: SB_Components 19 | scheme: http 20 | static_configs: 21 | - targets: [] 22 | file_sd_configs: 23 | - files: 24 | - /etc/prometheus/targets/wim_targets.json 25 | - /etc/prometheus/targets/vim_targets.json 26 | refresh_interval: 30s 27 | - job_name: katana 28 | scheme: http 29 | static_configs: 30 | - targets: 31 | - localhost:9090 32 | - katana-nfv_mon:8002 33 | -------------------------------------------------------------------------------- /katana-prometheus/vim_targets.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "targets": [], 4 | "labels": {} 5 | }, 6 | { 7 | "targets": [], 8 | "labels": {} 9 | } 10 | ] -------------------------------------------------------------------------------- /katana-prometheus/wim_targets.json: -------------------------------------------------------------------------------- 1 | [{"targets": [], "labels": {}}] 2 | -------------------------------------------------------------------------------- 
/katana-swagger/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM swaggerapi/swagger-ui:v3.36.1 2 | 3 | COPY katana-swagger/swagger.json /my_swagger/swagger.json 4 | COPY katana-swagger/fixIP.sh /my_swagger/fixIP.sh 5 | COPY katana-swagger/fixVersion.sh /my_swagger/fixVersion.sh -------------------------------------------------------------------------------- /katana-swagger/README.md: -------------------------------------------------------------------------------- 1 | # Katana Swagger 2 | 3 | Service that hosts the swagger service. Container name: katana-swagger 4 | 5 | > Visit the Wiki Page for documentation 6 | -------------------------------------------------------------------------------- /katana-swagger/fixIP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Add the Katana Host IP Address on swagger.json 4 | sed -i "s?katana.host?${KATANA_HOST}?" "/my_swagger/swagger.json" 5 | -------------------------------------------------------------------------------- /katana-swagger/fixVersion.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Add the Katana Version on swagger.json 4 | sed -i "s?katana.version?${KATANA_VERSION}?" "/my_swagger/swagger.json" 5 | -------------------------------------------------------------------------------- /templates/README.md: -------------------------------------------------------------------------------- 1 | # Katana Slice Information Model - Generic Slice Template (GST) 2 | 3 | ## Roles in network slicing 4 | Multiple roles related to network slicing are specified in 3GPP TS 28.530. In this document the following roles are used: 5 | • Communication Service Customer (CSC): Uses communication services, e.g. end user, tenant, vertical. 6 | • Communication Service Provider (CSP): Provides communication services. Designs, builds and operates its communication services. 
The CSP provided communication service can be built with or without network slice. 7 | • Network Operator (NOP): Provides network services. Designs, builds and operates its networks to offer such services. 8 | • Network Slice Customer (NSC): The Communication Service Provider (CSP) or Communication Service Customer (CSC) who uses Network Slice as a Service. 9 | • Network Slice Provider (NSP): The Communication Service Provider (CSP) or Network Operator (NOP) who provides Network Slice as a Service. 10 | 11 | ## GST and NEST 12 | The Generic Slice Template (GST) is a set of attributes that can characterise a type of network slice/service. GST is generic and is not tied to any specific network deployment. The NEtwork Slice Type (NEST) is a GST filled with values. The attributes and their values are assigned to fulfil a given set of requirements derived from a network slice customer use case. The NEST is an input to the network slice (instance) preparation performed by the Network Slice Manager. One or more NSIs (Network Slice Instance as defined in 3GPP TS 23.501) can be created out of the same NEST, but also existing NSI(s) may be reused 13 | 14 | A NEST is sent to Katana Slice Manager with a slice creation request. It is then parsed by the Slice Mapping process, which, in combination with the supported network functions by the underlying infrastructure, creates the slice. 15 | 16 | ## How to use 17 | ### Structure 18 | Katana Slice GST is used for the creation of new slices. It has three sections: 19 | 20 | * Slice Descriptor (slice_descriptor) 21 | * Vertical Services Descriptor (service_descriptor) 22 | * Test Descriptor (test_descriptor) 23 | 24 | > Slice Descriptor is mandatory object while the other two are optional. 25 | These descriptors define parameters that will be used for the creation of the new slice. 26 | 27 | ### On-board descriptors 28 | Descriptors for each sections can be on-boarded to Katana Slice Manager before the slice creation phase. 
Katana returns lists with all the onboarded descriptors. The on-boarded descriptors can be referenced in a GST during the creation of a slice, instead of defining a new descriptor on the GST. 29 | 30 | ### Overwrite parameter values of a referenced descriptor 31 | You can reference a previously on-boarded descriptor on a GST, and also define some parameter values for that sector. This will use the referenced descriptor as a base, replacing the parameters with the ones defined on the GST. 32 | 33 | 34 | ## Sources 35 | The GST is based on the [GSMA GST v1.0](https://www.gsma.com/newsroom/wp-content/uploads//NG.116-v1.0.pdf) 36 | and [GSMA GST v2.0](https://www.gsma.com/newsroom/wp-content/uploads//NG.116-v2.0.pdf) 37 | 38 | Katana Slice Information Model follows the JSON Schema model, on which OpenAPIs are based: 39 | 40 | * [Source](http://json-schema.org/) 41 | * [Understanding JSON Schema](http://json-schema.org/understanding-json-schema/UnderstandingJSONSchema.pdf) 42 | 43 | ## Useful Tools 44 | * [JSON Validator](https://jsonlint.com/) 45 | * [YAML Validator](http://www.yamllint.com/) 46 | * [JSON Schema Validator](https://json-schema-validator.herokuapp.com/) 47 | * [JSON Schema Generator](https://jsonschema.net/) 48 | 49 | ## Values from GSMA GST 50 | ### Included 51 | * sliceid 52 | * delay_tolerance 53 | * deterministic_communication 54 | - availability 55 | - periodicity 56 | * network_DL_throughput 57 | - guaranteed 58 | - maximum 59 | * ue_DL_throughput 60 | - guaranteed 61 | - maximum 62 | * group_communication_support 63 | * isolation 64 | - Core isolation 65 | - RAN isolation 66 | * mtu 67 | * mission_critical_support 68 | - availability 69 | - mc_service 70 | * mmtel_support 71 | * Network Slice Customer network functions --> It is covered by the service_descriptor section 72 | * nb_iot 73 | * Perofrmance Monitoring --> It is covered by the test_descriptor section 74 | * Performance Prediction --> It is covered by the test_descriptor section 75 | * 
positional_support 76 | - availability 77 | - frequency 78 | - accuracy 79 | * radio_spectrum 80 | * simultaneous_nsi 81 | * qos 82 | - qi 83 | - resource_type 84 | - priority_level 85 | - packet_delay_budget 86 | - packet_error_rate 87 | - jitter 88 | - max_packet_loss_rate 89 | * nonIP_traffic 90 | * device_velocity 91 | * terminal_density 92 | 93 | ### Not included 94 | * Energy efficiency 95 | * Location based message delivery 96 | * Reliability --> Not defined yet 97 | * Availability --> Not defined yet 98 | * Root cause investigation 99 | * Session and Service Continuity support 100 | * Synchronicity --> Not defined yet -------------------------------------------------------------------------------- /templates/components/ems_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | 4 | "type": "object", 5 | "description": "A new EMS", 6 | "properties": { 7 | "id":{ 8 | "type": "string", 9 | "description": "Unique id" 10 | }, 11 | "name": { 12 | "type": "string", 13 | "description": "The name for the new EMS" 14 | }, 15 | "description": { 16 | "type": "string", 17 | "description": "A description for the EMS" 18 | }, 19 | "url": { 20 | "type": "string", 21 | "description": "EMS' authentication URL - example: http://10.200.64.2:5000/" 22 | }, 23 | "type": { 24 | "type": "string", 25 | "description": "EMS' type" 26 | } 27 | }, 28 | "required": ["id", "url", "type"] 29 | } -------------------------------------------------------------------------------- /templates/components/nfvo_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | 4 | "type": "object", 5 | "description": "A new NFVO", 6 | "properties": { 7 | "id":{ 8 | "type": "string", 9 | "description": "Unique id" 10 | }, 11 | "name": { 12 | "type": "string", 13 | "description": "The name for the new 
NFVO" 14 | }, 15 | "nfvoip": { 16 | "type": "string", 17 | "description": "NFVO's authentication URL - example: http://10.200.64.2:5000/v3/" 18 | }, 19 | "nfvousername": { 20 | "type": "string", 21 | "description": "The admin username" 22 | }, 23 | "nfvopassword": { 24 | "type": "string", 25 | "description": "The admin password" 26 | }, 27 | "tenantname": { 28 | "type": "string", 29 | "description": "NFVO's Tenant name" 30 | }, 31 | "type": { 32 | "type": "string", 33 | "description": "NFVO's type" 34 | }, 35 | "version": { 36 | "type": "string", 37 | "description": "The version of the NFVO's OS" 38 | }, 39 | "description": { 40 | "type": "string", 41 | "description": "A description for the NFVO" 42 | }, 43 | "config": { 44 | "type": "object", 45 | "description": "Optional parameters regarding the NFVO operation - example: network: flat" 46 | } 47 | }, 48 | "required": ["id", "nfvousername", "nfvopassword", "nfvoip", "tenantname"] 49 | } -------------------------------------------------------------------------------- /templates/components/pdu_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | 4 | "type": "object", 5 | "description": "A new PDU", 6 | "properties": { 7 | "id":{ 8 | "type": "string", 9 | "description": "Unique id" 10 | }, 11 | "name": { 12 | "type": "string", 13 | "description": "The name for the new PDU" 14 | }, 15 | "ip": { 16 | "type": "string", 17 | "description": "The mgmt IP of the PDU" 18 | }, 19 | "description": { 20 | "type": "string", 21 | "description": "A description for the PDU" 22 | }, 23 | "location": { 24 | "type": "string", 25 | "description": "The location for new PDU" 26 | } 27 | }, 28 | "required": ["id"] 29 | } -------------------------------------------------------------------------------- /templates/components/policy_model.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"$schema": "http://json-schema.org/draft-07/schema#", 3 | 4 | "type": "object", 5 | "description": "A new Policy Management System", 6 | "properties": { 7 | "id":{ 8 | "type": "string", 9 | "description": "Unique id" 10 | }, 11 | "url": { 12 | "type": "string", 13 | "description": "The URL of the Policy Management System" 14 | }, 15 | "description": { 16 | "type": "string", 17 | "description": "A description for the Policy Management System" 18 | }, 19 | "type": { 20 | "type": "string", 21 | "description": "The type for new Policy Management System" 22 | } 23 | }, 24 | "required": ["id", "url", "type"] 25 | } -------------------------------------------------------------------------------- /templates/components/vim_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "description": "A new VIM", 5 | "properties": { 6 | "id": { 7 | "type": "string", 8 | "description": "Unique id" 9 | }, 10 | "name": { 11 | "type": "string", 12 | "description": "The name for the new VIM" 13 | }, 14 | "auth_url": { 15 | "type": "string", 16 | "description": "VIM's authentication URL - example: http://10.200.64.2:5000/v3/" 17 | }, 18 | "username": { 19 | "type": "string", 20 | "description": "The admin username" 21 | }, 22 | "password": { 23 | "type": "string", 24 | "description": "The admin password" 25 | }, 26 | "admin_project_name": { 27 | "type": "string", 28 | "description": "The admin project" 29 | }, 30 | "location": { 31 | "type": "string", 32 | "description": "VIM's location" 33 | }, 34 | "type": { 35 | "type": "string", 36 | "description": "VIM's type" 37 | }, 38 | "version": { 39 | "type": "string", 40 | "description": "The version of the VIM's OS" 41 | }, 42 | "description": { 43 | "type": "string", 44 | "description": "A description for the VIM" 45 | }, 46 | "infrastructure_monitoring": { 47 | "type": "string", 48 | "description": "Optional - The 
URL of the Prometheus system that is responsible for monitoring the VIM" 49 | }, 50 | "config": { 51 | "type": "object", 52 | "description": "Optional parameters regarding the VIM operation - example: Security group" 53 | } 54 | }, 55 | "required": [ 56 | "id", 57 | "auth_url", 58 | "username", 59 | "password", 60 | "admin_project_name" 61 | ] 62 | } -------------------------------------------------------------------------------- /templates/components/wim_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "description": "A new WIM", 5 | "properties": { 6 | "id": { 7 | "type": "string", 8 | "description": "Unique id" 9 | }, 10 | "name": { 11 | "type": "string", 12 | "description": "The name for the new WIM" 13 | }, 14 | "description": { 15 | "type": "string", 16 | "description": "A description for the WIM" 17 | }, 18 | "url": { 19 | "type": "string", 20 | "description": "WIM's authentication URL - example: http://10.200.64.2:5000/" 21 | }, 22 | "type": { 23 | "type": "string", 24 | "description": "WIM's type" 25 | }, 26 | "monitoring-url": { 27 | "type": "string", 28 | "description": "If set, katana-prometheus will scrape the target URL" 29 | } 30 | }, 31 | "required": [ 32 | "id", 33 | "url", 34 | "type" 35 | ] 36 | } -------------------------------------------------------------------------------- /templates/example_config_files/Functions/example_demo5gcore.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "group0_demo5GCore", 3 | "name": "group0_demo5GCore", 4 | "gen": 5, 5 | "func": 0, 6 | "shared": { 7 | "availability": false 8 | }, 9 | "type": 0, 10 | "location": "Core", 11 | "pnf_list": [], 12 | "ns_list": [ 13 | { 14 | "nsd-id": "f08b2bbe-45ef-4a62-83cd-33ca1ad43c46", 15 | "ns-name": "group0_demo5GCore", 16 | "placement": 0, 17 | "optional": false 18 | } 19 | ] 20 | } 
-------------------------------------------------------------------------------- /templates/example_config_files/Functions/example_demo5ggnb.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "group0_demo5GGnb", 3 | "name": "group0_demo5GGnb", 4 | "gen": 5, 5 | "func": 1, 6 | "shared": { 7 | "availability": false 8 | }, 9 | "type": 0, 10 | "location": "group0_edge", 11 | "pnf_list": [], 12 | "ns_list": [ 13 | { 14 | "nsd-id": "896757b6-1966-4657-9ec4-1e001e726d61", 15 | "ns-name": "group0_demo5GGnb", 16 | "placement": 0, 17 | "optional": false 18 | } 19 | ] 20 | } -------------------------------------------------------------------------------- /templates/example_config_files/SB_components/example_ems.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "test-ems", 3 | "name": "test-ems", 4 | "description": "Test EMS", 5 | "url": "URL", 6 | "type": "test-ems" 7 | } -------------------------------------------------------------------------------- /templates/example_config_files/SB_components/example_osm8.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "osm8", 3 | "name": "osm8", 4 | "nfvoip": "IP_ADDRESS", 5 | "nfvousername": "*****", 6 | "nfvopassword": "*****", 7 | "tenantname": "*****", 8 | "type": "OSM", 9 | "version": "8", 10 | "description": "OSM Release 8", 11 | "config": {} 12 | } -------------------------------------------------------------------------------- /templates/example_config_files/SB_components/example_policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "test_policy1", 3 | "description": "Test Policy Management System for debugging", 4 | "url": "URL", 5 | "type": "test-policy" 6 | } -------------------------------------------------------------------------------- /templates/example_config_files/SB_components/example_vim_core.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "id": "vim_core", 3 | "name": "vim_Core", 4 | "auth_url": "http://10.161.0.11:5000/v3/", 5 | "username": "*****", 6 | "password": "*****", 7 | "admin_project_name": "*****", 8 | "location": "Core", 9 | "type": "openstack", 10 | "version": "Wallaby", 11 | "description": "Group 0 VIM", 12 | "infrastructure_monitoring": "*****", 13 | "config": { 14 | "security_groups": "TBD" 15 | } 16 | } -------------------------------------------------------------------------------- /templates/example_config_files/SB_components/example_vim_edge_cosmote.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "cosmote_vim", 3 | "name": "cosmote_vim", 4 | "auth_url": "http://IP_ADDRESS:5000/v3/", 5 | "username": "******", 6 | "password": "******", 7 | "admin_project_name": "******", 8 | "location": "Cosmote", 9 | "type": "openstack", 10 | "version": "Queens", 11 | "description": "Edge VIM - Media/Cosmote", 12 | "config": { 13 | "security_groups": "TBD" 14 | } 15 | } -------------------------------------------------------------------------------- /templates/example_config_files/SB_components/example_vim_edge_minilab.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "minilab_vim", 3 | "name": "minilab_vim", 4 | "auth_url": "http://IP_ADDRESS:5000/v3/", 5 | "username": "******", 6 | "password": "******", 7 | "admin_project_name": "******", 8 | "location": "Minilab", 9 | "type": "openstack", 10 | "version": "Queens", 11 | "description": "Edge VIM - IoRL/Minilab", 12 | "config": { 13 | "security_groups": "TBD" 14 | } 15 | } -------------------------------------------------------------------------------- /templates/example_config_files/SB_components/example_wim.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "test-wim", 3 | "name": "test-wim", 4 | 
"description": "Test Katana WIM", 5 | "url": "URL", 6 | "type": "test-wim" 7 | } -------------------------------------------------------------------------------- /templates/example_config_files/location/example_group0_edge.json: -------------------------------------------------------------------------------- 1 | { 2 | "id": "group0_edge", 3 | "description": "Group 0 Edge location" 4 | } -------------------------------------------------------------------------------- /templates/example_config_files/nest/example_group0_nest.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_slice_descriptor": { 3 | "base_slice_des_id": "group0_demo_slice", 4 | "coverage": [ 5 | "group0_edge" 6 | ], 7 | "delay_tolerance": true, 8 | "network_DL_throughput": { 9 | "guaranteed": 1500000 10 | }, 11 | "ue_DL_throughput": { 12 | "guaranteed": 1500000 13 | }, 14 | "network_UL_throughput": { 15 | "guaranteed": 50000 16 | }, 17 | "ue_UL_throughput": { 18 | "guaranteed": 60000 19 | }, 20 | "mtu": 1500 21 | }, 22 | "service_descriptor": {} 23 | } 24 | -------------------------------------------------------------------------------- /templates/example_config_files/nest/example_group0_nest_fw.json: -------------------------------------------------------------------------------- 1 | { 2 | "base_slice_descriptor": { 3 | "base_slice_des_id": "group0_demo_slice", 4 | "coverage": [ 5 | "group0_edge" 6 | ], 7 | "delay_tolerance": true, 8 | "network_DL_throughput": { 9 | "guaranteed": 1500000 10 | }, 11 | "ue_DL_throughput": { 12 | "guaranteed": 1500000 13 | }, 14 | "network_UL_throughput": { 15 | "guaranteed": 50000 16 | }, 17 | "ue_UL_throughput": { 18 | "guaranteed": 60000 19 | }, 20 | "mtu": 1500 21 | }, 22 | "service_descriptor": { 23 | "ns_list": [ 24 | { 25 | "nsd-id": "f08b2bbe-45ef-4a62-83cd-33ca1ad43c46", 26 | "ns-name": "group0_demoFW", 27 | "placement": 0, 28 | "optional": false 29 | } 30 | ] 31 | } 32 | } 
-------------------------------------------------------------------------------- /templates/images/SlicingOptions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/medianetlab/katana-slice_manager/2e7a14a41fc85bd7188d71ef9beaf51acc94015c/templates/images/SlicingOptions.png -------------------------------------------------------------------------------- /templates/images/katana-logo.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 7 | 8 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /templates/sbi-messages/wim_message_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "type": "object", 4 | "description": "Schema of the message from Katana to WIM", 5 | "properties": { 6 | "slice_sla": { 7 | "type": "object", 8 | "description": "Slice parameters as defined in NEST", 9 | "properties": { 10 | "network_DL_throughput": { 11 | "type": "object", 12 | "description": "The achievable data rate in downlink for the whole network slice (and not per user).", 13 | "properties": { 14 | "guaranteed": { 15 | "type": "number", 16 | "description": "kbps" 17 | }, 18 | "maximum": { 19 | "type": "number", 20 | "description": "kbps" 21 | } 22 | } 23 | }, 24 | "network_UL_throughput": { 25 | "type": "object", 26 | "description": "The achievable data rate in uplink for the whole network slice (and not per user).", 27 | "properties": { 28 | "guaranteed": { 29 | "type": "number", 30 | "description": "kbps" 31 | }, 32 | "maximum": { 33 | "type": "number", 34 | "description": "kbps" 35 | } 36 | } 37 | }, 38 | "mtu": { 39 | "type": "number", 40 | "description": "Bytes" 41 | } 42 | } 43 | }, 44 | "connections": { 45 | "type": "array", 46 | "description": 
"List of connections that are part of the slice and must be implemented by the WIM", 47 | "items": { 48 | "type": "object", 49 | "description": "The endpoints of the connections", 50 | "properties": { 51 | "core": { 52 | "type": "string", 53 | "description": "The core part of the radio connection" 54 | } 55 | } 56 | }, 57 | "radio": { 58 | "type": "string", 59 | "description": "The radio part of the radio connection" 60 | } 61 | } 62 | } 63 | } -------------------------------------------------------------------------------- /templates/supported_functions_model.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "http://json-schema.org/draft-07/schema#", 3 | "definitions": { 4 | "ns": { 5 | "type": "object", 6 | "description": "A Network Service", 7 | "properties": { 8 | "nsd-id": { 9 | "type": "string", 10 | "description": "The NSD id as defined on the NFVO" 11 | }, 12 | "ns-name": { 13 | "type": "string", 14 | "description": "The name of the NS" 15 | }, 16 | "placement": { 17 | "type": "number", 18 | "enum": [ 19 | 0, 20 | 1 21 | ], 22 | "description": "0: Core, 1: Edge" 23 | }, 24 | "optional": { 25 | "type": "boolean" 26 | } 27 | } 28 | }, 29 | "pnf": { 30 | "type": "object", 31 | "description": "A Physical Network Function", 32 | "properties": { 33 | "pnf-id": { 34 | "type": "string", 35 | "description": "A Unique ID of the pnf" 36 | }, 37 | "pnf-name": { 38 | "type": "string", 39 | "description": "The name of the PNF" 40 | }, 41 | "description": { 42 | "type": "string" 43 | }, 44 | "ip": { 45 | "type": "string", 46 | "description": "The management IP of the PNF" 47 | }, 48 | "ip_s1": { 49 | "type": "string", 50 | "description": "Optional - The IP of the S1 Interface" 51 | }, 52 | "location": { 53 | "type": "string", 54 | "description": "The location of the PNF" 55 | }, 56 | "optional": { 57 | "type": "boolean" 58 | } 59 | } 60 | } 61 | }, 62 | "type": "object", 63 | "description": "A core slice network 
function", 64 | "properties": { 65 | "id": { 66 | "type": "string", 67 | "description": "A unique ID for this network function" 68 | }, 69 | "name": { 70 | "type": "string", 71 | "description": "Optional name for the network function" 72 | }, 73 | "gen": { 74 | "type": "number", 75 | "enum": [ 76 | 4, 77 | 5 78 | ], 79 | "description": "Type of the network function - 4: 4G, 5: 5G" 80 | }, 81 | "func": { 82 | "type": "number", 83 | "enum": [ 84 | 0, 85 | 1 86 | ], 87 | "description": "0: Core, 1: Radio" 88 | }, 89 | "shared": { 90 | "type": "object", 91 | "description": "Defines if the function can be shared between different slices", 92 | "properties": { 93 | "availability": { 94 | "type": "boolean", 95 | "description": "true: shared, false: not shared" 96 | }, 97 | "max_shared": { 98 | "type": "number", 99 | "description": "Max number of slices - If availability is true and max_shared not defined, it will be assumed unlimited availability" 100 | } 101 | }, 102 | "required": [ 103 | "availability" 104 | ] 105 | }, 106 | "type": { 107 | "type": "number", 108 | "enum": [ 109 | 0, 110 | 1 111 | ], 112 | "description": "0: Virtual, 1: Physical" 113 | }, 114 | "location": { 115 | "type": "string", 116 | "description": "Supported location" 117 | }, 118 | "ns_list": { 119 | "type": "array", 120 | "description": "Conditional - If type == Virtual - A list of the NSs that will be part of the slice", 121 | "items": { 122 | "$ref": "#/definitions/ns" 123 | } 124 | }, 125 | "pnf_list": { 126 | "type": "array", 127 | "description": "Conditional - If type == Physical - A list of the PNFs that will be part of the slice", 128 | "items": { 129 | "$ref": "#/definitions/pnf" 130 | } 131 | }, 132 | "ems-id": { 133 | "type": "string", 134 | "description": "Optional - Defines the EMS that is responsible for D1&2 configuration" 135 | } 136 | }, 137 | "required": [ 138 | "id", 139 | "gen", 140 | "func", 141 | "shared", 142 | "type", 143 | "location" 144 | ] 145 | }
--------------------------------------------------------------------------------