├── CDK_py ├── .gitignore ├── README.md ├── app.py ├── cdk.json ├── cdk_py │ ├── __init__.py │ └── cdk_py_stack.py ├── requirements.txt ├── setup.py └── source.bat ├── Dockerfile ├── Dockerfile.Readme.md ├── LICENSE ├── NOTICE ├── README.md ├── Readme-old.md ├── eks-cluster-setup ├── eks-cluster-join-fed.script ├── eks-cluster-warmup.script ├── eks-cluster.yaml └── rbac-tiller.yaml ├── exec-kubefed.py ├── fed-app-example ├── federated-deployment-rsp.yaml ├── federated-nginx.yaml ├── federated-service.yaml └── namespace.yaml ├── k8s-fed-yml-setup ├── fed-cluster-setup.script └── federated-namespace.yaml ├── k8sFederated-mailru.png ├── mcs-cluster-setup ├── cluster_provision.sh ├── create_extip.sh ├── create_network_resources.sh └── vpnserver.sh ├── site-to-site-VPN-AWS ├── VPC-VPN-site2site.yaml └── vpn-create-cfn.sh └── super-big-script.sh /CDK_py/.gitignore: -------------------------------------------------------------------------------- 1 | # VSCode extension 2 | .vscode/ 3 | /.favorites.json 4 | 5 | # TypeScript incremental build states 6 | *.tsbuildinfo 7 | 8 | # Local state files & OS specifics 9 | .DS_Store 10 | node_modules/ 11 | lerna-debug.log 12 | dist/ 13 | pack/ 14 | .BUILD_COMPLETED 15 | .local-npm/ 16 | .tools/ 17 | coverage/ 18 | .nyc_output 19 | .LAST_BUILD 20 | *.sw[a-z] 21 | *~ 22 | .idea 23 | 24 | # We don't want tsconfig at the root 25 | /tsconfig.json 26 | 27 | # CDK Context & Staging files 28 | cdk.context.json 29 | .cdk.staging/ 30 | cdk.out/ 31 | *.tabl.json 32 | 33 | # Yarn error log 34 | yarn-error.log 35 | 36 | # Generated jest config 37 | jest.config.gen.json 38 | 39 | # Byte-compiled / optimized / DLL files 40 | __pycache__/ 41 | *.py[cod] 42 | *$py.class 43 | 44 | # C extensions 45 | *.so 46 | 47 | # Distribution / packaging 48 | .Python 49 | build/ 50 | develop-eggs/ 51 | dist/ 52 | downloads/ 53 | eggs/ 54 | .eggs/ 55 | lib/ 56 | lib64/ 57 | parts/ 58 | sdist/ 59 | var/ 60 | wheels/ 61 | pip-wheel-metadata/ 62 | share/python-wheels/ 63 | *.egg-info/ 64 | .installed.cfg 65 | *.egg 66 | MANIFEST 67 | 68 | # PyInstaller 69 | # Usually these files are written by a python script from a template 70 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 71 | *.manifest 72 | *.spec 73 | 74 | # Installer logs 75 | pip-log.txt 76 | pip-delete-this-directory.txt 77 | 78 | # Unit test / coverage reports 79 | htmlcov/ 80 | .tox/ 81 | .nox/ 82 | .coverage 83 | .coverage.* 84 | .cache 85 | nosetests.xml 86 | coverage.xml 87 | *.cover 88 | *.py,cover 89 | .hypothesis/ 90 | .pytest_cache/ 91 | 92 | # Translations 93 | *.mo 94 | *.pot 95 | 96 | # Django stuff: 97 | *.log 98 | local_settings.py 99 | db.sqlite3 100 | db.sqlite3-journal 101 | 102 | # Flask stuff: 103 | instance/ 104 | .webassets-cache 105 | 106 | # Scrapy stuff: 107 | .scrapy 108 | 109 | # Sphinx documentation 110 | docs/_build/ 111 | 112 | # PyBuilder 113 | target/ 114 | 115 | # Jupyter Notebook 116 | .ipynb_checkpoints 117 | 118 | # IPython 119 | profile_default/ 120 | ipython_config.py 121 | 122 | # pyenv 123 | # For a library or package, you might want to ignore these files since the code is 124 | # intended to run in multiple environments; otherwise, check them in: 125 | # .python-version 126 | 127 | # pipenv 128 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
129 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 130 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 131 | # install all needed dependencies. 132 | #Pipfile.lock 133 | 134 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 135 | __pypackages__/ 136 | 137 | # Celery stuff 138 | celerybeat-schedule 139 | celerybeat.pid 140 | 141 | # SageMath parsed files 142 | *.sage.py 143 | 144 | # Environments 145 | .env 146 | .venv 147 | env/ 148 | venv/ 149 | ENV/ 150 | env.bak/ 151 | venv.bak/ 152 | 153 | # Spyder project settings 154 | .spyderproject 155 | .spyproject 156 | 157 | # Rope project settings 158 | .ropeproject 159 | 160 | # mkdocs documentation 161 | /site 162 | 163 | # mypy 164 | .mypy_cache/ 165 | .dmypy.json 166 | dmypy.json 167 | 168 | # Pyre type checker 169 | .pyre/ 170 | 171 | # pytype static type analyzer 172 | .pytype/ -------------------------------------------------------------------------------- /CDK_py/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Welcome to your CDK Python project! 3 | 4 | This is a blank project for Python development with CDK. 5 | 6 | The `cdk.json` file tells the CDK Toolkit how to execute your app. 7 | 8 | This project is set up like a standard Python project. The initialization 9 | process also creates a virtualenv within this project, stored under the .env 10 | directory. To create the virtualenv it assumes that there is a `python3` 11 | (or `python` for Windows) executable in your path with access to the `venv` 12 | package. If for any reason the automatic creation of the virtualenv fails, 13 | you can create the virtualenv manually. 14 | 15 | To manually create a virtualenv on macOS and Linux: 16 | 17 | ``` 18 | $ python3 -m venv .env 19 | ``` 20 | 21 | After the init process completes and the virtualenv is created, you can use the following 22 | step to activate your virtualenv. 23 | 24 | ``` 25 | $ source .env/bin/activate 26 | ``` 27 | 28 | If you are on a Windows platform, you activate the virtualenv like this: 29 | 30 | ``` 31 | % .env\Scripts\activate.bat 32 | ``` 33 | 34 | Once the virtualenv is activated, you can install the required dependencies. 35 | 36 | ``` 37 | $ pip install -r requirements.txt 38 | ``` 39 | 40 | At this point you can now synthesize the CloudFormation template for this code. 41 | 42 | ``` 43 | $ cdk synth 44 | ``` 45 | 46 | To add additional dependencies, for example other CDK libraries, just add 47 | them to your `setup.py` file and rerun the `pip install -r requirements.txt` 48 | command. 49 | 50 | ## Useful commands 51 | 52 | * `cdk ls` list all stacks in the app 53 | * `cdk synth` emits the synthesized CloudFormation template 54 | * `cdk deploy` deploy this stack to your default AWS account/region 55 | * `cdk diff` compare deployed stack with current state 56 | * `cdk docs` open CDK documentation 57 | 58 | Enjoy!
59 | -------------------------------------------------------------------------------- /CDK_py/app.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from aws_cdk import core 4 | 5 | from cdk_py.cdk_py_stack import CdkPyStack 6 | 7 | 8 | app = core.App() 9 | CdkPyStack(app, "cdk-py",env={'region': 'eu-west-2'}) 10 | 11 | app.synth() 12 | -------------------------------------------------------------------------------- /CDK_py/cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "python3 app.py" 3 | } 4 | -------------------------------------------------------------------------------- /CDK_py/cdk_py/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MailRuCloudSolutions/k8sfed/e3f8de5a1adcdcc8f76f123b3798b28dbb362253/CDK_py/cdk_py/__init__.py -------------------------------------------------------------------------------- /CDK_py/cdk_py/cdk_py_stack.py: -------------------------------------------------------------------------------- 1 | from aws_cdk import ( 2 | aws_ec2 as ec2, 3 | aws_eks as eks, 4 | aws_iam as iam, 5 | aws_autoscaling as autoscaling, 6 | core 7 | ) 8 | 9 | 10 | class CdkPyStack(core.Stack): 11 | 12 | def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: 13 | super().__init__(scope, id, **kwargs) 14 | cluster_admin = iam.Role(self, "AdminRole", 15 | assumed_by=iam.AccountRootPrincipal()) 16 | 17 | vpc = ec2.Vpc(self,"EKSVpc",cidr="10.2.0.0/16") 18 | 19 | eksCluster = eks.Cluster(self, "fedcluster", 20 | vpc=vpc, 21 | cluster_name="awsfedcluster", 22 | kubectl_enabled=True, 23 | masters_role=cluster_admin, 24 | default_capacity=2, 25 | default_capacity_instance=ec2.InstanceType("t3.large")) 26 | 27 | -------------------------------------------------------------------------------- /CDK_py/requirements.txt: -------------------------------------------------------------------------------- 1 | -e . 
2 | -------------------------------------------------------------------------------- /CDK_py/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | 4 | with open("README.md") as fp: 5 | long_description = fp.read() 6 | 7 | 8 | setuptools.setup( 9 | name="cdk_py", 10 | version="0.0.1", 11 | 12 | description="An empty CDK Python app", 13 | long_description=long_description, 14 | long_description_content_type="text/markdown", 15 | 16 | author="author", 17 | 18 | package_dir={"": "cdk_py"}, 19 | packages=setuptools.find_packages(where="cdk_py"), 20 | 21 | install_requires=[ 22 | "aws-cdk.core", 23 | "aws-cdk.aws_eks", 24 | "aws-cdk.aws_iam", 25 | "aws-cdk.aws_ec2", 26 | "aws-cdk.aws_autoscaling", 27 | ], 28 | 29 | python_requires=">=3.6", 30 | 31 | classifiers=[ 32 | "Development Status :: 4 - Beta", 33 | 34 | "Intended Audience :: Developers", 35 | 36 | "License :: OSI Approved :: Apache Software License", 37 | 38 | "Programming Language :: JavaScript", 39 | "Programming Language :: Python :: 3 :: Only", 40 | "Programming Language :: Python :: 3.6", 41 | "Programming Language :: Python :: 3.7", 42 | "Programming Language :: Python :: 3.8", 43 | 44 | "Topic :: Software Development :: Code Generators", 45 | "Topic :: Utilities", 46 | 47 | "Typing :: Typed", 48 | ], 49 | ) 50 | -------------------------------------------------------------------------------- /CDK_py/source.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | rem The sole purpose of this script is to make the command 4 | rem 5 | rem source .env/bin/activate 6 | rem 7 | rem (which activates a Python virtualenv on Linux or Mac OS X) work on Windows. 8 | rem On Windows, this command just runs this batch file (the argument is ignored). 9 | rem 10 | rem Now we don't need to document a Windows command for activating a virtualenv. 11 | 12 | echo Executing .env\Scripts\activate.bat for you 13 | .env\Scripts\activate.bat 14 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | ENV NVM_DIR /usr/local/nvm 4 | ENV TZ=Europe/Moscow 5 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 6 | 7 | RUN DEBIAN_FRONTEND=noninteractive apt-get update -y && \ 8 | apt-get install python3 python3-pip python3-venv python-openstackclient curl -y && \ 9 | pip3 install --upgrade pip && \ 10 | pip3 install -q paramiko scp 11 | 12 | RUN mkdir -p $NVM_DIR && \ 13 | curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.2/install.sh | bash - && \ 14 | . 
$NVM_DIR/nvm.sh && \ 15 | nvm install v12.17.0 && \ 16 | npm install -g aws-cdk 17 | 18 | RUN curl --silent --location "https://github.com/weaveworks/eksctl/releases/download/latest_release/eksctl_$(uname -s)_amd64.tar.gz" | tar xz -C /tmp && \ 19 | mv /tmp/eksctl /usr/local/bin && \ 20 | curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \ 21 | chmod +x ./kubectl && mv ./kubectl /usr/local/bin/kubectl 22 | 23 | RUN curl -LO https://get.helm.sh/helm-v2.16.3-linux-amd64.tar.gz && \ 24 | tar xzvf helm-v2.16.3-linux-amd64.tar.gz && mv linux-amd64/helm /usr/local/bin/ 25 | 26 | RUN curl -LO https://github.com/kubernetes-sigs/kubefed/releases/download/v0.1.0-rc6/kubefedctl-0.1.0-rc6-linux-amd64.tgz && \ 27 | tar xzvf kubefedctl-0.1.0-rc6-linux-amd64.tgz && mv kubefedctl /usr/local/bin 28 | 29 | RUN apt-get -y install jq 30 | RUN pip3 install awscli --upgrade 31 | 32 | COPY . /app 33 | WORKDIR /app 34 | 35 | CMD tail -f /dev/null 36 | -------------------------------------------------------------------------------- /Dockerfile.Readme.md: -------------------------------------------------------------------------------- 1 | # Docker image for k8sfed 2 | 3 | This image is intended for use in interactive mode. 4 | 5 | ## Step 0: Create image 6 | 7 | ```bash 8 | docker build -t aws-mcs-k8s-federation . 9 | ``` 10 | 11 | ## Step 1: Run container 12 | 13 | ```bash 14 | docker run -it aws-mcs-k8s-federation /bin/bash 15 | ``` 16 | 17 | OR 18 | 19 | ```bash 20 | docker run -d aws-mcs-k8s-federation 21 | docker exec -it <container_id> /bin/bash 22 | ``` 23 | 24 | ## Step 2: Configure AWS credentials 25 | 26 | This step may be skipped if you have configured credentials in `~/.aws` on the host machine and mounted them during container run, e.g. `-v $HOME/.aws:/root/.aws`. Credentials are expected to be in `/root/.aws`. Otherwise, configure them during interactive mode: 27 | 28 | ```bash 29 | aws configure 30 | ``` 31 | 32 | ## Step 3: Configure MCS credentials 33 | 34 | As with AWS, this step may be skipped if a correct mount is provided during container run, e.g. `-v $HOME/my-openrc.sh:/app/openrc`. Credentials are expected to be in the file `/app/openrc`. See [help](https://mcs.mail.ru/help/iaas-api/openstack-api). It is highly recommended to have the password in there, instead of an interactive request, e.g. `export OS_PASSWORD="mypass"`. 35 | 36 | If you already have a running container, copy the file during interactive mode: 37 | 38 | ```bash 39 | cat > /app/openrc 40 | 41 | <paste the contents of your openrc file here> 42 | 43 | Ctrl+D 44 | ``` 45 | 46 | ## Step 4: Run the main script 47 | 48 | The main script is `super-big-script.sh`. 49 | 50 | ```bash 51 | ./super-big-script.sh 52 | ``` 53 | 54 | ## Outputs 55 | 56 | After the script has finished, you'll have plenty of files needed for later work **inside** the container. To mitigate the possibility of losing them, it is recommended to copy them somewhere **outside** the container. 57 | 58 | * An MCS keypair with the name `k8s-fed-XXXX` will be created. Its private part will be stored in `/var/tmp/k8s-fed_id_rsa`. It should be used to access the VPN server and Kubernetes nodes over SSH. 59 | * The MCS KUBECONFIG with the private IP will be stored in `/var/tmp/mcs_k8s_cfg`. This is less critical, because it may be reacquired from the MCS console or API. 60 | * The AWS EKS KUBECONFIG, updated to conform to the `kubefedctl` tool. This is stored in `/root/.kube/config`. 61 | * The AWS VPN configuration is stored in `/var/tmp/vpn_cfg_conn.xml`.
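For convenience, the mounts from Steps 2 and 3 can be combined into a single interactive run (host-side paths here are just examples):

```bash
docker run -it \
  -v $HOME/.aws:/root/.aws \
  -v $HOME/my-openrc.sh:/app/openrc \
  aws-mcs-k8s-federation /bin/bash
```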
62 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Amazon Software License 2 | 3 | 1. Definitions 4 | “Licensor” means any person or entity that distributes its Work. 5 | 6 | “Software” means the original work of authorship made available under this License. 7 | 8 | “Work” means the Software and any additions to or derivative works of the Software that are made available under this License. 9 | 10 | The terms “reproduce,” “reproduction,” “derivative works,” and “distribution” have the meaning as provided under U.S. copyright law; provided, however, that for the purposes of this License, derivative works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work. 11 | 12 | Works, including the Software, are “made available” under this License by including in or with the Work either (a) a copyright notice referencing the applicability of this License to the Work, or (b) a copy of this License. 13 | 2. License Grants 14 | 2.1 Copyright Grant. Subject to the terms and conditions of this License, each Licensor grants to you a perpetual, worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense and distribute its Work and any resulting derivative works in any form. 15 | 2.2 Patent Grant. Subject to the terms and conditions of this License, each Licensor grants to you a perpetual, worldwide, non-exclusive, royalty-free patent license to make, have made, use, sell, offer for sale, import, and otherwise transfer its Work, in whole or in part. The foregoing license applies only to the patent claims licensable by Licensor that would be infringed by Licensor’s Work (or portion thereof) individually and excluding any combinations with any other materials or technology. 16 | 3. Limitations 17 | 3.1 Redistribution. You may reproduce or distribute the Work only if (a) you do so under this License, (b) you include a complete copy of this License with your distribution, and (c) you retain without modification any copyright, patent, trademark, or attribution notices that are present in the Work. 18 | 3.2 Derivative Works. You may specify that additional or different terms apply to the use, reproduction, and distribution of your derivative works of the Work (“Your Terms”) only if (a) Your Terms provide that the use limitation in Section 3.3 applies to your derivative works, and (b) you identify the specific derivative works that are subject to Your Terms. Notwithstanding Your Terms, this License (including the redistribution requirements in Section 3.1) will continue to apply to the Work itself. 19 | 3.3 Use Limitation. The Work and any derivative works thereof only may be used or intended for use with the web services, computing platforms or applications provided by Amazon.com, Inc. or its affiliates, including Amazon Web Services, Inc. 20 | 3.4 Patent Claims. If you bring or threaten to bring a patent claim against any Licensor (including any claim, cross-claim or counterclaim in a lawsuit) to enforce any patents that you allege are infringed by any Work, then your rights under this License from such Licensor (including the grants in Sections 2.1 and 2.2) will terminate immediately. 21 | 3.5 Trademarks. 
This License does not grant any rights to use any Licensor’s or its affiliates’ names, logos, or trademarks, except as necessary to reproduce the notices described in this License. 22 | 3.6 Termination. If you violate any term of this License, then your rights under this License (including the grants in Sections 2.1 and 2.2) will terminate immediately. 23 | 4. Disclaimer of Warranty. 24 | THE WORK IS PROVIDED “AS IS” WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WARRANTIES OR CONDITIONS OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE OR NON-INFRINGEMENT. YOU BEAR THE RISK OF UNDERTAKING ANY ACTIVITIES UNDER THIS LICENSE. SOME STATES’ CONSUMER LAWS DO NOT ALLOW EXCLUSION OF AN IMPLIED WARRANTY, SO THIS DISCLAIMER MAY NOT APPLY TO YOU. 25 | 5. Limitation of Liability. 26 | EXCEPT AS PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER IN TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE SHALL ANY LICENSOR BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATED TO THIS LICENSE, THE USE OR INABILITY TO USE THE WORK (INCLUDING BUT NOT LIMITED TO LOSS OF GOODWILL, BUSINESS INTERRUPTION, LOST PROFITS OR DATA, COMPUTER FAILURE OR MALFUNCTION, OR ANY OTHER COMMERCIAL DAMAGES OR LOSSES), EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 27 | Effective Date – April 18, 2008 © 2008 Amazon.com, Inc. or its affiliates. All rights reserved. 28 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | [AWS EKS Federated Kubernetes cluster with Mail.ru Cloud] 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # The Project is deprecated. 2 | 3 | ============== 4 | 5 | # Kubernetes cluster Federation between Mail.ru Cloud and AWS 6 | This project automates the deployment and configuration of the demo environment. The main purpose of this deployment is to demonstrate federation between two clusters: AWS EKS (Elastic Kubernetes Service) and the Mail.ru Cloud Managed Kubernetes service, connected through a secured VPN connection. Federation is done using the official Kubernetes [kubefed project](https://github.com/kubernetes-sigs/kubefed). We see the following scenarios, among others, that could benefit from cluster federation: 7 | * Dynamic scaling between clusters 8 | * Helping to arrange geo/country-sensitive workloads and hybrid architectures 9 | * High availability between clusters/clouds 10 | * Single point of administration for multiple clusters across regions or clouds 11 | 12 | ## Architecture of deployment 13 |
<img src="k8sFederated-mailru.png" alt="Architecture of deployment">
14 | 15 | ## How to deploy using the prepared Docker image 16 | 17 | This image is intended for use in interactive mode. 18 | 19 | ### Step 0: Create image 20 | 21 | ```bash 22 | docker build -t aws-mcs-k8s-federation . 23 | ``` 24 | 25 | ### Step 1: Run container 26 | 27 | ```bash 28 | docker run -it aws-mcs-k8s-federation /bin/bash 29 | ``` 30 | 31 | OR 32 | 33 | ```bash 34 | docker run -d aws-mcs-k8s-federation 35 | docker exec -it <container_id> /bin/bash 36 | ``` 37 | 38 | ### Step 2: Configure AWS credentials 39 | 40 | This step may be skipped if you have configured credentials in `~/.aws` on the host machine and mounted them during container run, e.g. `-v $HOME/.aws:/root/.aws`. Credentials are expected to be in `/root/.aws`. Otherwise, configure them during interactive mode: 41 | 42 | ```bash 43 | aws configure 44 | ``` 45 | 46 | ### Step 3: Configure MCS credentials 47 | 48 | As with AWS, this step may be skipped if a correct mount is provided during container run, e.g. `-v $HOME/my-openrc.sh:/app/openrc`. Credentials are expected to be in the file `/app/openrc`. See [help](https://mcs.mail.ru/help/iaas-api/openstack-api). It is highly recommended to have the password in there, instead of an interactive request, e.g. `export OS_PASSWORD="mypass"`. 49 | 50 | If you already have a running container, copy the file during interactive mode: 51 | 52 | ```bash 53 | cat > /app/openrc 54 | 55 | <paste the contents of your openrc file here> 56 | 57 | Ctrl+D 58 | ``` 59 | 60 | ### Step 4: Run the main script 61 | 62 | The main script is `super-big-script.sh`. 63 | 64 | ```bash 65 | ./super-big-script.sh 66 | ``` 67 | 68 | ### Outputs 69 | 70 | After the script has finished, you'll have plenty of files needed for later work **inside** the container. To mitigate the possibility of losing them, it is recommended to copy them somewhere **outside** the container. 71 | 72 | * An MCS keypair with the name `k8s-fed-XXXX` will be created. Its private part will be stored in `/var/tmp/k8s-fed_id_rsa`. It should be used to access the VPN server and Kubernetes nodes over SSH. 73 | * The MCS KUBECONFIG with the private IP will be stored in `/var/tmp/mcs_k8s_cfg`. This is less critical, because it may be reacquired from the MCS console or API. 74 | * The AWS EKS KUBECONFIG, updated to conform to the `kubefedctl` tool. This is stored in `/root/.kube/config`. 75 | * The AWS VPN configuration is stored in `/var/tmp/vpn_cfg_conn.xml`. 76 | 77 | ### Check that the federation is working 78 | In the Docker container, execute this command: 79 | ```bash 80 | kubectl -n kube-federation-system get kubefedclusters 81 | ``` 82 | You should see something like this in the output: 83 | ```bash 84 | NAME READY AGE 85 | awsfedcluster True 30s 86 | mcs-cluster-42k4 True 24s 87 | ``` 88 | -------------------------------------------------------------------------------- /Readme-old.md: -------------------------------------------------------------------------------- 1 | Install: 2 | Ubuntu: 3 | nodejs 4 | iam-authenticator 5 | (yes) eksctl (see https://github.com/weaveworks/eksctl) 6 | (yes) kubectl (see https://eksworkshop.com/020_prerequisites/k8stools/) 7 | (yes) kubefedctl (see https://github.com/kubernetes-sigs/kubefed/blob/master/docs/installation.md) 8 | (yes) helm 2x 9 | aws cli 10 | cdk 11 | pip -> python3 (check) 12 | 13 | 14 | MCS: 15 | source mcs-cluster-setup/aws-fed-openrc.sh 16 | 17 | 18 | @todo: 19 | 1. EKS public endpoint: restrict 20 | 2. 21 | 22 | 23 | Setup eks: 24 | 0. Make sure to disable "Managed creds" in Cloud9 (see eksworkshop) 25 | 1. eksctl + cluster.yaml: eksctl create cluster -f cluster.yaml 26 | 2.
create rbac role for tiller: kubectl apply -f rbac-tiller.yaml 27 | 3. init helm tiller: helm init --service-account tiller 28 | 4. add repo: helm repo add kubefed-charts https://raw.githubusercontent.com/kubernetes-sigs/kubefed/master/charts 29 | 5. install kubefed chart: helm install kubefed-charts/kubefed --name kubefed --version=0.1.0-rc6 --namespace kube-federation-system 30 | 6. create merged kubeconfig with both clusters' info. !NB Change AWS api endpoint to lowercase (example kubeconfig.yaml) 31 | 7. join clusters: 32 | kubefedctl join eks-kubefed --cluster-context eks-kubefed --host-cluster-context eks-kubefed --v=2 33 | kubefedctl join kubernetes-cluster-5454 --cluster-context default/kubernetes-cluster-5454 --host-cluster-context eks-kubefed --v=2 34 | 8. create test namespace: kubectl apply -f namespace.yaml 35 | 9. create federated namespace: kubectl apply -f federated-namespace.yaml 36 | 10. create federated deployment: kubectl apply -f federated-nginx.yaml 37 | 11. change deployment policy: 38 | 12. 39 | 40 | ExternalDNS setup 41 | 1. Create policy: aws iam create-policy --policy-name AllowExternalDNSUpdates --policy-document file://policy 42 | 2. create service role: 43 | eksctl utils associate-iam-oidc-provider --region=eu-central-1 --cluster=kubefed --approve 44 | eksctl create iamserviceaccount \ 45 | --name allowaxternaldnsupdates \ 46 | --namespace test \ 47 | --cluster kubefed \ 48 | --attach-policy-arn arn:aws:iam::633127108222:policy/AllowExternalDNSUpdates \ 49 | --approve \ 50 | --override-existing-serviceaccounts 51 | 3. get hosted zone id: aws route53 list-hosted-zones-by-name --output json --dns-name "mcs-aws.kubefed.local" | jq -r '.HostedZones[0].Id' 52 | 4. kubectl apply -f external-dns.yaml -n test 53 | 54 | Site-to-Site VPN setup: 55 | full description is here - https://docs.aws.amazon.com/vpn/latest/s2svpn/SetUpVPNConnections.html 56 | 1. Create VPC with no overlapping CIDR block with external on-premise site 57 | 2. Create a Customer Gateway with the MCS external IP (that's the MCS endpoint 89.208.230.187) 58 | 3. Create a Virtual Private Gateway (Default ASN) 59 | 4. Create Subnet without overlapping CIDR 60 | 5. Create a Security Group allowing traffic (SSH, ICMP ALL to check PING etc) 61 | 6. Create VPN site-to-site connection, associate with CGW, VPG and with static route prefix (192.168.10.0/24 = MCS subnet) 62 | 7.
Create a routing table with the external CIDR block (192.168.10.0/24), or just add auto propagation from the VGW, associated with the subnet (the preferable way) 63 | 64 | It's possible to get the VPN connection configuration using this request (XML file): 65 | aws ec2 describe-vpn-connections --vpn-connection-id vpn-00e01e263c899595e 66 | 67 | --- Kubefed helper scripts/commands 68 | Check kubefed status: 69 | kubectl -n kube-federation-system get kubefedclusters 70 | KubeFed version: rc6 71 | 72 | Federation links: 73 | https://github.com/kubernetes-sigs/kubefed/blob/master/docs/userguide.md#helm-chart-deployment (propagation status) 74 | https://github.com/kubernetes-sigs/kubefed/blob/master/docs/userguide.md#verify-your-deployment-is-working 75 | 76 | Check pods in federated clusters: 77 | kubectl get po -n test --context default/kubernetes-cluster-5454 78 | kubectl get po -n test --context eks-kubefed 79 | 80 | Example for federation: https://github.com/kairen/aws-k8s-federation 81 | -------------------------------------------------------------------------------- /eks-cluster-setup/eks-cluster-join-fed.script: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | 6 | ReplaceAndLowerTheKubeConfig() { 7 | 8 | local file=$1 9 | #declare -p file 10 | 11 | local searchServer=$(kubectl config view -o json | jq -c '.clusters[] | select (.name | contains("awsfed")) | .name' | jq -r .) 12 | local replaceServer=$(kubectl config view -o json | jq -c '.clusters[] | select (.name | contains("awsfed")) | .name | sub(".*/";"")'| jq -r .) 13 | local serverURL=$(kubectl config view -o json | jq -c '.clusters[] | select (.name | contains("awsfed")) | .cluster.server' | jq -r .) 14 | 15 | echo ${#searchServer} 16 | 17 | if [ ${#searchServer} -gt 0 ]; then 18 | echo 'Found cluster name; it will be shortened' 19 | else 20 | echo 'Cluster name is empty or null in kubeconfig file' 21 | exit 1 22 | fi 23 | 24 | if [ ${#serverURL} -gt 4 ]; then 25 | echo 'Server URL found; it will be lowercased' 26 | else 27 | echo 'Cluster URL is empty or null in kubeconfig file' 28 | exit 1 29 | fi 30 | 31 | local lowerServerURL=$(echo "$serverURL" | tr '[:upper:]' '[:lower:]') 32 | 33 | #fullpath=$(realpath --relative-to=${PWD} "$file") 34 | 35 | # Replacing cluster name 36 | sed -i "s!${searchServer}!${replaceServer}!g" "$file" 37 | 38 | # Lowering cluster URLs 39 | sed -i "s!${serverURL}!${lowerServerURL}!g" "$file" 40 | } 41 | 42 | cat ~/.kube/config > ~/.kube/config_orig 43 | 44 | # Changing uppercase to lowercase for kubefed and lowering the endpoint URL 45 | ReplaceAndLowerTheKubeConfig ~/.kube/config 46 | 47 | # merge 2 configs into one 48 | KUBECONFIG=~/.kube/config:/var/tmp/mcs_k8s_cfg kubectl config view --flatten > ~/.kube/config2 49 | cat ~/.kube/config2 > ~/.kube/config 50 | -------------------------------------------------------------------------------- /eks-cluster-setup/eks-cluster-warmup.script: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #$clustername=$0 4 | # eks-kubefed = AWS EKS cluster name from k8s context file, default/kubernetes-cluster-5454 = MCS k8s 5 | # eksctl create cluster -f eks-cluster-setup/eks-cluster.yaml 6 | # Create cluster with another VPC CIDR 7 | #CLUSTER_NAME=kubefed2 8 | #REGION=eu-central-1 9 | #VPC_CIDR=10.1.0.0/16 10 | #NODES_COUNT=2 11 | #NODES_TYPE=t3.large 12 | #eksctl create cluster \ 13 | # --name=$CLUSTER_NAME \ 14 | # --region=$REGION \ 15 | # --vpc-cidr=$VPC_CIDR \ 16 | # 
--nodes=$NODES_COUNT \ 17 | # --node-type=$NODES_TYPE \ 18 | # --asg-access \ 19 | # --full-ecr-access \ 20 | # --external-dns-access \ 21 | # --alb-ingress-access 22 | ############# 23 | kubectl apply -f eks-cluster-setup/rbac-tiller.yaml 24 | helm init --service-account tiller --wait 25 | helm repo add kubefed-charts https://raw.githubusercontent.com/kubernetes-sigs/kubefed/master/charts 26 | sleep 10 27 | helm repo update 28 | sleep 5 29 | helm install kubefed-charts/kubefed --name kubefed --version=0.1.0-rc6 --namespace kube-federation-system 30 | -------------------------------------------------------------------------------- /eks-cluster-setup/eks-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: kubefed2 6 | region: eu-central-1 7 | 8 | nodeGroups: 9 | - name: ng-1 10 | instanceType: t3.large 11 | desiredCapacity: 2 12 | iam: 13 | withAddonPolicies: 14 | imageBuilder: true 15 | autoScaler: true 16 | externalDNS: true 17 | certManager: true 18 | appMesh: true 19 | ebs: true 20 | fsx: true 21 | efs: true 22 | albIngress: true 23 | xRay: true 24 | cloudWatch: true 25 | 26 | cloudWatch: 27 | clusterLogging: 28 | enableTypes: ["*"] 29 | -------------------------------------------------------------------------------- /eks-cluster-setup/rbac-tiller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tiller 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: tiller 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: tiller 18 | namespace: kube-system -------------------------------------------------------------------------------- /exec-kubefed.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """Client to handle connections and actions executed against a remote host.""" 4 | 5 | import argparse 6 | import codecs 7 | import sys 8 | import time 9 | from functools import wraps 10 | from os import system 11 | from paramiko import SSHClient, AutoAddPolicy, RSAKey 12 | from paramiko.auth_handler import AuthenticationException, SSHException 13 | from scp import SCPClient, SCPException 14 | 15 | 16 | sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach()) 17 | sys.stderr = codecs.getwriter("utf-8")(sys.stderr.detach()) 18 | 19 | 20 | class Logger: 21 | def info(self, msg): 22 | sys.stdout.write(msg + '\n') 23 | 24 | def error(self, msg): 25 | sys.stderr.write(msg + '\n') 26 | 27 | 28 | logger = Logger() 29 | 30 | 31 | def retry(ExceptionToCheck, tries=3, delay=2, backoff=2, logger=None): 32 | def deco_retry(f): 33 | @wraps(f) 34 | def f_retry(*args, **kwargs): 35 | mtries, mdelay = tries, delay 36 | while mtries > 1: 37 | try: 38 | return f(*args, **kwargs) 39 | except ExceptionToCheck as e: 40 | msg = "%s, Retrying in %d seconds..." 
% (str(e), mdelay) 41 | if logger: 42 | logger.info(msg) 43 | time.sleep(mdelay) 44 | mtries -= 1 45 | mdelay *= backoff 46 | return f(*args, **kwargs) 47 | return f_retry 48 | return deco_retry 49 | 50 | 51 | class RemoteClient: 52 | """Client to interact with a remote host via SSH & SCP.""" 53 | 54 | def __init__(self, host, user, ssh_key_filepath, remote_path): 55 | self.host = host 56 | self.user = user 57 | self.ssh_key_filepath = ssh_key_filepath 58 | self.remote_path = remote_path 59 | self.client = None 60 | self.scp = None 61 | self.conn = None 62 | 63 | def __connect(self): 64 | """ 65 | Open connection to remote host. 66 | """ 67 | try: 68 | self.client = SSHClient() 69 | self.client.load_system_host_keys() 70 | self.client.set_missing_host_key_policy(AutoAddPolicy()) 71 | self.client.connect(self.host, 72 | username=self.user, 73 | key_filename=self.ssh_key_filepath, 74 | look_for_keys=True, 75 | timeout=5000) 76 | self.scp = SCPClient(self.client.get_transport()) 77 | except AuthenticationException as error: 78 | logger.info('Authentication failed: did you remember to create an SSH key?') 79 | logger.error(error) 80 | raise error 81 | # return outside the try block: a "finally: return" here would swallow the raised exception 82 | return self.client 83 | 84 | def __disconnect(self): 85 | self.client.close() 86 | self.scp.close() 87 | self.client = None 88 | 89 | def upload_file(self, file, remote_path=None): 90 | """Upload a single file to a remote directory.""" 91 | if self.client is None: 92 | self.client = self.__connect() 93 | if not remote_path: 94 | remote_path = self.remote_path 95 | try: 96 | self.scp.put(file, 97 | recursive=True, 98 | remote_path=remote_path) 99 | logger.info(f'Uploaded {file} to {remote_path}') 100 | except SCPException as error: 101 | logger.error(error) 102 | raise error 103 | finally: 104 | self.__disconnect() 105 | 106 | @retry(Exception, logger=logger) 107 | def execute_command(self, cmd): 108 | """ 109 | Execute command in ssh session. 110 | 111 | :param cmd: unix command as string.
112 | """ 113 | if self.client is None: 114 | self.client = self.__connect() 115 | try: 116 | stdin, stdout, stderr = self.client.exec_command(cmd) 117 | stdout.channel.recv_exit_status() 118 | response = stdout.readlines() 119 | error = stderr.readlines() 120 | if response or error: 121 | msg = f'INPUT: {cmd}\n' 122 | if response: 123 | msg += 'STDOUT:\n{}'.format('\n'.join(response)) 124 | if error: 125 | msg += 'STDERR:\n{}'.format('\n'.join(error)) 126 | logger.info(msg) 127 | except Exception: 128 | self.__disconnect() 129 | raise 130 | else: 131 | self.__disconnect() 132 | 133 | 134 | def main(): 135 | parser = argparse.ArgumentParser() 136 | parser.add_argument('--host', required=True, 137 | help='Remote host to connect by SSH') 138 | parser.add_argument('--user', required=True, 139 | help='User to connect to remote host') 140 | parser.add_argument('--private-key', required=True, 141 | help='SSH private key') 142 | parser.add_argument('--remote-path', required=True, 143 | help='Remote path on SSH connection') 144 | 145 | args = parser.parse_args() 146 | rc = RemoteClient(args.host, args.user, args.private_key, args.remote_path) 147 | rc.execute_command('mkdir -p .aws && mkdir -p .kube') 148 | rc.execute_command( 149 | 'curl -sLO {} && chmod 755 kubectl && sudo mv -v kubectl /usr/bin/'.format( 150 | 'https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl' 151 | ) 152 | ) 153 | rc.execute_command( 154 | 'curl -sLO {} && tar xzf kubefedctl-0.1.0-rc6-linux-amd64.tgz && chmod 755 kubefedctl && sudo mv -v kubefedctl /usr/bin/'.format( 155 | 'https://github.com/kubernetes-sigs/kubefed/releases/download/v0.1.0-rc6/kubefedctl-0.1.0-rc6-linux-amd64.tgz' 156 | ) 157 | ) 158 | rc.execute_command( 159 | 'curl -sL {} -o jq && chmod 755 jq && sudo mv -v jq /usr/bin/'.format( 160 | 'https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64' 161 | ) 162 | ) 163 | rc.execute_command( 164 | 'curl -sO https://bootstrap.pypa.io/get-pip.py && sudo python get-pip.py' 165 | ) 166 | rc.execute_command('sudo pip install -q awscli') 167 | rc.upload_file(['/root/.aws/config', '/root/.aws/credentials'], '/home/centos/.aws/') 168 | rc.upload_file('/root/.kube/config', '/home/centos/.kube/') 169 | kubefedctl_cmd = ( 170 | "export AWSCTX=$(kubectl config view -o json | jq -c '.contexts[] | select (.name | contains(\"awsfed\")) | .name' | jq -r .) && " 171 | "export AWSNAME=$(kubectl config view -o json | jq -c '.clusters[] | select (.name | contains(\"awsfed\")) | .name' | jq -r .) && " 172 | "export MCSCTX=$(kubectl config view -o json | jq -c '.contexts[] | select (.name | contains(\"mcs-cluster\")) | .name' | jq -r .) && " 173 | "export MCSNAME=$(kubectl config view -o json | jq -c '.clusters[] | select (.name | contains(\"mcs-cluster\")) | .name' | jq -r .) 
&& " 174 | "kubefedctl join $AWSNAME --cluster-context $AWSCTX --host-cluster-context $AWSCTX --v=2 && " 175 | "kubefedctl join $MCSNAME --cluster-context $MCSCTX --host-cluster-context $AWSCTX --v=2" 176 | ) 177 | rc.execute_command(kubefedctl_cmd) 178 | rc.execute_command('rm -rf .aws && rm -rf .kube') 179 | 180 | 181 | if __name__ == '__main__': 182 | main() 183 | -------------------------------------------------------------------------------- /fed-app-example/federated-deployment-rsp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scheduling.kubefed.io/v1alpha1 2 | kind: ReplicaSchedulingPreference 3 | metadata: 4 | name: test-deployment 5 | namespace: test 6 | spec: 7 | targetKind: FederatedDeployment 8 | totalReplicas: 10 9 | rebalance: true 10 | -------------------------------------------------------------------------------- /fed-app-example/federated-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: types.kubefed.io/v1beta1 2 | kind: FederatedDeployment 3 | metadata: 4 | name: test-deployment 5 | namespace: test 6 | spec: 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app: nginx 16 | template: 17 | metadata: 18 | labels: 19 | app: nginx 20 | spec: 21 | tolerations: 22 | - effect: NoExecute 23 | key: node.kubernetes.io/unreachable 24 | operator: Exists 25 | tolerationSeconds: 30 26 | - effect: NoExecute 27 | key: node.kubernetes.io/not-ready 28 | operator: Exists 29 | tolerationSeconds: 30 30 | containers: 31 | - image: nginx 32 | name: nginx 33 | 34 | placement: 35 | clusters: 36 | - name: awsfedcluster 37 | - name: mcs-cluster 38 | -------------------------------------------------------------------------------- /fed-app-example/federated-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: types.kubefed.io/v1beta1 2 | kind: FederatedService 3 | metadata: 4 | name: test-service 5 | namespace: test 6 | spec: 7 | template: 8 | spec: 9 | selector: 10 | app: nginx 11 | type: NodePort 12 | ports: 13 | - name: http 14 | port: 80 15 | placement: 16 | clusters: 17 | - name: awsfedcluster 18 | - name: mcs-cluster -------------------------------------------------------------------------------- /fed-app-example/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: test 5 | -------------------------------------------------------------------------------- /k8s-fed-yml-setup/fed-cluster-setup.script: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MailRuCloudSolutions/k8sfed/e3f8de5a1adcdcc8f76f123b3798b28dbb362253/k8s-fed-yml-setup/fed-cluster-setup.script -------------------------------------------------------------------------------- /k8s-fed-yml-setup/federated-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: types.kubefed.io/v1beta1 2 | kind: FederatedNamespace 3 | metadata: 4 | name: test 5 | namespace: test 6 | spec: 7 | placement: 8 | clusters: 9 | - name: awsfedcluster 10 | - name: mcs-cluster -------------------------------------------------------------------------------- /k8sFederated-mailru.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/MailRuCloudSolutions/k8sfed/e3f8de5a1adcdcc8f76f123b3798b28dbb362253/k8sFederated-mailru.png -------------------------------------------------------------------------------- /mcs-cluster-setup/cluster_provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | K8S_CFG_PATH=$1 6 | NETWORK_ID=$2 7 | SUBNET_ID=$3 8 | KEYPAIR_NAME=$4 9 | 10 | CLUSTER_NAME="mcs-cluster-$RAND_PART" 11 | 12 | cluster_payload=$(cat << EOF 13 | { 14 | "cluster_template_id": "a6b541da-c3fa-420a-9cb9-29a34b7ec2c9", 15 | "flavor_id": "Standard-2-4-40", 16 | "keypair": "$KEYPAIR_NAME", 17 | "master_count": 1, 18 | "master_flavor_id": "Standard-2-4-40", 19 | "name": "$CLUSTER_NAME", 20 | "node_groups": [{ 21 | "name": "default", 22 | "node_count": 1 23 | }], 24 | "labels": { 25 | "cluster_node_volume_type": "dp1-ssd", 26 | "heapster_enabled": false, 27 | "influx_grafana_dashboard_enabled": false, 28 | "prometheus_monitoring": false, 29 | "fixed_network": "$NETWORK_ID", 30 | "fixed_subnet": "$SUBNET_ID" 31 | } 32 | } 33 | EOF 34 | ) 35 | 36 | create_cluster() { 37 | echo $(curl -s -g -X POST -d "$cluster_payload" https://infra.mail.ru:9511/v1/clusters -H "Accept: application/json" -H "Content-Type: application/json" -H "X-Auth-Token: $1" | python -c "import sys, json; print( json.load(sys.stdin)['uuid'])") 38 | } 39 | 40 | get_cluster_status() { 41 | echo $(curl -s -g -X GET https://infra.mail.ru:9511/v1/clusters/$2 -H "Accept: application/json" -H "Content-Type: application/json" -H "X-Auth-Token: $1" | python -c "import sys, json; print(json.load(sys.stdin)['status'])") 42 | } 43 | 44 | wait_for_create() { 45 | i=0 46 | while [ $i -le 120 ] 47 | do 48 | i=$(( $i + 1 )) 49 | cluster_status=$(get_cluster_status $1 $2) 50 | if [ "$cluster_status" = "CREATE_COMPLETE" ]; then 51 | break 52 | elif [ "$cluster_status" = "CREATE_FAILED" ]; then 53 | echo ERROR: failed to create cluster 54 | exit 1 55 | fi 56 | echo Cluster status is $cluster_status 57 | sleep 30 58 | done 59 | } 60 | 61 | write_cluster_kubeconfig () { 62 | curl -s -g -X GET https://infra.mail.ru:9511/v1/clusters/$2/kube_config -H "Content-Type: application/json" -H "X-Auth-Token: $1" > $3 63 | } 64 | 65 | get_api_private_ip () { 66 | curl -s -H "Accept: application/json" -H "Content-Type: application/json" -H "X-Auth-Token: $1" "https://infra.mail.ru:9696/v2.0/lbaas/loadbalancers?name=${CLUSTER_NAME}-api-lb" | python -c "import sys, json; print(json.load(sys.stdin)['loadbalancers'][0]['vip_address'])" 67 | } 68 | 69 | provision_cluster() { 70 | echo Acquiring token... 
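# Note: Keystone tokens are time-limited, while cluster creation can take a long
# time; that is likely why a fresh token is issued here and then re-issued after
# wait_for_create below, right before downloading the kubeconfig.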
71 | openstack_token=$(openstack token issue -c id -f value) 72 | echo Creating cluster with payload $cluster_payload 73 | cluster_uuid=$(create_cluster $openstack_token) 74 | echo Created cluster with uuid $cluster_uuid 75 | wait_for_create $openstack_token $cluster_uuid 76 | openstack_token=$(openstack token issue -c id -f value) 77 | echo Downloading kubeconfig file into $1 78 | write_cluster_kubeconfig $openstack_token $cluster_uuid $1 79 | echo Updating API endpoint 80 | api_lb_private_ip=$(get_api_private_ip $openstack_token) 81 | sed -i "s#https://.*:6443#https://$api_lb_private_ip:6443#g" $1 82 | } 83 | 84 | provision_cluster $K8S_CFG_PATH 85 | -------------------------------------------------------------------------------- /mcs-cluster-setup/create_extip.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo $(openstack floating ip create -f value -c floating_ip_address ext-net) 6 | -------------------------------------------------------------------------------- /mcs-cluster-setup/create_network_resources.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | echo Creating network "k8s-fed-network-$RAND_PART" 6 | export NETWORK_ID=$(openstack network create -f value -c id "k8s-fed-network-$RAND_PART") 7 | echo Creating subnet "k8s-fed-subnet-$RAND_PART" 8 | SUBNET_ID=$(openstack subnet create "k8s-fed-subnet-$RAND_PART" --network $NETWORK_ID --subnet-range $1 -f value -c id) 9 | echo "Created subnet: $SUBNET_ID" 10 | echo Creating router "k8s-fed-router-$RAND_PART" 11 | ROUTER_ID=$(openstack router create "k8s-fed-router-$RAND_PART" -f value -c id) 12 | echo Connecting router to ext-net 13 | openstack router set $ROUTER_ID --external-gateway ext-net 14 | echo Connecting router to subnet 15 | openstack router add subnet $ROUTER_ID $SUBNET_ID 16 | echo $NETWORK_ID > $2 17 | echo $SUBNET_ID > $3 18 | -------------------------------------------------------------------------------- /mcs-cluster-setup/vpnserver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | # CONSTANTS 6 | IMAGE="4525415d-df00-4f32-a434-b8469953fe3e" 7 | FLAVOR="Basic-1-2-20" 8 | 9 | # PARAMETERS 10 | FIP_ADDRESS=$1 11 | NETWORK_ID=$2 12 | SUBNET_ID=$3 13 | PATH_TO_VPN_XML=$4 14 | MCS_CIDR=$5 15 | AWS_CIDR=$6 16 | KEYPAIR_NAME=$7 17 | KEYPAIR_PATH=$8 18 | 19 | # KEYPAIR 20 | echo Checking for keypair, creating it if missing...
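# The "show || create" on the next line makes this step idempotent: an existing
# keypair is reused; otherwise one is created and its private part saved to $KEYPAIR_PATH.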
21 | openstack keypair show $KEYPAIR_NAME || openstack keypair create -f value $KEYPAIR_NAME > $KEYPAIR_PATH 22 | 23 | # SEC GROUP 24 | SGROUP_ID=$(openstack security group create -f value -c id "vpnserver-sg-$RAND_PART") 25 | 26 | openstack security group rule create -f shell -c created_at --remote-ip 0.0.0.0/0 --protocol tcp --dst-port 22 $SGROUP_ID 27 | openstack security group rule create -f shell -c created_at --remote-ip 0.0.0.0/0 --protocol udp --dst-port 4500 $SGROUP_ID 28 | openstack security group rule create -f shell -c created_at --remote-ip 0.0.0.0/0 --protocol udp --dst-port 500 $SGROUP_ID 29 | openstack security group rule create -f shell -c created_at --remote-ip 0.0.0.0/0 --protocol icmp $SGROUP_ID 30 | 31 | # PREPARE USERDATA 32 | IPSEC_CONF=$(python - << EOF 33 | import sys 34 | import xml.etree.ElementTree as ET 35 | 36 | tree = ET.parse('$PATH_TO_VPN_XML') 37 | tunnels = tree.findall('ipsec_tunnel') 38 | ipsec_conf = "config setup\n\n" 39 | for i, t in enumerate(tunnels): 40 | ipsec_conf += "conn Tunnel%s\n" % i 41 | ipsec_conf += " auto=start\n" 42 | ipsec_conf += " left=%defaultroute\n" 43 | ipsec_conf += " leftid=%s\n" % "$FIP_ADDRESS" 44 | ipsec_conf += " right=%s\n" % t.find('vpn_gateway').find('tunnel_outside_address').find('ip_address').text 45 | ipsec_conf += " type=tunnel\n" 46 | ipsec_conf += " leftauth=psk\n" 47 | ipsec_conf += " rightauth=psk\n" 48 | ipsec_conf += " keyexchange=ikev1\n" 49 | ipsec_conf += " ike=aes128-sha1-modp1024\n" 50 | ipsec_conf += " ikelifetime=%s\n" % t.find('ike').find('lifetime').text 51 | ipsec_conf += " esp=aes128-sha1-modp1024\n" 52 | ipsec_conf += " lifetime=1h\n" 53 | ipsec_conf += " keyingtries=%forever\n" 54 | ipsec_conf += " leftsubnet=${MCS_CIDR}\n" 55 | ipsec_conf += " rightsubnet=${AWS_CIDR}\n" 56 | ipsec_conf += " dpddelay=10s\n" 57 | ipsec_conf += " dpdtimeout=30s\n" 58 | ipsec_conf += " dpdaction=restart\n" 59 | ipsec_conf += "\n" 60 | # for now we create only 1 tunnel 61 | break 62 | 63 | sys.stdout.write(ipsec_conf) 64 | EOF 65 | ) 66 | 67 | IPSEC_SECRETS=$(python - << EOF 68 | import sys 69 | import xml.etree.ElementTree as ET 70 | 71 | tree = ET.parse('$PATH_TO_VPN_XML') 72 | tunnels = tree.findall('ipsec_tunnel') 73 | ipsec_secrets = "# ipsec.secrets\n\n" 74 | for t in tunnels: 75 | ipsec_secrets += "{} {} : PSK \"{}\"\n".format( 76 | "$FIP_ADDRESS", 77 | t.find('vpn_gateway').find('tunnel_outside_address').find('ip_address').text, 78 | t.find('ike').find('pre_shared_key').text 79 | ) 80 | 81 | sys.stdout.write(ipsec_secrets) 82 | EOF 83 | ) 84 | 85 | cat <<EOF > /tmp/user-data.txt 86 | #!/bin/bash -x 87 | 88 | # enable ip routing and disable icmp redirects 89 | echo -e "\nnet.ipv4.ip_forward = 1\n" >> /usr/lib/sysctl.d/50-default.conf 90 | echo -e "net.ipv4.conf.all.send_redirects = 0\n" >> /usr/lib/sysctl.d/50-default.conf 91 | sysctl --system 92 | 93 | # install packages 94 | yum install -y epel-release yum-utils 95 | yum -q makecache -y 96 | 97 | # strongswan 98 | yum install -y strongswan 99 | 100 | cat <<EOT > /etc/strongswan/ipsec.conf 101 | $IPSEC_CONF 102 | EOT 103 | 104 | cat <<EOT > /etc/strongswan/ipsec.secrets 105 | $IPSEC_SECRETS 106 | EOT 107 | 108 | systemctl enable strongswan.service 109 | systemctl start strongswan.service 110 | EOF 111 | 112 | # CREATE SERVER 113 | SERVER_ID=$(openstack server create -f value -c id --key-name $KEYPAIR_NAME --image $IMAGE --nic "net-id=$NETWORK_ID" --flavor $FLAVOR --security-group $SGROUP_ID --user-data /tmp/user-data.txt "vpnserver-$RAND_PART") 114 | until [ $(openstack server 
show -f value -c status $SERVER_ID) = "ACTIVE" ] 115 | do 116 | sleep 2 117 | done 118 | 119 | # GET PRIVATE IP 120 | vpnserver_private_ip=$(openstack server show -f json $SERVER_ID | python -c "import sys,json; print(json.load(sys.stdin)['addresses'].split('=')[1])") 121 | 122 | # ATTACH IP 123 | openstack server add floating ip $SERVER_ID $FIP_ADDRESS 124 | 125 | # CREATE ROUTES ON DHCP 126 | openstack subnet set --host-route "destination=${AWS_CIDR},gateway=${vpnserver_private_ip}" $SUBNET_ID 127 | 128 | # ADD PORT PERMISSIONS 129 | port_id=$(openstack port list -f value -c ID --server $SERVER_ID) 130 | openstack port set --allowed-address "ip-address=${AWS_CIDR}" $port_id 131 | -------------------------------------------------------------------------------- /site-to-site-VPN-AWS/VPC-VPN-site2site.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: "2010-09-09" 2 | 3 | Description: > 4 | Create and attach VPN Gateway, Customer Gateway, VPN-Connection to VPC 5 | 6 | Parameters: 7 | 8 | HomeIP: 9 | Description: IP address of the home network 10 | Type: String 11 | 12 | 13 | ExtRouteCIDR: 14 | Description: Please enter the IP range (CIDR notation) for the external network in MCS 15 | Type: String 16 | Default: 192.168.10.0/24 17 | 18 | VpcId: 19 | Type: "AWS::EC2::VPC::Id" 20 | Description: VpcId that the VPNGateway is attached to 21 | 22 | RouteTableIds: 23 | Type: CommaDelimitedList 24 | Description: Propagate routes to the following tables, delimited with commas 25 | 26 | Resources: 27 | 28 | VpnCgw: 29 | Type: "AWS::EC2::CustomerGateway" 30 | Properties: 31 | Tags: 32 | - Key: Name 33 | Value: "S2S-MCS-CustomerGateWay" 34 | Type: ipsec.1 35 | IpAddress: !Ref HomeIP 36 | BgpAsn: '65000' 37 | 38 | VpnGw: 39 | Type: "AWS::EC2::VPNGateway" 40 | Properties: 41 | Type: ipsec.1 42 | Tags: 43 | - Key: Name 44 | Value: "S2S-MCS-VPnGateWay" 45 | 46 | VpnAttach: 47 | Type: "AWS::EC2::VPCGatewayAttachment" 48 | Properties: 49 | VpcId: 50 | !Ref VpcId 51 | VpnGatewayId: 52 | !Ref VpnGw 53 | 54 | VpnConnection1: 55 | Type: "AWS::EC2::VPNConnection" 56 | Properties: 57 | Tags: 58 | - Key: Name 59 | Value: "S2S-MCS-VPNconnection" 60 | Type: ipsec.1 61 | StaticRoutesOnly: 'true' 62 | VpnGatewayId: !Ref VpnGw 63 | CustomerGatewayId: !Ref VpnCgw 64 | 65 | croute1: 66 | DependsOn: 67 | - VpnConnection1 68 | Type: 'AWS::EC2::VPNConnectionRoute' 69 | Properties: 70 | VpnConnectionId: !Ref VpnConnection1 71 | DestinationCidrBlock: !Ref ExtRouteCIDR 72 | 73 | VpnRoutePropagate: 74 | DependsOn: 75 | - VpnAttach 76 | - VpnConnection1 77 | - VpnGw 78 | - VpnCgw 79 | - croute1 80 | Type: "AWS::EC2::VPNGatewayRoutePropagation" 81 | Properties: 82 | RouteTableIds: 83 | !Ref RouteTableIds 84 | VpnGatewayId: 85 | !Ref VpnGw 86 | 87 | Outputs: 88 | VPNGatewayId: 89 | Value: !Ref VpnConnection1 -------------------------------------------------------------------------------- /site-to-site-VPN-AWS/vpn-create-cfn.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | ip1=$1 5 | cidr2=$2 6 | filename=$3 7 | vpnconnid_path=$4 8 | aws_region=$5 9 | stackname="aws-mcs-kubfed-vpn" 10 | 11 | # Fetching newly created VPC ID based on tags 12 | vpcid=$(aws ec2 describe-vpcs --region=$aws_region --filters Name=tag:Name,Values="cdk-py/EKSVpc" --query 'Vpcs[0].VpcId' | jq -r .) 13 | 14 | # Fetching newly created routing tables. 
We will need these to update them with a static route from the MCS side 15 | rtids=$(aws ec2 describe-route-tables --region=$aws_region --filters "Name=vpc-id,Values=$vpcid" "Name=tag:aws:cloudformation:stack-name, Values=cdk-py" --query 'RouteTables[].RouteTableId' | jq -r 'join(",")') 16 | 17 | echo "We have vpcid: $vpcid and rtids: $rtids" 18 | echo "Input params: ip:$ip1, cidr:$cidr2, filename:$filename" 19 | 20 | # Deploy cloudformation stack with all AWS resources: VPN Gateway, VPN Customer Gateway, VPN site-to-site connection, etc. 21 | aws cloudformation deploy --region=$aws_region --template-file site-to-site-VPN-AWS/VPC-VPN-site2site.yaml --stack-name $stackname --parameter-overrides HomeIP=$ip1 ExtRouteCIDR=$cidr2 VpcId=$vpcid RouteTableIds=$rtids --tags KubeFed=True 22 | echo 'Yaml with VPN gateway, Customer Gateway, VPN connection created' 23 | 24 | # Fetching VPNconnectionId from cloudformation stack results 25 | VpnConnectionID=$(aws cloudformation describe-stacks --region=$aws_region --stack-name $stackname | jq -r '.Stacks[0].Outputs[] | select(.OutputKey == "VPNGatewayId") | .OutputValue') 26 | echo "We have VpnConnection ID: $VpnConnectionID" 27 | echo $VpnConnectionID > $vpnconnid_path 28 | 29 | # Fetching the XML config for the newly created VPN connection that should be provided to the MCS side 30 | aws ec2 describe-vpn-connections --region=$aws_region --vpn-connection-id $VpnConnectionID | jq -r '.VpnConnections[0].CustomerGatewayConfiguration' > $filename 31 | echo 'File with vpn configuration created and saved' 32 | echo 'Everything is created and prepared on the AWS side for the VPN connection!' -------------------------------------------------------------------------------- /super-big-script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 Amazon.com, Inc. and its affiliates. All Rights Reserved. 4 | 5 | # Licensed under the Amazon Software License (the "License"). 6 | # You may not use this file except in compliance with the License. 7 | # A copy of the License is located at 8 | 9 | # http://aws.amazon.com/asl/ 10 | 11 | # or in the "license" file accompanying this file. This file is distributed 12 | # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 13 | # express or implied. See the License for the specific language governing 14 | # permissions and limitations under the License. 
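# High-level flow of this script:
#   1. Deploy the AWS EKS cluster via CDK (CDK_py/).
#   2. Fetch the EKS kubeconfig and install Tiller + KubeFed (eks-cluster-warmup.script).
#   3. Create MCS network resources and an external IP for the VPN endpoint.
#   4. Deploy the AWS VPN resources via CloudFormation (vpn-create-cfn.sh).
#   5. Boot a strongSwan VPN server on the MCS side and wait for the tunnel to come UP.
#   6. Provision the MCS Kubernetes cluster, merge kubeconfigs, join both clusters
#      into the federation, and apply the example federated resources.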
15 | 16 | set -ex 17 | 18 | # Path to the XML file with the AWS VPN configuration; it will be autogenerated later in this script 19 | vpn_cfg_path=/var/tmp/vpn_cfg_conn.xml 20 | 21 | # Path to mcs_k8s config files 22 | mcs_k8s_cfg_path=/var/tmp/mcs_k8s_cfg 23 | mcs_netwid_path=/var/tmp/mcs_netwid_cfg 24 | mcs_subnetid_path=/var/tmp/mcs_subnetid_cfg 25 | mcs_keypair_path=/var/tmp/k8s-fed_id_rsa 26 | 27 | aws_vpnconnid_path=/var/tmp/vpnconnid 28 | aws_region=$(aws configure get region) 29 | 30 | # CIDR IP block on the MCS side that should be added as a static route on the AWS side after VPN connection 31 | # That CIDR block will be reserved in the newly created subnet on the MCS side and can be changed to anything 32 | mcs_cidr="192.168.10.0/24" 33 | aws_cidr="10.2.0.0/16" 34 | 35 | # Install EKS via CDK 36 | # All cluster init configuration is located in /CDK_py/cdk_py/cdk_py_stack.py 37 | # Requirements are located in requirements.txt 38 | # @TODO: need to pass $aws_region into CDK 39 | cd CDK_py 40 | rm -rf .env 41 | python3 -m venv .env 42 | source .env/bin/activate 43 | pip install -r requirements.txt 44 | cdk bootstrap 45 | cdk synth 46 | cdk deploy --require-approval never 47 | deactivate 48 | cd .. 49 | 50 | echo 'EKS created; waiting 30 sec until it is fully provisioned' 51 | sleep 30 #30 secs 52 | 53 | # check that kubectl get nodes works and current context = awsfedclusterctx 54 | # create .kube/config: get command from CF Output. 55 | rm -rf ~/.kube/config #delete if already exists 56 | runforEKSkubeconfig=$(aws cloudformation describe-stacks --stack-name cdk-py --region=$aws_region | jq -c '.Stacks[].Outputs[] | select (.OutputKey | contains("fedclusterConfigCommand")) | .OutputValue' | jq -r .) 57 | $runforEKSkubeconfig 58 | 59 | # Provisioning KubeFed and Tiller into the newly created AWS EKS cluster 60 | ./eks-cluster-setup/eks-cluster-warmup.script 61 | 62 | # OpenStack auth for the MCS cloud (hardcoded params @TODO) 63 | source ./openrc 64 | 65 | # Shared variables 66 | export RAND_PART=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 4 | head -n 1) 67 | 68 | mcs_keypair="k8s-fed-$RAND_PART" 69 | 70 | # Creating an external IP address on the MCS side that will be paired with the AWS VPN Gateway 71 | extip=$(./mcs-cluster-setup/create_extip.sh) 72 | echo "External IP from MCS side created: $extip" 73 | echo $extip > /var/tmp/extip 74 | 75 | # MCS: script for extIP, subnet, k8s cluster 76 | ./mcs-cluster-setup/create_network_resources.sh $mcs_cidr $mcs_netwid_path $mcs_subnetid_path 77 | mcs_netw_id=$(cat $mcs_netwid_path) 78 | mcs_subid_id=$(cat $mcs_subnetid_path) 79 | 80 | # Install AWS VPN resources (VPN Gateway, Customer Gateway, etc.) to establish VPN connections 81 | # Params: extip - IP newly created on the MCS side for VPN pairing 82 | # mcs_cidr - MCS CIDR IP block, a static variable for routing on the AWS side 83 | # vpn_cfg_path - path to the file that will be generated after execution for the MCS side 84 | ./site-to-site-VPN-AWS/vpn-create-cfn.sh $extip $mcs_cidr $vpn_cfg_path $aws_vpnconnid_path $aws_region 85 | 86 | # Creating a virtual machine on the MCS side with a strongSwan IPsec VPN server and establishing the connection to AWS 87 | ./mcs-cluster-setup/vpnserver.sh $extip $mcs_netw_id $mcs_subid_id $vpn_cfg_path $mcs_cidr $aws_cidr $mcs_keypair $mcs_keypair_path 88 | 89 | # Wait function for the S2S VPN connection to come UP (AWS+MCS) 90 | wait_for_create_vpn() { 91 | i=0 92 | while [ $i -le 30 ] 93 | do 94 | i=$(( $i + 1 )) 95 | VPN_conn_status=$(aws ec2 describe-vpn-connections --region=$aws_region --filters 
Name=vpn-connection-id,Values=$1 --query 'VpnConnections[].VgwTelemetry[].Status' | jq -r 'join(",")' ) 96 | echo "$i: $1 VPN_conn_status: $VPN_conn_status" 97 | if [ -n "$(echo "$VPN_conn_status" | grep "UP")" ] 98 | then 99 | echo "VPN Conn $1 has a tunnel UP" 100 | break 101 | else 102 | echo "VPN Conn $1 is still DOWN" 103 | fi 104 | sleep 10 105 | done 106 | } 107 | 108 | # Checking whether the VPN connection eventually came UP 109 | vpnconnid=$(cat $aws_vpnconnid_path) 110 | wait_for_create_vpn $vpnconnid 111 | 112 | # Creating the MCS k8s cluster 113 | ./mcs-cluster-setup/cluster_provision.sh $mcs_k8s_cfg_path $mcs_netw_id $mcs_subid_id $mcs_keypair 114 | 115 | # Federation 116 | ./eks-cluster-setup/eks-cluster-join-fed.script 117 | 118 | # Configuring federation from the VPN server 119 | ./exec-kubefed.py --host $extip --user centos --private-key $mcs_keypair_path --remote-path '/home/centos' 120 | 121 | # Configuring federated resources 122 | kubectl apply -f fed-app-example/namespace.yaml # create test NS 123 | kubectl apply -f k8s-fed-yml-setup/federated-namespace.yaml # create Federated NS 124 | kubectl apply -f fed-app-example/federated-nginx.yaml # federated Nginx Deployment 125 | 126 | kubectl -n kube-federation-system get kubefedclusters 127 | 128 | echo "Federation is up and running. Quit." 129 | --------------------------------------------------------------------------------
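After `super-big-script.sh` completes, the spread of the example federated deployment can be checked per cluster. A sketch (context names vary per run, so they are extracted here the same way `exec-kubefed.py` does):

```bash
# Resolve the two context names from the merged kubeconfig.
AWSCTX=$(kubectl config view -o json | jq -r '.contexts[] | select(.name | contains("awsfed")) | .name')
MCSCTX=$(kubectl config view -o json | jq -r '.contexts[] | select(.name | contains("mcs-cluster")) | .name')

kubectl get pods -n test --context "$AWSCTX"   # replicas placed on the EKS side
kubectl get pods -n test --context "$MCSCTX"   # replicas placed on the MCS side
```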