├── netpolymigrator ├── __init__.py ├── .DS_Store ├── example-apps │ ├── calico-np.yaml │ ├── cilium-np.yaml │ └── demo-app.yaml ├── validate.py ├── cleanup_cilium.py ├── cleanup_calico.py ├── pre_migration_check.py ├── rollback.py ├── collect.py ├── apply.py ├── calico_utils.py ├── cilium_utils.py ├── convert.py └── utils.py ├── requirements.txt ├── NOTICE ├── .DS_Store ├── CODE_OF_CONDUCT.md ├── setup.py ├── CONTRIBUTING.md ├── bin └── netpol_migrator.sh ├── README.md └── LICENSE /netpolymigrator/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.26.115 2 | kubernetes==26.1.0 3 | PyYAML==6.0 -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/k8s-network-policy-migrator/HEAD/.DS_Store -------------------------------------------------------------------------------- /netpolymigrator/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/k8s-network-policy-migrator/HEAD/netpolymigrator/.DS_Store -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 
3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /netpolymigrator/example-apps/calico-np.yaml: -------------------------------------------------------------------------------- 1 | # --- 2 | # apiVersion: "projectcalico.org/v3" 3 | # kind: NetworkPolicy 4 | # metadata: 5 | # name: deny-all-ingress 6 | # namespace: npmigrator-test 7 | # spec: 8 | # selector: app == 'demo-app' 9 | # ingress: 10 | # - {} 11 | --- 12 | apiVersion: "projectcalico.org/v3" 13 | kind: NetworkPolicy 14 | metadata: 15 | name: demo-app-ingress-rule 16 | namespace: npmigrator-test 17 | spec: 18 | selector: app == 'demo-app' 19 | ingress: 20 | - action: Allow 21 | protocol: TCP 22 | source: 23 | selector: app == 'client-one' 24 | destination: 25 | ports: 26 | - 80 27 | -------------------------------------------------------------------------------- /netpolymigrator/example-apps/cilium-np.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "cilium.io/v2" 3 | kind: CiliumNetworkPolicy 4 | metadata: 5 | name: "deny-all-ingress" 6 | namespace: npmigrator-test 7 | spec: 8 | endpointSelector: 9 | matchLabels: 10 | app: demo-app 11 | ingress: 12 | - {} 13 | --- 14 | apiVersion: "cilium.io/v2" 15 | kind: CiliumNetworkPolicy 16 | metadata: 17 | name: "demo-app-ingress-rule" 18 | namespace: npmigrator-test 19 | spec: 20 | endpointSelector: 21 | matchLabels: 22 | app: demo-app 23 | ingress: 24 | - fromEndpoints: 25 | - matchLabels: 26 | app: client-one 27 | toPorts: 28 | - ports: 29 | - port: "80" 30 | protocol: TCP 31 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, 
# setup.py — packaging metadata for the netpolymigrator tool.
from setuptools import setup, find_packages

setup(
    name="netpolymigrator",
    version="0.2.0",
    author="Sanjeev Ganjihal",
    description="A tool to migrate Calico and Cilium network policies to Kubernetes native network policies",
    packages=find_packages(),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        # Fixed: "Apache 2.0" is not a registered trove classifier; PyPI
        # rejects unknown classifiers on upload. The registered value is:
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    python_requires=">=3.6",
    install_requires=[
        "kubernetes>=12.0.0",
        "PyYAML>=5.1",
        "click>=7.0",
    ],
    entry_points={
        "console_scripts": [
            # NOTE(review): netpolymigrator/cli.py is not present in this
            # repo snapshot — confirm the console_script target exists.
            "netpolymigrator=netpolymigrator.cli:main"
        ]
    }
)
import logging
import re

logger = logging.getLogger(__name__)

# Kubernetes label key syntax: an optional DNS-subdomain prefix ending in "/"
# (e.g. "example.com/"), then a name segment of at most 63 characters made of
# alphanumerics, '-', '_' or '.', starting and ending with an alphanumeric.
_LABEL_KEY_RE = re.compile(
    r"^(?:[A-Za-z0-9](?:[A-Za-z0-9.-]{0,251}[A-Za-z0-9])?/)?"
    r"[A-Za-z0-9](?:[A-Za-z0-9._-]{0,61}[A-Za-z0-9])?$"
)


def _is_valid_label_key(label_key):
    """Return True if `label_key` is a syntactically valid Kubernetes label key."""
    return isinstance(label_key, str) and bool(_LABEL_KEY_RE.match(label_key))


def validate_network_policy(network_policy):
    """Validate a native Kubernetes NetworkPolicy dict.

    Checks: the value is a non-empty dict; required top-level fields are
    present; apiVersion and kind are supported; metadata has a name; spec and
    spec.policyTypes are non-empty; every egress podSelector label key is
    syntactically valid.

    Returns True when all checks pass; otherwise logs a warning describing
    the first failure and returns False.
    """
    if not network_policy or not isinstance(network_policy, dict):
        logger.warning("Invalid network policy: must be a non-empty dict")
        return False

    # Check that the network policy has required fields.
    required_fields = ["apiVersion", "kind", "metadata", "spec"]
    for field in required_fields:
        if field not in network_policy:
            logger.warning(f"Invalid network policy: missing required field '{field}'")
            return False

    # Check that the apiVersion is supported.
    supported_versions = ["networking.k8s.io/v1", "networking.k8s.io/v1beta1"]
    if network_policy["apiVersion"] not in supported_versions:
        logger.warning(f"Invalid network policy: unsupported apiVersion '{network_policy['apiVersion']}'")
        return False

    # Check that the kind is NetworkPolicy.
    if network_policy["kind"] != "NetworkPolicy":
        logger.warning(f"Invalid network policy: unsupported kind '{network_policy['kind']}'")
        return False

    # Check that the metadata has a name field.
    if "name" not in network_policy["metadata"]:
        logger.warning("Invalid network policy: metadata missing 'name' field")
        return False

    # Check that the spec field is not empty.
    if not network_policy.get("spec"):
        logger.warning("Invalid network policy: spec field must be non-empty")
        return False

    # Check that the policy types field is not empty.
    if not network_policy["spec"].get("policyTypes"):
        logger.warning("Invalid network policy: policyTypes field must be non-empty")
        return False

    # Check egress podSelector label keys against the Kubernetes label syntax.
    # Bug fix: the previous check accepted any key that merely *contained*
    # '-', '_' or '.', so e.g. 'bad-key!' passed despite the invalid '!'.
    for egress_rule in network_policy["spec"].get("egress", []):
        for to_rule in egress_rule.get("to", []):
            match_labels = (to_rule.get("podSelector") or {}).get("matchLabels") or {}
            for label_key in match_labels:
                if not _is_valid_label_key(label_key):
                    logger.warning(f"Invalid network policy: podSelector.matchLabels field has invalid label key '{label_key}'")
                    return False

    return True
# Best-effort removal of the Cilium CNI and the demo resources created by the
# pre-migration check. Progress and failures are logged to cleanup.log.

import subprocess  # For executing kubectl/helm commands
import time        # For retry back-off between delete attempts
import logging     # For logging to cleanup.log

# Configure logging settings
logging.basicConfig(filename='cleanup.log', level=logging.INFO)


def _best_effort(cmd, success_msg):
    """Run `cmd`, logging failure and continuing instead of aborting.

    Returns True when the command succeeded. Cleanup should attempt every
    step even when an earlier one fails (e.g. a resource was never created).
    """
    try:
        subprocess.check_output(cmd)
        logging.info(success_msg)
        return True
    except subprocess.CalledProcessError as e:
        logging.error(f"Error running {' '.join(cmd)}: {e}")
        return False


def delete_crd_by_keyword(keyword):
    """Delete every CRD whose name contains `keyword` (case-insensitive)."""
    try:
        # Get all CustomResourceDefinitions (CRDs) and decode the output
        crds = subprocess.check_output(["kubectl", "get", "crd", "-A", "-o", "name"]).decode("utf-8").split("\n")
        # Filter CRDs containing the keyword
        keyword_crds = [crd for crd in crds if keyword.lower() in crd.lower()]

        if not keyword_crds:
            logging.info(f"No CRDs found with the keyword '{keyword}'")
            return

        for crd in keyword_crds:
            # Retry up to 3 times: CRD deletion can fail transiently while
            # finalizers are still running.
            for _ in range(3):
                try:
                    subprocess.check_output(["kubectl", "delete", crd])
                    logging.info(f"CRD {crd} removed successfully")
                    break
                except subprocess.CalledProcessError as e:
                    logging.error(f"Error deleting CRD {crd}: {e}")
                    time.sleep(5)
    except subprocess.CalledProcessError as e:
        logging.error(f"Error retrieving CRDs: {e}")


def cleanup_cilium():
    """Remove demo resources, the Cilium DaemonSet, the Helm release and CRDs.

    Each step is best-effort. Bug fix: previously the first failing
    `kubectl delete` (e.g. the demo app was never installed) raised
    CalledProcessError and skipped the DaemonSet / Helm / CRD cleanup.
    """
    _best_effort(["kubectl", "delete", "-f", "../netpolymigrator/example-apps/demo-app.yaml"],
                 "Deleted demo-app.yaml resources")
    _best_effort(["kubectl", "delete", "-f", "../netpolymigrator/example-apps/cilium-np.yaml"],
                 "Deleted cilium-np.yaml resources")
    _best_effort(["kubectl", "delete", "daemonset", "cilium", "-n", "kube-system"],
                 "Cilium DaemonSet removed successfully")
    _best_effort(["helm", "delete", "cilium", "-n", "kube-system"],
                 "Uninstalled cilium using helm")
    delete_crd_by_keyword("cilium")


def main():
    """Entry point: run the cleanup, tolerating user interruption."""
    try:
        cleanup_cilium()
    except KeyboardInterrupt:
        logging.info("Cleanup interrupted by user. Exiting...")


if __name__ == "__main__":
    main()
15 | 16 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 17 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 18 | 19 | * A reproducible test case or series of steps 20 | * The version of our code being used 21 | * Any modifications you've made relevant to the bug 22 | * Anything unusual about your environment or deployment 23 | 24 | 25 | ## Contributing via Pull Requests 26 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 27 | 28 | 1. You are working against the latest source on the *main* branch. 29 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 30 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 31 | 32 | To send us a pull request, please: 33 | 34 | 1. Fork the repository. 35 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 36 | 3. Ensure local tests pass. 37 | 4. Commit to your fork using clear commit messages. 38 | 5. Send us a pull request, answering any default questions in the pull request interface. 39 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 40 | 41 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 42 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 43 | 44 | 45 | ## Finding contributions to work on 46 | Looking at the existing issues is a great way to find something to contribute on. 
#!/bin/bash
# Entry point for NetPolyMigrator: dispatches subcommands to the Python tooling.

# Get the directory of this script
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)"

# Bug fix: the usage string listed a nonexistent `setup_environment` command
# and omitted the implemented `pre_migration_check` command.
USAGE="Usage: ./netpol_migrator.sh [pre_migration_check|collect|convert|apply|validate|rollback|cleanup]"

# Check if the command line argument is provided
if [ -z "$1" ]; then
    echo "$USAGE"
    exit 1
fi

case $1 in

pre_migration_check)
    echo "Pre-migration check..."
    # Deploy the Demo App & Network Policy
    python3 "${SCRIPT_DIR}/../netpolymigrator/pre_migration_check.py"
    ;;

collect)
    echo "Collecting network policies..."
    # Execute collect.py
    python3 "${SCRIPT_DIR}/../netpolymigrator/collect.py" collect
    ;;

convert)
    echo "Converting network policies..."
    # Execute convert.py with the required --input argument
    python3 "${SCRIPT_DIR}/../netpolymigrator/convert.py" convert --input collected_network_policies
    ;;

apply)
    # Ask the user which subfolder to use
    echo "Which subfolder do you want to use for applying the network policies?"
    echo "1. cilium_converted"
    echo "2. calico_converted"
    read -p "Enter your choice (1 or 2): " choice

    case $choice in
    1)
        echo "Applying network policies from cilium_converted..."
        python3 "${SCRIPT_DIR}/../netpolymigrator/apply.py" --input converted_network_policies/cilium_converted
        ;;
    2)
        echo "Applying network policies from calico_converted..."
        python3 "${SCRIPT_DIR}/../netpolymigrator/apply.py" --input converted_network_policies/calico_converted
        ;;
    *)
        echo "Invalid choice. Please enter 1 or 2."
        exit 1
        ;;
    esac
    ;;

rollback)
    echo "Rolling back applied network policies..."
    # Execute rollback.py with the required --applied-network-policies-file argument
    python3 "${SCRIPT_DIR}/../netpolymigrator/rollback.py" --applied-network-policies-file applied_network_policies.yaml
    ;;

validate)
    echo "Validating network policies..."
    # Post validation step
    python3 "${SCRIPT_DIR}/../netpolymigrator/validate.py"
    ;;

cleanup)
    echo "Cleaning up..."
    # Prompt user to select CNI provider
    echo "Which CNI provider are you using?"
    select cni_provider in "Calico" "Cilium"; do
        case $cni_provider in
        "Calico")
            echo "Cleaning up Calico..."
            python3 "${SCRIPT_DIR}/../netpolymigrator/cleanup_calico.py"
            break
            ;;
        "Cilium")
            echo "Cleaning up Cilium..."
            python3 "${SCRIPT_DIR}/../netpolymigrator/cleanup_cilium.py"
            break
            ;;
        *)
            echo "Invalid input. Please select a number from the options."
            ;;
        esac
    done
    ;;

*)
    echo "Invalid command. $USAGE"
    exit 1
    ;;
esac
# Best-effort removal of Calico/Tigera components and the demo resources.
# Progress and failures are logged to cleanup.log.

import subprocess
import time
import logging

# Initialize logging to write logs to 'cleanup.log' file with INFO level
logging.basicConfig(filename='cleanup.log', level=logging.INFO)

# Upper bound on how long to wait for a namespace's workloads to disappear.
_WAIT_TIMEOUT_SECONDS = 300


def delete_crd_by_keyword(keyword):
    """Delete every CRD whose name contains `keyword` (case-insensitive)."""
    try:
        # Retrieve the list of CRDs in the Kubernetes cluster
        crds = subprocess.check_output(["kubectl", "get", "crd", "-A", "-o", "name"]).decode("utf-8").split("\n")

        # Filter CRDs that contain the specified keyword
        keyword_crds = [crd for crd in crds if keyword.lower() in crd.lower()]

        if not keyword_crds:
            logging.info(f"No CRDs found with the keyword '{keyword}'")
            return

        for crd in keyword_crds:
            # Retry up to 3 times: deletion can fail transiently while
            # finalizers are still running.
            for _ in range(3):
                try:
                    subprocess.check_output(["kubectl", "delete", crd])
                    logging.info(f"CRD {crd} removed successfully")
                    break
                except subprocess.CalledProcessError as e:
                    logging.error(f"Error deleting CRD {crd}: {e}")
                    time.sleep(5)

    except subprocess.CalledProcessError as e:
        logging.error(f"Error retrieving CRDs: {e}")


def cleanup_calico():
    """Remove the Calico installation, its namespaces, CRDs and demo resources."""
    try:
        # Delete Calico installation
        subprocess.check_output(["kubectl", "delete", "installation.operator.tigera.io", "default"])
        logging.info("Deleted installation.operator.tigera.io default")

        # Wait for workloads in each Calico namespace to be deleted
        for namespace in ["calico-apiserver", "calico-system", "tigera-operator"]:
            subprocess.check_output(["kubectl", "delete", "--all", "-n", namespace, "pod,svc,deploy"])
            deadline = time.monotonic() + _WAIT_TIMEOUT_SECONDS
            while True:
                # Bug fix: recent kubectl prints "No resources found in <ns>
                # namespace." on *stderr*; checking stdout alone could loop
                # forever, so capture stderr into the checked output.
                resources = subprocess.check_output(
                    ["kubectl", "get", "all", "-n", namespace],
                    stderr=subprocess.STDOUT,
                ).decode("utf-8")
                if "No resources found" in resources:
                    break
                if time.monotonic() >= deadline:
                    # Bounded wait: previously this loop had no timeout and
                    # could hang indefinitely on a stuck resource.
                    logging.error(f"Timed out waiting for resources in {namespace} to be deleted")
                    break
                logging.info(f"Waiting for resources in {namespace} to be deleted...")
                time.sleep(5)

        # Delete CRDs related to Calico and Tigera
        delete_crd_by_keyword("projectcalico")
        delete_crd_by_keyword("tigera")

        # Delete namespaces
        for namespace in ["calico-apiserver", "calico-system", "tigera-operator"]:
            subprocess.check_output(["kubectl", "delete", "namespace", namespace])
            logging.info(f"Deleted namespace {namespace}")

        # Delete demo application and Calico network policy
        subprocess.check_output(["kubectl", "delete", "-f", "../netpolymigrator/example-apps/demo-app.yaml"])
        subprocess.check_output(["kubectl", "delete", "-f", "../netpolymigrator/example-apps/calico-np.yaml"])
        logging.info("Deleted demo-app.yaml and calico-np.yaml deployments")

    except subprocess.CalledProcessError as e:
        logging.error(f"Error during cleanup: {e}")


def main():
    """Entry point: run the cleanup, tolerating user interruption."""
    try:
        cleanup_calico()
    except KeyboardInterrupt:
        logging.info("Cleanup interrupted by user. Exiting...")


if __name__ == "__main__":
    main()
# Pre-migration prerequisites:
#   1. Detect which custom network policy flavour the cluster uses (Calico or Cilium).
#   2. Deploy a demo application plus the matching sample network policy.
#   3. Validate that the policy behaves as expected.
#   4. Remove every object that was created.

import logging
import subprocess

from utils import detect_custom_network_policy_type, validate_np

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Namespace and in-cluster endpoint used by the validation step
namespace = "npmigrator-test"
endpoint_demoapp = "demo-svc.npmigrator-test.svc.cluster.local"

# (resource to probe, manifest to delete, warning when the resource is absent)
_POLICY_CLEANUP_TARGETS = (
    ("networkpolicies.projectcalico.org",
     "../netpolymigrator/example-apps/calico-np.yaml",
     "Calico network policy not found. Skipping deletion."),
    ("networkpolicies.cilium.io",
     "../netpolymigrator/example-apps/cilium-np.yaml",
     "Cilium network policy not found. Skipping deletion."),
)

# Maps the detected policy type to (manifest path, success log message)
_NP_MANIFESTS = {
    "calico": ("../netpolymigrator/example-apps/calico-np.yaml",
               "Installed Calico network policy"),
    "cilium": ("../netpolymigrator/example-apps/cilium-np.yaml",
               "Installed Cilium network policy"),
}


def cleanup():
    """Delete the demo app and whichever sample network policy was installed."""
    try:
        # Remove the demo application first
        subprocess.check_call(["kubectl", "delete", "-f", "../netpolymigrator/example-apps/demo-app.yaml"])

        for resource, manifest, missing_msg in _POLICY_CLEANUP_TARGETS:
            try:
                # Probe first so a missing policy is a warning, not a failure
                subprocess.check_call(["kubectl", "get", resource, "demo-app-ingress-rule"])
                subprocess.check_call(["kubectl", "delete", "-f", manifest])
            except subprocess.CalledProcessError:
                logger.warning(missing_msg)

        logger.info("Cleanup successful.")

    except subprocess.CalledProcessError as e:
        logger.error(f"Cleanup failed: {e}")


def pre_migration_check():
    """Run the detect / deploy / validate sequence, always cleaning up afterwards."""
    try:
        # Step 1: Detect the custom network policy type (Calico or Cilium)
        custom_network_policy_type = detect_custom_network_policy_type()

        # Step 2: Install the Demo App
        logger.info("Installing Demo App")
        subprocess.check_output(["kubectl", "apply", "-f", "../netpolymigrator/example-apps/demo-app.yaml"])
        logger.info("Installed Demo App successfully")

        # Step 3: Install the NetworkPolicy matching the detected type
        entry = _NP_MANIFESTS.get(custom_network_policy_type)
        if entry is None:
            logger.error("No supported custom NetworkPolicy CRD found.")
            return
        manifest_path, success_msg = entry
        subprocess.check_output(["kubectl", "apply", "-f", manifest_path])
        logger.info(success_msg)

        # Step 4: Validate Network Policies
        if validate_np(namespace, endpoint_demoapp):
            logger.info("All network policies validated successfully.")
        else:
            logger.error("NetworkPolicy test failed")
            raise Exception("Network Policy Validation failed!!")

    except Exception as e:
        logger.error(f"An error occurred: {e}")

    finally:
        # Step 5: Cleanup
        cleanup()


if __name__ == "__main__":
    pre_migration_check()
# Roll back (delete) previously applied native NetworkPolicies recorded in a
# YAML file, with optional dry-run support.

import kubernetes
from kubernetes import config
import yaml
import argparse
import os
import logging

# Configure logging to output messages with a specific format and level
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def roll_back_network_policy(network_policy, namespace, dry_run=False):
    """Roll back (delete) a single network policy in a Kubernetes cluster.

    Args:
        network_policy (dict): The network policy to roll back.
        namespace (str): The Kubernetes namespace where the policy exists.
        dry_run (bool): Whether to only log the action without performing it.

    Returns:
        bool: True on success (or dry run), False on failure or malformed input.
    """
    # Load Kubernetes config and create a client instance
    config.load_kube_config()
    client = kubernetes.client.NetworkingV1Api()

    # Bug fix: guard against malformed entries (e.g. a policy dict without
    # metadata.name) instead of raising KeyError and aborting the whole run.
    metadata = network_policy.get("metadata") or {}
    network_policy_name = metadata.get("name")
    if not network_policy_name:
        logger.error(f"Skipping malformed network policy without metadata.name: {network_policy!r}")
        return False

    try:
        if dry_run:
            # If dry run, log what would be done without making actual changes
            logger.info(f"Dry run: Network policy '{network_policy_name}' would be rolled back in namespace '{namespace}'")
        else:
            # Actually delete the network policy
            client.delete_namespaced_network_policy(network_policy_name, namespace)
            logger.info(f"Network policy '{network_policy_name}' rolled back successfully in namespace '{namespace}'")
    except kubernetes.client.rest.ApiException as e:
        logger.error(f"Error rolling back network policy '{network_policy_name}' in namespace '{namespace}': {e}")
        return False

    return True


def load_applied_network_policies(file_path):
    """Load the list of applied network policies from a file.

    Bug fix: yaml.safe_load_all yields None for empty documents (e.g. a
    trailing '---'); filter them out so the rollback loop never dereferences
    None.
    """
    with open(file_path, "r") as infile:
        return [doc for doc in yaml.safe_load_all(infile) if doc]


def main():
    """Main function that parses arguments and orchestrates the rollback."""
    # Create an argument parser and define the command-line arguments
    parser = argparse.ArgumentParser(description='Roll back applied network policies in a Kubernetes cluster.')
    parser.add_argument('--namespace', type=str, default="default", help='Kubernetes namespace where the network policies should be rolled back.')
    parser.add_argument('--applied-network-policies-file', type=str, default="applied_network_policies.yaml", help='Path to the file with applied network policies.')
    parser.add_argument('--dry-run', action='store_true', help='Preview the changes without actually applying them.')
    args = parser.parse_args()

    # Check if the file containing applied network policies exists
    if not os.path.isfile(args.applied_network_policies_file):
        logger.error(f"Cannot find the applied network policies file '{args.applied_network_policies_file}'. Please check the file path.")
        return

    # Load the applied network policies from the file
    applied_network_policies = load_applied_network_policies(args.applied_network_policies_file)

    # If no applied network policies are found, exit the script
    if not applied_network_policies:
        logger.info("No applied network policies found to roll back.")
        return

    # Log the number of network policies to be rolled back and proceed
    logger.info(f"Rolling back {len(applied_network_policies)} applied network policies in namespace '{args.namespace}'...")
    for network_policy in applied_network_policies:
        roll_back_network_policy(network_policy, args.namespace, args.dry_run)


if __name__ == '__main__':
    main()
# Collect existing Calico/Cilium custom network policies from the cluster and
# save each one as a YAML file for later conversion.

import os
import yaml
import argparse
import logging
import tempfile
from utils import detect_custom_network_policy_type, collect_network_policies

# Configure logging settings
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def save_policy(policy, output_file):
    """Save a network policy to `output_file` in YAML format.

    The policy is written to a temporary file which is then atomically
    renamed into place, so readers never observe a half-written file.
    """
    try:
        # Bug fix: create the temp file in the destination directory.
        # Previously it was created in the tempfile default dir (often a
        # different filesystem, e.g. tmpfs), where os.rename() fails with
        # "Invalid cross-device link" (EXDEV).
        target_dir = os.path.dirname(output_file) or "."
        with tempfile.NamedTemporaryFile(mode='w', dir=target_dir, delete=False) as temp_file:
            yaml.safe_dump(policy, temp_file)

        # Mark the collected policy read-only before publishing it
        os.chmod(temp_file.name, 0o400)

        # os.replace() is atomic and overwrites an existing file on all platforms
        os.replace(temp_file.name, output_file)

    except Exception as e:
        # Log any errors that occur during the saving process
        logger.error(f"An error occurred while saving the policy: {e}")


def collect(args):
    """Collect existing network policies and save them to an output directory."""
    try:
        # Detect the type of custom network policy (either Calico or Cilium)
        custom_network_policy_type = detect_custom_network_policy_type()

        if custom_network_policy_type not in ("calico", "cilium"):
            logger.error("No supported custom NetworkPolicy CRD found.")
            return

        logger.info("Collecting network policies...")

        # Collect the custom network policies
        network_policies = collect_network_policies(custom_network_policy_type=custom_network_policy_type)

        if not network_policies:
            logger.info("No network policies found.")
            return

        # Create the output directory for saving collected policies
        output_folder = os.path.join(args.output, custom_network_policy_type)
        os.makedirs(output_folder, exist_ok=True)

        # Iterate over each collected policy to save it
        for policy in network_policies:
            policy_name = policy["metadata"]["name"]

            # Namespaced policies go in the top-level folder; global /
            # cluster-wide policies go in a dedicated subfolder.
            if "namespace" in policy["metadata"]:
                policy_namespace = policy["metadata"]["namespace"]
                output_file = os.path.join(output_folder, f"{policy_namespace}_{policy_name}.yaml")
            else:
                subfolder = "global_policies" if custom_network_policy_type == "calico" else "clusterwide_policies"
                os.makedirs(os.path.join(output_folder, subfolder), exist_ok=True)
                output_file = os.path.join(output_folder, subfolder, f"{custom_network_policy_type}_clusterwide_{policy_name}.yaml")

            if args.dry_run:
                # If it's a dry run, just log the output file name
                logger.info(f'Dry run: Would write policy to {output_file}')
            else:
                # Actually save the policy to the output file
                save_policy(policy, output_file)

        # Log a summary of the collection process
        logger.info(f"Collected {len(network_policies)} {custom_network_policy_type} NetworkPolicies and saved to '{output_folder}' folder.")

    except Exception as e:
        # Log any errors that occur during the collection process
        logger.error(f"An error occurred during the collection process: {e}")


def main():
    """Parse command-line arguments and dispatch to the requested command."""
    parser = argparse.ArgumentParser(description="NetPolyMigrator")
    subparsers = parser.add_subparsers(dest="command", required=True)

    # Arguments for the "collect" command
    collect_parser = subparsers.add_parser("collect", help="Collect custom network policies")
    collect_parser.add_argument("--output", type=str, default="collected_network_policies", help="Output folder for the collected custom NetworkPolicies.")
    collect_parser.add_argument("--dry-run", action='store_true', help='Perform a dry run without making any changes')

    args = parser.parse_args()

    if args.command == "collect":
        collect(args)
    else:
        logger.error(f"Invalid command: {args.command}")


if __name__ == "__main__":
    main()
-------------------------------------------------------------------------------- /netpolymigrator/apply.py: -------------------------------------------------------------------------------- 1 | # Import necessary modules 2 | import os 3 | import sys 4 | import yaml 5 | import argparse 6 | import logging 7 | from kubernetes import client, config 8 | from kubernetes.client.rest import ApiException 9 | from validate import validate_network_policy 10 | from utils import validate_np 11 | 12 | # Initialize logging 13 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 14 | logger = logging.getLogger(__name__) 15 | 16 | def create_namespace_if_not_exists(api, namespace): 17 | """Create a namespace if it doesn't exist.""" 18 | try: 19 | api.read_namespace(name=namespace) 20 | logger.info(f"Namespace {namespace} already exists.") 21 | except ApiException as e: 22 | if e.status == 404: 23 | api.create_namespace(client.V1Namespace( 24 | api_version="v1", 25 | kind="Namespace", 26 | metadata=client.V1ObjectMeta(name=namespace) 27 | )) 28 | logger.info(f"Created namespace {namespace}") 29 | 30 | def get_policy_files(input_path): 31 | """Get a list of policy files from the input path.""" 32 | policy_files = [] 33 | if os.path.isfile(input_path): 34 | policy_files.append(input_path) 35 | elif os.path.isdir(input_path): 36 | for file_name in os.listdir(input_path): 37 | if file_name.endswith(".yaml") or file_name.endswith(".yml"): 38 | policy_files.append(os.path.join(input_path, file_name)) 39 | else: 40 | raise ValueError(f"Invalid input path: {input_path}") 41 | return policy_files 42 | 43 | def apply_kubernetes_network_policies(network_policies, namespace, dry_run=False): 44 | """Applies network policies to a Kubernetes cluster.""" 45 | applied_network_policies = [] 46 | for network_policy_dict in network_policies: 47 | network_policy_name = network_policy_dict['metadata']['name'] 48 | logger.info(f"Validating policy 
{network_policy_name}") 49 | 50 | if validate_network_policy(network_policy_dict): 51 | logger.info(f"Applying policy {network_policy_name}") 52 | if apply_network_policy(network_policy_dict, namespace, dry_run): 53 | applied_network_policies.append(network_policy_dict) 54 | else: 55 | logger.warning(f"Skipping invalid policy {network_policy_name}") 56 | return applied_network_policies 57 | 58 | def apply_network_policy(network_policy, namespace, dry_run=False): 59 | """Applies a network policy to a Kubernetes cluster.""" 60 | config.load_kube_config() 61 | api = client.NetworkingV1Api() 62 | 63 | # Explicitly set the namespace in the network policy object to match the namespace in the API call 64 | network_policy['metadata']['namespace'] = namespace 65 | 66 | try: 67 | if dry_run: 68 | api.create_namespaced_network_policy(namespace, network_policy, dry_run='All') 69 | logger.info(f"Dry run: Network policy '{network_policy['metadata']['name']}' would be applied.") 70 | else: 71 | api.create_namespaced_network_policy(namespace, network_policy) 72 | logger.info(f"Network policy '{network_policy['metadata']['name']}' applied successfully.") 73 | except ApiException as e: 74 | logger.error(f"Error applying network policy '{network_policy['metadata']['name']}': {e}") 75 | return False 76 | return True 77 | 78 | def save_applied_policies_to_file(applied_policies, file_path): 79 | """Save applied network policies to a file.""" 80 | with open(file_path, 'w') as f: 81 | yaml.dump_all(applied_policies, f) 82 | logger.info(f"Saved applied network policies to {file_path}") 83 | 84 | def main(): 85 | parser = argparse.ArgumentParser(description='Apply network policies to a Kubernetes cluster.') 86 | parser.add_argument('--input', type=str, required=True, help='Path to the input network policy file or directory.') 87 | parser.add_argument('--namespace', type=str, default='default', help='Kubernetes namespace where the network policies will be applied.') 88 | 
parser.add_argument('--dry-run', action='store_true', help='Preview the changes without actually applying them.') 89 | args = parser.parse_args() 90 | 91 | # Initialize Kubernetes API client 92 | config.load_kube_config() 93 | api = client.NetworkingV1Api() 94 | core_api = client.CoreV1Api() 95 | 96 | # Create namespace if it doesn't exist 97 | create_namespace_if_not_exists(core_api, args.namespace) 98 | 99 | # Read and validate policy files 100 | k8s_network_policies = [] 101 | for policy_file in get_policy_files(args.input): 102 | with open(policy_file, 'r') as f: 103 | network_policy_dict = yaml.safe_load(f) 104 | if validate_network_policy(network_policy_dict): 105 | k8s_network_policies.append(network_policy_dict) 106 | else: 107 | logger.warning(f"Skipping invalid policy {network_policy_dict['metadata']['name']}") 108 | 109 | # Apply the network policies 110 | applied_policies = apply_kubernetes_network_policies(k8s_network_policies, args.namespace, args.dry_run) 111 | 112 | # Save applied policies to a file for future rollback 113 | save_applied_policies_to_file(applied_policies, 'applied_network_policies.yaml') 114 | 115 | if __name__ == "__main__": 116 | main() 117 | -------------------------------------------------------------------------------- /netpolymigrator/calico_utils.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Set up logging 4 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 5 | logger = logging.getLogger(__name__) 6 | 7 | unsup_features = ["order", "serviceAccountSelector", "http", "icmp", "notICMP", "notProtocol", "notNets", "notPorts", "notSelector", "serviceAccounts", "services"] 8 | 9 | # Implement conversion logic for Calico to Kubernetes native network policy 10 | def convert_calico_network_policy(calico_policy): 11 | k8s_network_policy = { 12 | "apiVersion": "networking.k8s.io/v1", 13 | "kind": "NetworkPolicy", 14 | 
"metadata": { 15 | "name": calico_policy["metadata"]["name"], 16 | "namespace": calico_policy["metadata"]["namespace"], 17 | }, 18 | "spec": { 19 | "policyTypes": [] 20 | }, 21 | } 22 | 23 | spec = calico_policy["spec"] 24 | # Handle selectors 25 | if "selector" in spec: 26 | kv = parse_labels(spec["selector"]) 27 | k8s_network_policy["spec"]["podSelector"] = { 28 | "matchLabels": {kv[0] : kv[1]} 29 | } 30 | 31 | check_for_unsup_features(spec) 32 | 33 | # Handle ingress rules 34 | if "ingress" in spec: 35 | k8s_network_policy["spec"]["ingress"] = [] 36 | for rule in spec["ingress"]: 37 | if "Allow" != rule["action"]: 38 | raise Exception(f"Calico NetworkPolicy has unsupported ingress action - {rule['action']}") 39 | 40 | check_for_unsup_features(rule) 41 | 42 | k8s_rule = {} 43 | if "source" in rule: 44 | check_for_unsup_features(rule["source"]) 45 | k8s_rule["from"] = [] 46 | if "namespaceSelector" in rule["source"]: 47 | kv = parse_labels(rule["source"]["namespaceSelector"]) 48 | k8s_rule["from"].append({"namespaceSelector": {"matchLabels": {kv[0] : kv[1]}}}) 49 | 50 | if "selector" in rule["source"]: 51 | kv = parse_labels(rule["source"]["selector"]) 52 | k8s_rule["from"].append({"podSelector": {"matchLabels": {kv[0] : kv[1]}}}) 53 | 54 | if "nets" in rule["source"]: 55 | k8s_rule["from"].append({"ipBlock": {"cidr": rule["source"]["nets"][0]}}) 56 | 57 | ports = convert_ports(rule) 58 | if len(ports) > 0: 59 | k8s_rule["ports"] = ports 60 | 61 | if "action" in rule and rule["action"].lower() == "allow": 62 | k8s_network_policy["spec"]["ingress"].append(k8s_rule) 63 | k8s_network_policy["spec"]["policyTypes"].append("Ingress") 64 | 65 | # Handle egress rules 66 | if "spec" in calico_policy and "egress" in calico_policy["spec"]: 67 | k8s_network_policy["spec"]["egress"] = [] 68 | for rule in calico_policy["spec"]["egress"]: 69 | if "Allow" != rule["action"]: 70 | raise Exception(f"Calico NetworkPolicy has unsupported egress action - {rule['action']}") 71 | 72 | 
check_for_unsup_features(rule) 73 | 74 | k8s_rule = {} 75 | if "destination" in rule: 76 | check_for_unsup_features(rule["destination"]) 77 | k8s_rule["to"] = [] 78 | if "namespaceSelector" in rule["destination"]: 79 | kv = parse_labels(rule["destination"]["namespaceSelector"]) 80 | k8s_rule["to"].append({"namespaceSelector": {"matchLabels": {kv[0] : kv[1]}}}) 81 | 82 | if "selector" in rule["destination"]: 83 | kv = parse_labels(rule["destination"]["selector"]) 84 | k8s_rule["to"].append({"podSelector": {"matchLabels": {kv[0] : kv[1]}}}) 85 | 86 | if "nets" in rule["destination"]: 87 | k8s_rule["to"].append({"ipBlock": {"cidr": rule["destination"]["nets"][0]}}) 88 | 89 | ports = convert_ports(rule) 90 | if len(ports) > 0: 91 | k8s_rule["ports"] = ports 92 | 93 | if "action" in rule and rule["action"].lower() == "allow": 94 | k8s_network_policy["spec"]["egress"].append(k8s_rule) 95 | k8s_network_policy["spec"]["policyTypes"].append("Egress") 96 | 97 | return k8s_network_policy 98 | 99 | def convert_ports(rule): 100 | if "protocol" in rule and rule["protocol"] != "TCP": 101 | raise Exception(f"Calico NetworkPolicy has unsupported protocol - {rule['protocol']}") 102 | 103 | if "destination" in rule and rule["destination"].get("ports"): 104 | return [ 105 | { 106 | "protocol": rule["protocol"] if "protocol" in rule else None, 107 | "port": port, 108 | }for port in rule["destination"]["ports"] 109 | ] 110 | 111 | return [] 112 | 113 | def parse_labels(labels): 114 | kv = labels.split('==') 115 | return [kv[0].strip(), kv[1].replace("'", "").strip()] 116 | 117 | def check_for_unsup_features(input): 118 | for unsup in unsup_features: 119 | if unsup in input: 120 | logger.error("Conversion is not supported due to missing support in upstream NetworkPolicy") 121 | raise Exception(f"Calico NetworkPolicy has unsupported attribute - {unsup}") 122 | -------------------------------------------------------------------------------- /netpolymigrator/cilium_utils.py: 
-------------------------------------------------------------------------------- 1 | import logging 2 | 3 | # Set up logging 4 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 5 | logger = logging.getLogger(__name__) 6 | 7 | ing_unsup_features = ["fromRequires", "fromServices", "icmps", "ingressDeny"] 8 | eg_unsup_features = ["toRequires", "toServices", "toFQDNs", "icmps","toGroups", "egressDeny"] 9 | 10 | # Implement conversion logic for Cilium to Kubernetes native network policy 11 | def convert_cilium_network_policy(cilium_policy): 12 | rtn_list = [] 13 | specs = [] 14 | if cilium_policy.get("specs"): 15 | specs = cilium_policy.get("specs") 16 | else: 17 | specs.append(cilium_policy["spec"]) 18 | 19 | for spec in specs: 20 | k8s_network_policy = { 21 | "apiVersion": "networking.k8s.io/v1", 22 | "kind": "NetworkPolicy", 23 | "metadata": { 24 | "name": cilium_policy["metadata"]["name"] + ("" if len(rtn_list) == 0 else ("-"+str(len(rtn_list)))), 25 | "namespace": cilium_policy["metadata"]["namespace"] if cilium_policy["metadata"].get("namespace") else "default", 26 | }, 27 | "spec": { 28 | "policyTypes": [] 29 | }, 30 | } 31 | # Handle endpoint selectors 32 | if "endpointSelector" in spec: 33 | k8s_network_policy["spec"]["podSelector"] = { 34 | "matchLabels": spec["endpointSelector"]["matchLabels"] 35 | } 36 | 37 | # Handle ingress rules 38 | convert_ingress_rules(spec, k8s_network_policy) 39 | 40 | # Handle egress rules 41 | convert_egress_rules(spec, k8s_network_policy) 42 | 43 | rtn_list.append(k8s_network_policy) 44 | 45 | return rtn_list 46 | 47 | # Handle ingress rules 48 | def convert_ingress_rules(spec, k8s_network_policy): 49 | ingress_rule_exists = False 50 | if spec.get("ingress"): 51 | k8s_network_policy["spec"]["ingress"] = [] 52 | for rule in spec["ingress"]: 53 | k8s_rule = {} 54 | 55 | for ing_unsup in ing_unsup_features: 56 | if ing_unsup in rule: 57 | logger.error(f"Conversion is not supported due to 
missing support in upstream NetworkPolicy - {ing_unsup}") 58 | raise Exception(f"CiliumNetworkPolicy has unsupported attribute - {ing_unsup}") 59 | 60 | if "fromEndpoints" in rule: 61 | k8s_rule["from"] = [] 62 | for endpoint in rule["fromEndpoints"]: 63 | if endpoint.get("matchLabels"): 64 | k8s_rule["from"].append({"podSelector": {"matchLabels": endpoint["matchLabels"]}}) 65 | else: 66 | k8s_rule["from"].append({"podSelector": {}}) 67 | 68 | if "toPorts" in rule: 69 | ports = process_to_ports(rule) 70 | if len(ports) > 0: 71 | k8s_rule["ports"] = ports 72 | 73 | if "fromEntities" in rule: 74 | for entity in rule["fromEntities"]: 75 | if entity in ["world", "all"]: 76 | k8s_rule["from"] = [] 77 | k8s_rule["from"].append({"ipBlock": {"cidr":"0.0.0.0/0"}}) 78 | break 79 | else: 80 | logger.error(f"this entity is not supported: {entity}") 81 | raise Exception(f"CiliumNetworkPolicy has unsupported attribute - fromEntities({entity})") 82 | 83 | if "fromCIDRSet" in rule: 84 | cidr_set = [] 85 | for cidr in rule["fromCIDRSet"]: 86 | cidr_var = {"ipBlock": {"cidr": cidr["cidr"]}} 87 | if cidr.get("except"): 88 | cidr_var["ipBlock"]["except"] = cidr["except"] 89 | 90 | cidr_set.append(cidr_var) 91 | 92 | k8s_rule["from"] = cidr_set 93 | 94 | elif "fromCIDR" in rule: 95 | k8s_rule["from"] = [{"ipBlock": {"cidr": cidr}} for cidr in rule["fromCIDR"]] 96 | 97 | ingress_rule_exists = True 98 | k8s_network_policy["spec"]["ingress"].append(k8s_rule) 99 | 100 | if ingress_rule_exists == True: 101 | k8s_network_policy["spec"]["policyTypes"].append("Ingress") 102 | 103 | def convert_egress_rules(spec, k8s_network_policy): 104 | # Handle egress rules 105 | egress_rule_exists = False 106 | if spec.get("egress"): 107 | k8s_network_policy["spec"]["egress"] = [] 108 | for rule in spec["egress"]: 109 | k8s_rule = {} 110 | 111 | for eg_unsup in eg_unsup_features: 112 | if eg_unsup in rule: 113 | logger.error(f"Conversion is not supported due to missing support in upstream NetworkPolicy - 
{eg_unsup}") 114 | raise Exception(f"CiliumNetworkPolicy has unsupported attribute - {eg_unsup}") 115 | 116 | if "toEndpoints" in rule: 117 | k8s_rule["to"] = [] 118 | for endpoint in rule["toEndpoints"]: 119 | if endpoint.get("matchLabels"): 120 | k8s_rule["to"].append({"podSelector": {"matchLabels": endpoint["matchLabels"]}}) 121 | else: 122 | k8s_rule["to"].append({"podSelector": {}}) 123 | 124 | if "toCIDRSet" in rule: 125 | cidr_set = [] 126 | for cidr in rule["toCIDRSet"]: 127 | cidr_var = {"ipBlock": {"cidr": cidr["cidr"]}} 128 | if cidr.get("except"): 129 | cidr_var["ipBlock"]["except"] = cidr["except"] 130 | 131 | cidr_set.append(cidr_var) 132 | 133 | k8s_rule["to"] = cidr_set 134 | 135 | elif "toCIDR" in rule: 136 | k8s_rule["to"] = [{"ipBlock": {"cidr": cidr}} for cidr in rule["toCIDR"]] 137 | 138 | if "toPorts" in rule: 139 | ports = process_to_ports(rule) 140 | if len(ports) > 0: 141 | k8s_rule["ports"] = ports 142 | 143 | if "toEntities" in rule: 144 | for entity in rule["toEntities"]: 145 | if entity in ["world", "all"]: 146 | k8s_rule["to"].append({"ipBlock": {"cidr":"0.0.0.0/0"}}) 147 | else: 148 | logger.error(f"this entity is not supported: {entity}") 149 | raise Exception(f"CiliumNetworkPolicy has unsupported attribute - toEntities({entity})") 150 | 151 | egress_rule_exists = True 152 | k8s_network_policy["spec"]["egress"].append(k8s_rule) 153 | 154 | if egress_rule_exists == True: 155 | k8s_network_policy["spec"]["policyTypes"].append("Egress") 156 | 157 | def process_to_ports(rule): 158 | ports = [] 159 | for pvar in rule["toPorts"]: 160 | if "rules" in pvar or "listener" in pvar or "originatingTLS" in pvar: 161 | logger.error("toPorts is not supported") 162 | raise Exception("CiliumNetworkPolicy has unsupported attribute - toPorts") 163 | 164 | ports = [ 165 | { 166 | "protocol": port["protocol"] if "protocol" in port else None, 167 | "port": int(port["port"]) if "port" in port else None, 168 | }for port in pvar["ports"] 169 | ] 170 | 171 | 
return ports 172 | -------------------------------------------------------------------------------- /netpolymigrator/convert.py: -------------------------------------------------------------------------------- 1 | # Import required libraries and modules 2 | import os 3 | import sys 4 | import yaml 5 | import argparse 6 | import logging 7 | from kubernetes import client, config 8 | from utils import detect_custom_network_policy_type 9 | from validate import validate_network_policy 10 | from calico_utils import convert_calico_network_policy 11 | from cilium_utils import convert_cilium_network_policy 12 | 13 | # Configure logging 14 | logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') 15 | logger = logging.getLogger(__name__) 16 | 17 | # Function to validate a Kubernetes NetworkPolicy using the Kubernetes API 18 | def validate_k8s_policy(k8s_policy): 19 | try: 20 | # Load the Kubernetes client configuration 21 | config.load_kube_config() 22 | 23 | # Initialize Kubernetes Networking API client 24 | api_instance = client.NetworkingV1Api() 25 | 26 | # Retrieve the namespace from the NetworkPolicy metadata 27 | namespace = k8s_policy["metadata"]["namespace"] 28 | 29 | # Perform a dry-run to validate the NetworkPolicy 30 | api_response = api_instance.create_namespaced_network_policy(namespace, k8s_policy, dry_run='All') 31 | 32 | # Log the successful validation 33 | logger.info(f"Validation passed for the policy: {k8s_policy['metadata']['name']}") 34 | return True 35 | except Exception as e: 36 | # Log any validation errors 37 | logger.error(f"Validation error: {e}") 38 | return False 39 | 40 | # Function to convert custom (Calico or Cilium) network policies to Kubernetes native network policies 41 | def convert(args): 42 | # Check if the input folder exists 43 | if not os.path.isdir(args.input): 44 | logger.error(f"Input folder '{args.input}' does not exist.") 45 | sys.exit(1) 46 | 47 | # Create the output folder if it does not 
exist 48 | os.makedirs(args.output, exist_ok=True) 49 | 50 | # Initialize a counter for the number of successfully converted policies 51 | num_converted_policies = 0 52 | 53 | # Initialize a list to store validation errors 54 | validation_errors = [] 55 | 56 | # List and loop over each custom network policy folder in the input directory 57 | custom_network_policy_folders = [folder for folder in os.listdir(args.input) if os.path.isdir(os.path.join(args.input, folder))] 58 | for folder in custom_network_policy_folders: 59 | policy_type_folder = os.path.join(args.input, folder) 60 | 61 | # Loop through each policy file in the current folder 62 | for policy_file in os.listdir(policy_type_folder): 63 | policy_file_path = os.path.join(policy_type_folder, policy_file) 64 | 65 | # Skip if the file is not a regular file 66 | if not os.path.isfile(policy_file_path): 67 | continue 68 | 69 | # Load the YAML policy file 70 | with open(policy_file_path, "r") as f: 71 | policy = yaml.safe_load(f) 72 | 73 | try: 74 | # Determine the type of the custom network policy (Calico or Cilium) 75 | custom_network_policy_type = detect_custom_network_policy_type() 76 | 77 | # Skip if the policy is a global policy (i.e., not namespace-scoped) 78 | if "metadata" in policy and "namespace" not in policy["metadata"]: 79 | logger.info(f"Detected a global policy: {policy['metadata']['name']}. 
Focusing on converting namespace-scoped policies only.") 80 | continue 81 | 82 | # Perform conversion based on the custom network policy type 83 | if custom_network_policy_type == "calico": 84 | k8s_policies = [convert_calico_network_policy(policy)] 85 | output_policy_type_folder = os.path.join(args.output, "calico_converted") 86 | elif custom_network_policy_type == "cilium": 87 | k8s_policies = convert_cilium_network_policy(policy) 88 | output_policy_type_folder = os.path.join(args.output, "cilium_converted") 89 | else: 90 | continue 91 | 92 | # Create an output folder for the converted policies if it doesn't exist 93 | os.makedirs(output_policy_type_folder, exist_ok=True) 94 | 95 | for k8s_policy in k8s_policies: 96 | # Validate the converted policy 97 | if not validate_network_policy(k8s_policy): 98 | validation_errors.append(f"Validation failed for the policy: {policy['metadata']['name']}") 99 | else: 100 | if not args.dry_run: 101 | # Save the converted policy to a YAML file 102 | output_file = os.path.join(output_policy_type_folder, f"{policy['metadata']['name']}_k8s.yaml") 103 | with open(output_file, "w") as f: 104 | yaml.safe_dump(k8s_policy, f) 105 | 106 | # Increment the counter for successfully converted policies 107 | num_converted_policies += 1 108 | 109 | # Log the successful conversion 110 | logger.info(f"Converted policy '{policy['metadata']['name']}' to Kubernetes native NetworkPolicy.") 111 | 112 | # Validate the converted policy using the Kubernetes API 113 | if not validate_k8s_policy(k8s_policy): 114 | validation_errors.append(f"Validation failed for the policy using Kubernetes API: {policy['metadata']['name']}") 115 | else: 116 | logger.info(f"Validation passed for the policy using Kubernetes API: {policy['metadata']['name']}") 117 | 118 | except Exception as e: 119 | # Log any errors that occur during the conversion process 120 | logger.error(f"Failed to convert policy: {policy_file_path}. 
Error: {e}") 121 | continue 122 | 123 | # Log a summary of the conversion process 124 | logger.info(f"Converted {num_converted_policies} namespace-scoped Calico NetworkPolicies to Kubernetes native NetworkPolicies and saved them in '{args.output}' folder.") 125 | 126 | # Log any validation errors 127 | if validation_errors: 128 | logger.error("\n".join(validation_errors)) 129 | 130 | # Define the main function 131 | def main(): 132 | # Initialize argument parser and subparsers 133 | parser = argparse.ArgumentParser(description="NetPolyMigrator") 134 | subparsers = parser.add_subparsers(dest="command", required=True) 135 | convert_parser = subparsers.add_parser("convert", help="Convert custom network policies to Kubernetes native network policies") 136 | convert_parser.add_argument("--input", type=str, required=True, help="Path to the folder containing the collected custom NetworkPolicies.") 137 | convert_parser.add_argument("--output", type=str, default="converted_network_policies", help="Path to the folder where the converted Kubernetes native NetworkPolicies will be saved.") 138 | convert_parser.add_argument("--dry-run", action='store_true', help="Enable dry run mode") 139 | 140 | # Parse the command-line arguments 141 | args = parser.parse_args() 142 | 143 | # Call the appropriate function based on the subcommand 144 | if args.command == "convert": 145 | convert(args) 146 | else: 147 | logger.error(f"Invalid command: {args.command}") 148 | 149 | # Execute the main function when the script is run 150 | if __name__ == "__main__": 151 | main() 152 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # K8s Network Policy Migrator 2 | 3 | `K8s Network Policy Migrator` is a tool to migrate Calico and Cilium network policies to Kubernetes native network policies. 
4 | 5 | 6 | ## Requirements 7 | 8 | - Python 3 9 | - kubectl 10 | - Kubernetes Cluster running Calico or Cilium (V1.25 and above ) 11 | 12 | ## Usage 13 | 14 | 1. Clone this repository 15 | ``` 16 | git clone git@github.com:awslabs/k8s-network-policy-migrator.git 17 | ``` 18 | 19 | 2. Go into netpolymigrator/bin directory 20 | ``` 21 | cd netpolymigrator/bin/ 22 | ``` 23 | 24 | 3. Update permissions on the executable 25 | 26 | ``` 27 | chmod +x netpol_migrator.sh 28 | ``` 29 | 30 | 4. Run the `netpol_migrator.sh` script with one of the following commands: 31 | ``` 32 | ./netpol_migrator.sh [pre_migration_check|collect|convert|apply|rollback|cleanup] 33 | ``` 34 | 35 | 36 | ## How to use 37 | 38 | ### Pre-Migration Check 39 | Performs a set of prerequisites needed before you migrate your network policies. It detects which type of custom network policy you're using (Calico or Cilium), installs a demo application and its corresponding network policies, and then validates that everything is working as expected. Also cleans up created objects. 40 | 41 | ``` 42 | ./netpol_migrator.sh pre_migration_check 43 | ``` 44 | 45 | **Output for Pre-Migration-Check** 46 | ``` 47 | Pre-migration check... 48 | 2023-08-26 20:25:22,250 [INFO]: Custom network policy type detected: calico 49 | 2023-08-26 20:25:22,250 [INFO]: Installing Demo App 50 | 2023-08-26 20:25:24,427 [INFO]: Installed Demo App successfully 51 | 2023-08-26 20:25:25,643 [INFO]: Installed Calico network policy 52 | 2023-08-26 20:25:26,415 [INFO]: pod name: pod/client-one-5d96c56dfb-h9t6b 53 | 2023-08-26 20:25:27,926 [INFO]: ['\r', '\r', '\r', 'Welcome to nginx!\r', '\r', '\r', '\r', '

Welcome to nginx!

\r', '

If you see this page, the nginx web server is successfully installed and\r', 'working. Further configuration is required.

\r', '\r', '

For online documentation and support please refer to\r', 'nginx.org.
\r', 'Commercial support is available at\r', 'nginx.com.

\r', '\r', '

Thank you for using nginx.

\r', '\r', ''] 54 | 2023-08-26 20:25:27,926 [INFO]: Test scenario 1 passed 55 | 2023-08-26 20:25:28,702 [INFO]: pod name: pod/client-two-f489dcf7b-5msqf 56 | command terminated with exit code 28 57 | 2023-08-26 20:25:35,203 [ERROR]: Error checking connectivity for demo-svc.npmigrator-test.svc.cluster.local App: Command '['kubectl', 'exec', '-n', 'npmigrator-test', '-it', 'pod/client-two-f489dcf7b-5msqf', '--', 'curl', '--max-time', '5', 'demo-svc.npmigrator-test.svc.cluster.local']' returned non-zero exit status 28. 58 | 2023-08-26 20:25:35,203 [INFO]: Test scenario 2 passed 59 | 2023-08-26 20:25:35,203 [INFO]: All network policies validated successfully. 60 | namespace "npmigrator-test" deleted 61 | service "demo-svc" deleted 62 | deployment.apps "demo-app" deleted 63 | deployment.apps "client-one" deleted 64 | deployment.apps "client-two" deleted 65 | Error from server (NotFound): networkpolicies.projectcalico.org "demo-app-ingress-rule" not found 66 | 2023-08-26 20:25:42,894 [WARNING]: Calico network policy not found. Skipping deletion. 67 | error: the server doesn't have a resource type "networkpolicies" 68 | 2023-08-26 20:25:43,763 [WARNING]: Cilium network policy not found. Skipping deletion. 69 | 2023-08-26 20:25:43,763 [INFO]: Cleanup successful. 70 | ``` 71 | 72 | ### Collect 73 | Collects your existing Calico and Cilium network policies and stores them in a directory called `collected_network_policies`. 74 | 75 | ``` 76 | ./netpol_migrator.sh collect 77 | ``` 78 | 79 | **Output for Collect** 80 | ``` 81 | Collecting network policies... 82 | 2023-08-26 20:28:21,728 [INFO]: Custom network policy type detected: calico 83 | 2023-08-26 20:28:21,729 [INFO]: Collecting network policies... 
84 | 2023-08-26 20:28:22,573 [INFO]: Collected 2 calico NetworkPolicies and saved to 'collected_network_policies/calico' folder 85 | ``` 86 | 87 | ### Convert 88 | Converts your existing Calico and Cilium network policies to kubernetes native network policy and stores them in a directory called `converted_network_policies`. Make sure to provide the required `--input` argument, for example: 89 | 90 | ``` 91 | ./netpol_migrator.sh convert --input collected_network_policies 92 | ``` 93 | 94 | **Output for convert** 95 | ``` 96 | Converting network policies... 97 | 2023-08-26 20:28:32,175 [INFO]: Custom network policy type detected: calico 98 | 2023-08-26 20:28:32,178 [INFO]: Converted policy 'default.allow-nginx-ingress' to Kubernetes native NetworkPolicy. 99 | 2023-08-26 20:28:32,943 [INFO]: Validation passed for the policy: default.allow-nginx-ingress 100 | 2023-08-26 20:28:32,945 [INFO]: Validation passed for the policy using Kubernetes API: default.allow-nginx-ingress 101 | 2023-08-26 20:28:34,182 [INFO]: Custom network policy type detected: calico 102 | 2023-08-26 20:28:34,185 [INFO]: Converted policy 'default.allow-busybox-egress' to Kubernetes native NetworkPolicy. 103 | 2023-08-26 20:28:34,935 [INFO]: Validation passed for the policy: default.allow-busybox-egress 104 | 2023-08-26 20:28:34,936 [INFO]: Validation passed for the policy using Kubernetes API: default.allow-busybox-egress 105 | 2023-08-26 20:28:34,936 [INFO]: Converted 2 namespace-scoped Calico NetworkPolicies to Kubernetes native NetworkPolicies and saved them in 'converted_network_policies' folder. 106 | ``` 107 | 108 | 109 | **NOTE:** Before `apply` function you can run `pre_migration_check` just to make sure everything is working as expected. 110 | 111 | ### Apply 112 | Applies the converted network policies to your cluster. You will be prompted to select which subfolder to use (`cilium_converted` or `calico_converted`). 
Example usage: 113 | 114 | ``` 115 | ./netpol_migrator.sh apply 116 | ``` 117 | 118 | **Output for Apply** 119 | ``` 120 | Which subfolder do you want to use for applying the network policies? 121 | 1. cilium_converted 122 | 2. calico_converted 123 | Enter your choice (1 or 2): 2 124 | Applying network policies from calico_converted... 125 | 2023-08-26 20:42:09,645 [INFO]: Namespace default already exists. 126 | 2023-08-26 20:42:09,648 [INFO]: Validating policy default.allow-nginx-ingress 127 | 2023-08-26 20:42:09,649 [INFO]: Applying policy default.allow-nginx-ingress 128 | 2023-08-26 20:42:10,411 [INFO]: Network policy 'default.allow-nginx-ingress' applied successfully. 129 | 2023-08-26 20:42:10,412 [INFO]: Validating policy default.allow-busybox-egress 130 | 2023-08-26 20:42:10,412 [INFO]: Applying policy default.allow-busybox-egress 131 | 2023-08-26 20:42:11,192 [INFO]: Network policy 'default.allow-busybox-egress' applied successfully. 132 | 2023-08-26 20:42:11,195 [INFO]: Saved applied network policies to applied_network_policies.yaml 133 | ``` 134 | 135 | ### Rollback 136 | Rolls back the applied network policies. Make sure to provide the required `--applied-network-policies-file` argument, for example: 137 | 138 | ``` 139 | ./netpol_migrator.sh rollback --applied-network-policies-file applied_network_policies.yaml 140 | ``` 141 | 142 | **Output for Rollback** 143 | ``` 144 | 2023-08-26 20:43:06,727 - INFO - Rolling back 2 applied network policies in namespace 'default'... 145 | 2023-08-26 20:43:07,563 - INFO - Network policy 'default.allow-nginx-ingress' rolled back successfully in namespace 'default' 146 | 2023-08-26 20:43:08,326 - INFO - Network policy 'default.allow-busybox-egress' rolled back successfully in namespace 'default' 147 | ``` 148 | 149 | ### Cleanup 150 | Cleans up resources related to the CNI provider you are using (either Calico or Cilium). You will be prompted to select which CNI provider to clean up. 
Example usage:
151 | 
152 | ```
153 | ./netpol_migrator.sh cleanup
154 | ```
155 | 
156 | ### Validate
157 | Validates the statements shared below.
158 | ```
159 | ./netpol_migrator.sh validate
160 | ```
161 | 
162 | **Validation Checks**
163 | * checks if the network policy is a non-empty dictionary. If not, it logs a warning and returns False
164 | * checks if the network policy contains all the required fields: "apiVersion", "kind", "metadata", and "spec". If any of these fields are missing, it logs a warning and returns False
165 | * checks if the apiVersion of the network policy is either "networking.k8s.io/v1" or "networking.k8s.io/v1beta1". If not, it logs a warning and returns False
166 | * checks if the kind of the network policy is "NetworkPolicy". If not, it logs a warning and returns False
167 | * checks if the metadata of the network policy contains the 'name' field. If not, it logs a warning and returns False
168 | * checks if the spec field of the network policy is non-empty. If not, it logs a warning and returns False
169 | * checks if the policyTypes field of the spec is non-empty. If not, it logs a warning and returns False
170 | * For each egress rule in the network policy, it checks if the podSelector.matchLabels field meets the naming requirements (alphanumeric characters, hyphen, underscore, or dot). If not, it logs a warning and returns False
171 | 
172 | 
173 | ## Contributing
174 | Please refer to the contribution guidelines [here](CONTRIBUTING.md).
175 | 
176 | ## License
177 | This tool is released under the [Apache 2.0](LICENSE).
178 | --------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | 
2 |                                  Apache License
3 |                            Version 2.0, January 2004
4 |                         http://www.apache.org/licenses/
5 | 
6 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 | 
8 |    1. Definitions.
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | -------------------------------------------------------------------------------- /netpolymigrator/utils.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import logging 3 | import os 4 | import subprocess 5 | import sys 6 | import yaml 7 | from kubernetes import client, config 8 | 9 | # Configure logging 10 | logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]: %(message)s") 11 | logger = logging.getLogger(__name__) 12 | namespace = "npmigrator-test" 13 | endpoint_demoapp = "demo-svc.npmigrator-test.svc.cluster.local" 14 | 15 | def parse_args(): 16 | parser = argparse.ArgumentParser(description="Network policy converter and tester.") 17 | parser.add_argument("-o", "--output", type=str, help="Output folder to store converted network policies.", default="output") 18 | parser.add_argument("-v", "--verbosity", type=int, help="Logging verbosity level. Higher values for more verbose logging.", default=1) 19 | return parser.parse_args() 20 | 21 | 22 | def set_logging_level(verbosity): 23 | if verbosity >= 3: 24 | logger.setLevel(logging.DEBUG) 25 | elif verbosity == 2: 26 | logger.setLevel(logging.INFO) 27 | elif verbosity == 1: 28 | logger.setLevel(logging.WARNING) 29 | else: 30 | logger.setLevel(logging.ERROR) 31 | 32 | 33 | def detect_custom_network_policy_type(): 34 | try: 35 | # Load Kubernetes configuration 36 | config.load_kube_config() 37 | 38 | # Initialize API client 39 | api_client = client.ApiClient() 40 | 41 | # Get CRDs 42 | crd_api = client.CustomObjectsApi(api_client) 43 | crd_list = crd_api.list_cluster_custom_object("apiextensions.k8s.io", "v1", "customresourcedefinitions") 44 | 45 | # Check for Calico and Cilium CRDs 46 | calico_global_crd = "globalnetworkpolicies.crd.projectcalico.org" 47 | calico_crd = "networkpolicies.crd.projectcalico.org" 48 | cilium_crd = "ciliumnetworkpolicies.cilium.io" 49 | cilium_clusterwide_crd = 
"ciliumclusterwidenetworkpolicies.cilium.io" 50 | 51 | # Initialize custom_network_policy_type 52 | custom_network_policy_type = None 53 | 54 | # Iterate over CRDs to find the custom network policy type 55 | for crd in crd_list["items"]: 56 | if crd["metadata"]["name"] == calico_global_crd or crd["metadata"]["name"] == calico_crd: 57 | custom_network_policy_type = "calico" 58 | break 59 | elif crd["metadata"]["name"] == cilium_crd or crd["metadata"]["name"] == cilium_clusterwide_crd: 60 | custom_network_policy_type = "cilium" 61 | break 62 | 63 | if custom_network_policy_type is None: 64 | logger.error("No custom network policy type detected.") 65 | else: 66 | logger.info(f"Custom network policy type detected: {custom_network_policy_type}") 67 | 68 | return custom_network_policy_type 69 | 70 | except Exception as e: 71 | logger.error(f"Error detecting custom network policy type: {e}") 72 | return None 73 | 74 | 75 | def collect_network_policies(custom_network_policy_type): 76 | # Load Kubernetes configuration 77 | config.load_kube_config() 78 | 79 | # Initialize API client 80 | api_client = client.ApiClient() 81 | 82 | # Initialize CustomObjectsApi 83 | custom_objects_api = client.CustomObjectsApi(api_client) 84 | 85 | # Collect network policies based on custom_network_policy_type 86 | if custom_network_policy_type == "calico": 87 | # Get Calico network policies 88 | calico_policies = custom_objects_api.list_cluster_custom_object("crd.projectcalico.org", "v1", "networkpolicies") 89 | calico_global_policies = custom_objects_api.list_cluster_custom_object("crd.projectcalico.org", "v1", "globalnetworkpolicies") 90 | 91 | # Combine Calico network policies and global network policies 92 | all_calico_policies = calico_policies["items"] + calico_global_policies["items"] 93 | 94 | return all_calico_policies 95 | 96 | elif custom_network_policy_type == "cilium": 97 | # Get Cilium network policies 98 | cilium_policies = 
custom_objects_api.list_cluster_custom_object("cilium.io", "v2", "ciliumnetworkpolicies") 99 | cilium_clusterwide_policies = custom_objects_api.list_cluster_custom_object("cilium.io", "v2", "ciliumclusterwidenetworkpolicies") 100 | 101 | # Combine Cilium network policies and cluster-wide network policies 102 | all_cilium_policies = cilium_policies["items"] + cilium_clusterwide_policies["items"] 103 | 104 | return all_cilium_policies 105 | 106 | else: 107 | print("Invalid custom network policy type provided.") 108 | return [] 109 | 110 | # Implement conversion logic for Calico to Kubernetes native network policy 111 | def convert_calico_network_policy_to_k8s_native_network_policy(calico_policy): 112 | k8s_network_policy = {"apiVersion": "networking.k8s.io/v1", "kind": "NetworkPolicy", "metadata": {"name": calico_policy["metadata"]["name"], "namespace": calico_policy["metadata"]["namespace"]}, "spec": {"podSelector": {"matchLabels": calico_policy["spec"]["ingress"][0]["from"]["selector"]}}, "policyTypes": ["Ingress"], "ingress": []} 113 | 114 | # Check if the Calico policy is namespace-scoped or global 115 | if "namespace" in calico_policy["metadata"]: 116 | k8s_network_policy["metadata"]["namespace"] = calico_policy["metadata"]["namespace"] 117 | else: 118 | k8s_network_policy["metadata"]["labels"] = { 119 | "calico-policy-type": "global" 120 | } 121 | 122 | # Handle selectors 123 | if "spec" in calico_policy and "selector" in calico_policy["spec"]: 124 | k8s_network_policy["spec"]["podSelector"] = { 125 | "matchLabels": calico_policy["spec"]["selector"] 126 | } 127 | 128 | # Handle ingress rules 129 | if "spec" in calico_policy and "ingress" in calico_policy["spec"]: 130 | k8s_network_policy["spec"]["ingress"] = [] 131 | for rule in calico_policy["spec"]["ingress"]: 132 | k8s_rule = {} 133 | if "source" in rule: 134 | k8s_rule["from"] = [] 135 | if "selector" in rule["source"]: 136 | k8s_rule["from"].append({"podSelector": {"matchLabels": 
rule["source"]["selector"]}}) 137 | if "nets" in rule["source"]: 138 | k8s_rule["from"].append({"ipBlock": {"cidr": rule["source"]["nets"][0]}}) 139 | 140 | if "action" in rule and rule["action"].lower() == "allow": 141 | k8s_network_policy["spec"]["ingress"].append(k8s_rule) 142 | 143 | # Handle egress rules 144 | if "spec" in calico_policy and "egress" in calico_policy["spec"]: 145 | k8s_network_policy["spec"]["egress"] = [] 146 | for rule in calico_policy["spec"]["egress"]: 147 | k8s_rule = {} 148 | if "destination" in rule: 149 | k8s_rule["to"] = [] 150 | if "selector" in rule["destination"]: 151 | k8s_rule["to"].append({"podSelector": {"matchLabels": rule["destination"]["selector"]}}) 152 | if "nets" in rule["destination"]: 153 | k8s_rule["to"].append({"ipBlock": {"cidr": rule["destination"]["nets"][0]}}) 154 | 155 | if "action" in rule and rule["action"].lower() == "allow": 156 | k8s_network_policy["spec"]["egress"].append(k8s_rule) 157 | 158 | # Add traffic filtering based on HTTP and DNS metadata here 159 | if "action" in rule and rule["action"].lower() == "allow": 160 | k8s_network_policy["spec"]["egress"].append(k8s_rule) 161 | 162 | return k8s_network_policy 163 | 164 | # Implement conversion logic for Cilium to Kubernetes native network policy 165 | def convert_cilium_network_policy_to_k8s_native_network_policy(cilium_policy): 166 | k8s_network_policy = { 167 | "apiVersion": "networking.k8s.io/v1", 168 | "kind": "NetworkPolicy", 169 | "metadata": { 170 | "name": cilium_policy["metadata"]["name"], 171 | "namespace": cilium_policy["metadata"]["namespace"] 172 | }, 173 | "spec": { 174 | "podSelector": { 175 | "matchLabels": cilium_policy.get("spec", {}).get("endpointSelector", {}).get("matchLabels", {}) 176 | }, 177 | "policyTypes": ["Ingress"], 178 | "ingress": [] 179 | } 180 | } 181 | 182 | # Check if the Cilium policy is namespace-scoped or global 183 | if "namespace" in cilium_policy["metadata"]: 184 | k8s_network_policy["metadata"]["namespace"] = 
cilium_policy["metadata"]["namespace"] 185 | else: 186 | k8s_network_policy["metadata"]["labels"] = { 187 | "cilium-policy-type": "global" 188 | } 189 | print(f"Global policy detected: {cilium_policy['metadata']['name']}") 190 | return None 191 | 192 | # Handle selectors 193 | if "spec" in cilium_policy and "endpointSelector" in cilium_policy["spec"]: 194 | k8s_network_policy["spec"]["podSelector"] = { 195 | "matchLabels": cilium_policy["spec"]["endpointSelector"].get("matchLabels", {}) 196 | } 197 | 198 | # Handle ingress rules 199 | if "spec" in cilium_policy and "ingress" in cilium_policy["spec"]: 200 | k8s_network_policy["spec"]["ingress"] = [] 201 | for rule in cilium_policy["spec"]["ingress"]: 202 | k8s_rule = { 203 | "from": [], 204 | "ports": [] 205 | } 206 | if "toPort" in rule: 207 | k8s_rule["ports"].append({"protocol": "TCP", "port": rule["toPort"]}) 208 | if "fromEndpoints" in rule: 209 | k8s_rule["from"] = [] 210 | for endpoint in rule["fromEndpoints"]: 211 | k8s_rule["from"].append({"podSelector": {"matchLabels": endpoint.get("matchLabels", {})}}) 212 | if "fromCIDR" in rule: 213 | k8s_rule["from"] = [{"ipBlock": {"cidr": cidr}} for cidr in rule["fromCIDR"]] 214 | if "fromServiceAccount" in rule: 215 | k8s_rule["from"].append({"namespaceSelector": {"matchLabels": {}}, "podSelector": {"matchLabels": {}}}) 216 | 217 | k8s_network_policy["spec"]["ingress"].append(k8s_rule) 218 | 219 | # Handle egress rules 220 | if "spec" in cilium_policy and "egress" in cilium_policy["spec"]: 221 | k8s_network_policy["spec"]["egress"] = [] 222 | for rule in cilium_policy["spec"]["egress"]: 223 | k8s_rule = {} 224 | if "toEndpoints" in rule: 225 | k8s_rule["to"] = [] 226 | for endpoint in rule["toEndpoints"]: 227 | k8s_rule["to"].append({"podSelector": {"matchLabels": endpoint.get("matchLabels", {})}}) 228 | if "toCIDR" in rule: 229 | k8s_rule["to"] = [{"ipBlock": {"cidr": cidr}} for cidr in rule["toCIDR"]] 230 | if "toServiceAccount" in rule: 231 | 
k8s_rule["to"].append({"namespaceSelector": {"matchLabels": {}}, "podSelector": {"matchLabels": {}}}) 232 | 233 | k8s_network_policy["spec"]["egress"].append(k8s_rule) 234 | 235 | return k8s_network_policy 236 | 237 | def load_kube_config(): 238 | config.load_kube_config() 239 | 240 | def save_to_local_storage(network_policies, output_folder): 241 | os.makedirs(output_folder, exist_ok=True) 242 | for policy in network_policies: 243 | policy_name = policy["metadata"]["name"] 244 | policy_namespace = policy["metadata"]["namespace"] 245 | output_file = os.path.join(output_folder, f"{policy_namespace}_{policy_name}.yaml") 246 | 247 | with open(output_file, "w") as f: 248 | yaml.safe_dump(policy, f) 249 | 250 | def read_network_policy(policy_file): 251 | """Reads a network policy file and returns a dictionary of the policy.""" 252 | with open(policy_file) as f: 253 | policy = yaml.safe_load(f) 254 | return policy 255 | 256 | def build_test_dictionary(policy): 257 | """Builds a dictionary of the test cases for the network policy.""" 258 | test_dictionary = { 259 | "ingress": [], 260 | "egress": [] 261 | } 262 | for rule in policy["spec"].get("ingress", []): 263 | test_dictionary["ingress"].append(rule) 264 | for rule in policy["spec"].get("egress", []): 265 | test_dictionary["egress"].append(rule) 266 | return test_dictionary 267 | 268 | def build_test(policy): 269 | """Builds a test for the network policy.""" 270 | test_dictionary = build_test_dictionary(policy) 271 | for rule in test_dictionary["ingress"]: 272 | for port in rule.get("ports", []): 273 | test_case = { 274 | "name": "test_ingress_{}".format(port["protocol"]), 275 | "command": "kubectl exec test-pod --command nc -z localhost {}".format(port["port"]), 276 | "expected_result": "Failure" 277 | } 278 | yield test_case 279 | for rule in test_dictionary["egress"]: 280 | for port in rule.get("ports", []): 281 | test_case = { 282 | "name": "test_egress_{}".format(port["protocol"]), 283 | "command": "kubectl exec 
test-pod --command nc -z localhost {}".format(port["port"]), 284 | "expected_result": "Failure" 285 | } 286 | yield test_case 287 | 288 | def run_tests(output_dir): 289 | """ 290 | Run tests on the converted policies. 291 | """ 292 | logger.info("Running tests on the converted policies...") 293 | 294 | # Load the test policies 295 | test_policies_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "test_policies") 296 | test_policies = [] 297 | for filename in os.listdir(test_policies_dir): 298 | if filename.endswith(".yaml"): 299 | with open(os.path.join(test_policies_dir, filename), "r") as f: 300 | test_policies.append(yaml.safe_load(f)) 301 | 302 | # Loop over each converted policy and run the tests 303 | for policy_type_folder in ["calico_converted", "cilium_converted"]: 304 | converted_policy_dir = os.path.join(output_dir, policy_type_folder) 305 | for filename in os.listdir(converted_policy_dir): 306 | if filename.endswith(".yaml"): 307 | with open(os.path.join(converted_policy_dir, filename), "r") as f: 308 | policy = yaml.safe_load(f) 309 | 310 | # Check that the policy is valid 311 | if not validate_network_policy(policy): 312 | logger.warning(f"Invalid policy: {policy['metadata']['name']}") 313 | continue 314 | 315 | logger.info("Running tests for policy: {}".format(policy["metadata"]["name"])) # <-- fix is here 316 | 317 | # Loop over each test policy and run the tests 318 | for test_policy in test_policies: 319 | if not validate_network_policy(test_policy): 320 | logger.warning(f"Invalid test policy: {test_policy['metadata']['name']}") 321 | continue 322 | 323 | result = test_policy_applies_to_policy(test_policy, policy) 324 | if result: 325 | logger.info(f"Test passed: {test_policy['metadata']['name']} applies to {policy['metadata']['name']}") 326 | else: 327 | logger.warning(f"Test failed: {test_policy['metadata']['name']} does not apply to {policy['metadata']['name']}") 328 | 329 | logger.info("Tests completed.") 330 | 331 | def 
check_connectivity(pod_labels, namespace, endpoint): 332 | conn_check = False 333 | try: 334 | # Check the connectivity 335 | pod_name = subprocess.check_output(["kubectl", "get", "pod", "-n", namespace, "-l", pod_labels, "-o", "name"]).decode('utf-8').strip().split('\n') 336 | logger.info(f"pod name: {pod_name[0]}") 337 | v_output = subprocess.check_output(["kubectl", "exec", "-n", namespace, "-it", pod_name[0], "--", "curl", "--max-time", "5", endpoint]).decode('utf-8').strip().split('\n') 338 | logger.info(v_output) 339 | conn_check = True 340 | except subprocess.CalledProcessError as e: 341 | logger.error(f"Error checking connectivity for {endpoint} App: {e}") 342 | 343 | return conn_check 344 | 345 | def validate_np(namespace, endpoint_demoapp): 346 | # Initialize a flag to keep track of the overall test status 347 | all_tests_passed = True 348 | 349 | # Define the test scenarios 350 | test_scenarios = [ 351 | {"label": "app=client-one", "expected": True}, 352 | {"label": "app=client-two", "expected": False} 353 | ] 354 | 355 | # Loop through each test scenario 356 | for i, test in enumerate(test_scenarios): 357 | label = test["label"] 358 | expected = test["expected"] 359 | 360 | # Perform the connectivity test 361 | conn_status = check_connectivity(label, namespace, endpoint_demoapp) 362 | 363 | # Validate the result 364 | if conn_status == expected: 365 | logger.info(f"Test scenario {i+1} passed") 366 | else: 367 | logger.error(f"Test scenario {i+1} failed: Expected {expected}, got {conn_status}") 368 | all_tests_passed = False 369 | 370 | return all_tests_passed 371 | --------------------------------------------------------------------------------