├── .coveragerc ├── .gitignore ├── .gitreview ├── .mailmap ├── .stestr.conf ├── .zuul.yaml ├── CONTRIBUTING.rst ├── HACKING.rst ├── LICENSE ├── README.rst ├── api-ref └── source │ ├── certificates.inc │ ├── clusters.inc │ ├── clustertemplates.inc │ ├── conf.py │ ├── index.rst │ ├── mservices.inc │ ├── parameters.yaml │ ├── quotas.inc │ ├── samples │ ├── bay-create-resp.json │ ├── bay-update-req.json │ ├── baymodel-create-req.json │ ├── baymodel-update-req.json │ ├── certificates-ca-show-resp.json │ ├── certificates-ca-sign-req.json │ ├── certificates-ca-sign-resp.json │ ├── cluster-create-req.json │ ├── cluster-create-resp.json │ ├── cluster-get-all-resp.json │ ├── cluster-get-one-resp.json │ ├── cluster-resize-req.json │ ├── cluster-resize-resp.json │ ├── cluster-update-req.json │ ├── cluster-upgrade-req.json │ ├── cluster-upgrade-resp.json │ ├── clustertemplate-create-req.json │ ├── clustertemplate-create-resp.json │ ├── clustertemplate-get-all-resp.json │ ├── clustertemplate-update-req.json │ ├── mservice-get-resp.json │ ├── quota-create-req.json │ ├── quota-create-resp.json │ ├── quota-delete-req.json │ ├── quota-get-all-resp.json │ ├── quota-get-one-resp.json │ ├── quota-update-req.json │ ├── quota-update-resp.json │ ├── stats-get-resp.json │ ├── versions-01-get-resp.json │ └── versions-get-resp.json │ ├── stats.inc │ ├── status.yaml │ ├── urls.inc │ └── versions.inc ├── bindep.txt ├── contrib ├── drivers │ └── k8s_opensuse_v1 │ │ ├── README.md │ │ ├── __init__.py │ │ ├── driver.py │ │ ├── image │ │ ├── README.md │ │ ├── config.sh │ │ ├── images.sh │ │ └── openSUSE-Leap-42.1-JeOS-for-OpenStack-Magnum-K8s.kiwi │ │ ├── setup.py │ │ ├── template_def.py │ │ ├── templates │ │ ├── COPYING │ │ ├── README.md │ │ ├── fragments │ │ │ ├── add-proxy.sh │ │ │ ├── configure-docker.sh │ │ │ ├── configure-etcd.sh │ │ │ ├── configure-flanneld-master.sh │ │ │ ├── configure-flanneld-minion.sh │ │ │ ├── configure-kubernetes-master.sh │ │ │ ├── configure-kubernetes-minion.sh │ │ │ ├── create-kubernetes-user.yaml │ │ │ ├── make-cert-client.sh │ │ │ ├── make-cert.sh │ │ │ ├── write-heat-params-master.yaml │ │ │ ├── write-heat-params-minion.yaml │ │ │ └── write-kubeconfig.yaml │ │ ├── kubecluster.yaml │ │ ├── kubemaster.yaml │ │ └── kubeminion.yaml │ │ └── version.py └── templates │ └── example │ ├── example_template │ ├── __init__.py │ └── example.yaml │ └── setup.py ├── devstack ├── README.rst ├── lib │ └── magnum ├── plugin.sh └── settings ├── doc ├── examples │ └── etc │ │ ├── init │ │ ├── magnum-api.conf │ │ └── magnum-conductor.conf │ │ ├── logrotate.d │ │ └── magnum.logrotate │ │ └── systemd │ │ └── system │ │ ├── magnum-api.service │ │ └── magnum-conductor.service ├── requirements.txt └── source │ ├── admin │ ├── configuring.rst │ ├── gmr.rst │ ├── index.rst │ ├── magnum-proxy.rst │ └── troubleshooting-guide.rst │ ├── cli │ ├── index.rst │ └── magnum-status.rst │ ├── conf.py │ ├── configuration │ ├── index.rst │ ├── sample-config.rst │ ├── sample-policy.rst │ └── samples │ │ ├── index.rst │ │ └── policy-yaml.rst │ ├── contributor │ ├── api-microversion-history.rst │ ├── api-microversion.rst │ ├── contributing.rst │ ├── functional-test.rst │ ├── index.rst │ ├── objects.rst │ ├── policies.rst │ ├── quickstart.rst │ ├── reno.rst │ └── troubleshooting.rst │ ├── images │ ├── MagnumVolumeIntegration.png │ ├── cluster-create.png │ ├── cluster-template-details.png │ └── cluster-template.png │ ├── index.rst │ ├── install │ ├── common │ │ ├── configure_2_edit_magnum_conf.rst │ │ ├── 
configure_3_populate_database.rst │ │ └── prerequisites.rst │ ├── get_started.rst │ ├── index.rst │ ├── install-debian-manual.rst │ ├── install-guide-from-source.rst │ ├── install-rdo.rst │ ├── install-ubuntu.rst │ ├── install.rst │ ├── launch-instance.rst │ ├── next-steps.rst │ └── verify.rst │ └── user │ ├── glossary.rst │ ├── heat-templates.rst │ ├── index.rst │ ├── k8s-health-monitoring.rst │ ├── k8s-keystone-authN-authZ.rst │ ├── kubernetes-load-balancer.rst │ ├── monitoring.rst │ └── node-groups.rst ├── dockerfiles ├── cluster-autoscaler │ └── Dockerfile ├── heat-container-agent │ ├── Dockerfile │ ├── config.json.template │ ├── launch │ ├── manifest.json │ ├── scripts │ │ ├── 50-heat-config-docker-compose │ │ ├── 55-heat-config │ │ ├── configure_container_agent.sh │ │ ├── heat-config-notify │ │ ├── hooks │ │ │ ├── atomic │ │ │ ├── docker-compose │ │ │ └── script │ │ └── write-os-apply-config-templates.sh │ ├── service.template │ └── tmpfiles.template ├── helm-client │ └── Dockerfile ├── kubernetes-apiserver │ ├── Dockerfile │ ├── apiserver │ ├── config │ ├── config.json.template │ ├── launch.sh │ ├── service.template │ └── sources ├── kubernetes-controller-manager │ ├── Dockerfile │ ├── config │ ├── config.json.template │ ├── controller-manager │ ├── launch.sh │ ├── service.template │ └── sources ├── kubernetes-kubelet │ ├── Dockerfile │ ├── config │ ├── config.json.template │ ├── kubelet │ ├── launch.sh │ ├── manifest.json │ ├── service.template │ ├── sources │ └── tmpfiles.template ├── kubernetes-proxy │ ├── Dockerfile │ ├── config │ ├── config.json.template │ ├── launch.sh │ ├── proxy │ ├── service.template │ └── sources └── kubernetes-scheduler │ ├── Dockerfile │ ├── config │ ├── config.json.template │ ├── launch.sh │ ├── scheduler │ └── service.template ├── etc └── magnum │ ├── README-magnum.conf.txt │ ├── api-paste.ini │ ├── keystone_auth_default_policy.sample │ ├── magnum-config-generator.conf │ └── magnum-policy-generator.conf ├── functional_creds.conf.sample ├── magnum ├── __init__.py ├── api │ ├── __init__.py │ ├── app.py │ ├── attr_validator.py │ ├── config.py │ ├── controllers │ │ ├── __init__.py │ │ ├── base.py │ │ ├── link.py │ │ ├── root.py │ │ ├── v1 │ │ │ ├── __init__.py │ │ │ ├── certificate.py │ │ │ ├── cluster.py │ │ │ ├── cluster_actions.py │ │ │ ├── cluster_template.py │ │ │ ├── collection.py │ │ │ ├── federation.py │ │ │ ├── magnum_services.py │ │ │ ├── nodegroup.py │ │ │ ├── quota.py │ │ │ ├── stats.py │ │ │ └── types.py │ │ └── versions.py │ ├── expose.py │ ├── hooks.py │ ├── http_error.py │ ├── middleware │ │ ├── __init__.py │ │ ├── auth_token.py │ │ └── parsable_error.py │ ├── rest_api_version_history.rst │ ├── servicegroup.py │ ├── utils.py │ ├── validation.py │ └── versioned_method.py ├── cmd │ ├── __init__.py │ ├── api.py │ ├── conductor.py │ ├── db_manage.py │ ├── driver_manage.py │ └── status.py ├── common │ ├── __init__.py │ ├── cert_manager │ │ ├── __init__.py │ │ ├── barbican_cert_manager.py │ │ ├── cert_manager.py │ │ ├── local_cert_manager.py │ │ └── x509keypair_cert_manager.py │ ├── cinder.py │ ├── clients.py │ ├── config.py │ ├── context.py │ ├── exception.py │ ├── keystone.py │ ├── name_generator.py │ ├── neutron.py │ ├── nova.py │ ├── octavia.py │ ├── policies │ │ ├── __init__.py │ │ ├── base.py │ │ ├── certificate.py │ │ ├── cluster.py │ │ ├── cluster_template.py │ │ ├── federation.py │ │ ├── magnum_service.py │ │ ├── nodegroup.py │ │ ├── quota.py │ │ └── stats.py │ ├── policy.py │ ├── profiler.py │ ├── rpc.py │ ├── rpc_service.py │ ├── 
service.py │ ├── short_id.py │ ├── urlfetch.py │ ├── utils.py │ └── x509 │ │ ├── __init__.py │ │ ├── extensions.py │ │ ├── operations.py │ │ └── validator.py ├── conductor │ ├── __init__.py │ ├── api.py │ ├── handlers │ │ ├── __init__.py │ │ ├── ca_conductor.py │ │ ├── cluster_conductor.py │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── cert_manager.py │ │ │ └── trust_manager.py │ │ ├── conductor_listener.py │ │ ├── federation_conductor.py │ │ ├── indirection_api.py │ │ └── nodegroup_conductor.py │ ├── k8s_api.py │ ├── monitors.py │ ├── scale_manager.py │ ├── tasks │ │ ├── __init__.py │ │ └── heat_tasks.py │ └── utils.py ├── conf │ ├── __init__.py │ ├── api.py │ ├── barbican.py │ ├── certificates.py │ ├── cinder.py │ ├── cluster.py │ ├── cluster_heat.py │ ├── cluster_templates.py │ ├── conductor.py │ ├── database.py │ ├── docker.py │ ├── docker_registry.py │ ├── drivers.py │ ├── glance.py │ ├── heat.py │ ├── keystone.py │ ├── kubernetes.py │ ├── magnum_client.py │ ├── neutron.py │ ├── nova.py │ ├── octavia.py │ ├── opts.py │ ├── paths.py │ ├── profiler.py │ ├── quota.py │ ├── rpc.py │ ├── services.py │ ├── trust.py │ ├── utils.py │ └── x509.py ├── db │ ├── __init__.py │ ├── api.py │ ├── migration.py │ └── sqlalchemy │ │ ├── __init__.py │ │ ├── alembic.ini │ │ ├── alembic │ │ ├── README │ │ ├── env.py │ │ ├── script.py.mako │ │ └── versions │ │ │ ├── 041d9a0f1159_add_flavor_id_to_cluster.py │ │ │ ├── 049f81f6f584_remove_ssh_authorized_key_from_baymodel.py │ │ │ ├── 04c625aa95ba_change_storage_driver_to_string.py │ │ │ ├── 05d3e97de9ee_add_volume_driver.py │ │ │ ├── 085e601a39f6_remove_service.py │ │ │ ├── 14328d6a57e3_add_master_count_to_bay.py │ │ │ ├── 1481f5b560dd_add_labels_column_to_baymodel_table.py │ │ │ ├── 156ceb17fb0a_add_bay_status_reason.py │ │ │ ├── 1afee1db6cd0_add_master_flavor.py │ │ │ ├── 1c1ff5e56048_rename_container_image_id.py │ │ │ ├── 1d045384b966_add_insecure_baymodel_attr.py │ │ │ ├── 1f196a3dabae_remove_container.py │ │ │ ├── 2581ebaf0cb2_initial_migration.py │ │ │ ├── 27ad304554e2_adding_magnum_service_functionality.py │ │ │ ├── 29affeaa2bc2_rename_bay_master_address.py │ │ │ ├── 2ace4006498_rename_bay_minions_address.py │ │ │ ├── 2ae93c9c6191_add_public_column_to_baymodel_table.py │ │ │ ├── 2b5f24dd95de_rename_service_port.py │ │ │ ├── 2d1354bbf76e_ssh_authorized_key.py │ │ │ ├── 2d8657c0cdc_add_bay_uuid.py │ │ │ ├── 33ef79969018_add_memory_to_container.py │ │ │ ├── 35cff7c86221_add_private_network_to_baymodel.py │ │ │ ├── 3a938526b35d_add_docker_volume_size.py │ │ │ ├── 3b6c4c42adb4_add_unique_constraints.py │ │ │ ├── 3be65537a94a_add_network_driver_baymodel_column.py │ │ │ ├── 3bea56f25597_multi_tenant.py │ │ │ ├── 40f325033343_add_bay_create_timeout_to_bay.py │ │ │ ├── 417917e778f5_add_server_type_to_baymodel.py │ │ │ ├── 421102d1f2d2_create_x509keypair_table.py │ │ │ ├── 456126c6c9e9_create_baylock_table.py │ │ │ ├── 461d798132c7_change_cluster_to_support_nodegroups.py │ │ │ ├── 47380964133d_add_network_subnet_fip_to_cluster.py │ │ │ ├── 4956f03cabad_add_cluster_distro.py │ │ │ ├── 4e263f236334_add_registry_enabled.py │ │ │ ├── 4ea34a59a64c_add_discovery_url_to_bay.py │ │ │ ├── 52bcaf58fecb_add_master_flavor_id_to_cluster.py │ │ │ ├── 53882537ac57_add_host_column_to_pod.py │ │ │ ├── 5518af8dbc21_rename_cert_uuid.py │ │ │ ├── 5793cd26898d_add_bay_status.py │ │ │ ├── 57fbdf2327a2_remove_baylock.py │ │ │ ├── 592131657ca1_add_coe_column_to_baymodel.py │ │ │ ├── 5977879072a7_add_env_to_container.py │ │ │ ├── 59e7664a8ba1_add_container_status.py │ │ │ ├── 
5ad410481b88_rename_insecure.py │ │ │ ├── 5d4caa6e0a42_create_trustee_for_each_bay.py │ │ │ ├── 68ce16dfd341_add_master_lb_enabled_column_to_baymodel_table.py │ │ │ ├── 6f21dc920bb_add_cert_uuid_to_bay.py │ │ │ ├── 6f21dc998bb_add_master_addresses_to_bay.py │ │ │ ├── 720f640f43d1_rename_bay_table_to_cluster.py │ │ │ ├── 7da8489d6a68_separated_ca_cert_for_etcd_and_front_.py │ │ │ ├── 859fb45df249_remove_replication_controller.py │ │ │ ├── 87e62e3c7abc_add_hidden_to_cluster_template.py │ │ │ ├── 95096e2334ee_add_master_lb_enabled_to_cluster.py │ │ │ ├── 966a99e70ff_add_proxy.py │ │ │ ├── 9a1539f1cd2c_add_federation_table.py │ │ │ ├── a0e7c8450ab1_add_labels_to_cluster.py │ │ │ ├── a1136d335540_add_docker_storage_driver_column.py │ │ │ ├── aa0cc27839af_add_docker_volume_size_to_cluster.py │ │ │ ├── ac92cbae311c_add_nodegoup_table.py │ │ │ ├── adc3b7679ae_add_registry_trust_id_to_bay.py │ │ │ ├── b1f612248cab_add_floating_ip_enabled_column_to_.py │ │ │ ├── bb42b7cad130_remove_node_object.py │ │ │ ├── bc46ba6cf949_add_keypair_to_cluster.py │ │ │ ├── c04e925e65c2_nodegroups_v2.py │ │ │ ├── c0f832afc4fd_add_driver_to_cluster_template.py │ │ │ ├── cbbc65a86986_add_health_status_to_cluster.py │ │ │ ├── d072f58ab240_modify_x509keypair_table.py │ │ │ ├── e0653b2d5271_add_fixed_subnet_column_to_baymodel_table.py │ │ │ ├── e647f5931da8_add_insecure_registry_to_baymodel.py │ │ │ ├── e772b2598d9_add_container_command.py │ │ │ ├── ee92b41b8809_create_quotas_table.py │ │ │ ├── ef08a5e057bd_remove_pod.py │ │ │ ├── f1d8b0ab8b8d_added_observations_to_cluster_template.py │ │ │ ├── fb03fdef8919_rename_baymodel_to_clustertemplate.py │ │ │ └── fcb4efee8f8b_add_version_info_to_bay.py │ │ ├── api.py │ │ ├── migration.py │ │ └── models.py ├── drivers │ ├── __init__.py │ ├── common │ │ ├── __init__.py │ │ ├── driver.py │ │ ├── k8s_monitor.py │ │ ├── k8s_scale_manager.py │ │ └── templates │ │ │ ├── environments │ │ │ ├── disable_floating_ip.yaml │ │ │ ├── disable_lb_floating_ip.yaml │ │ │ ├── enable_floating_ip.yaml │ │ │ ├── enable_lb_floating_ip.yaml │ │ │ ├── no_etcd_volume.yaml │ │ │ ├── no_master_lb.yaml │ │ │ ├── no_private_network.yaml │ │ │ ├── no_volume.yaml │ │ │ ├── with_etcd_volume.yaml │ │ │ ├── with_master_lb.yaml │ │ │ ├── with_master_lb_octavia.yaml │ │ │ ├── with_private_network.yaml │ │ │ └── with_volume.yaml │ │ │ ├── fragments │ │ │ ├── api_gateway_switcher_master.yaml │ │ │ ├── api_gateway_switcher_pool.yaml │ │ │ ├── atomic-install-openstack-ca.sh │ │ │ ├── configure-docker-registry.sh │ │ │ ├── configure-docker-storage.sh │ │ │ ├── configure_docker_storage_driver_fedora_coreos.sh │ │ │ ├── enable-docker-registry.sh │ │ │ ├── floating_ip_address_switcher_private.yaml │ │ │ ├── floating_ip_address_switcher_public.yaml │ │ │ ├── network_switcher_existing.yaml │ │ │ └── network_switcher_private.yaml │ │ │ ├── kubernetes │ │ │ ├── fragments │ │ │ │ ├── add-proxy.sh │ │ │ │ ├── calico-service-v3-21-x.sh │ │ │ │ ├── calico-service-v3-26-x.sh │ │ │ │ ├── configure-etcd.sh │ │ │ │ ├── configure-kubernetes-master.sh │ │ │ │ ├── configure-kubernetes-minion.sh │ │ │ │ ├── core-dns-service.sh │ │ │ │ ├── disable-selinux.sh │ │ │ │ ├── enable-auto-healing.sh │ │ │ │ ├── enable-auto-scaling.sh │ │ │ │ ├── enable-cert-api-manager.sh │ │ │ │ ├── enable-cinder-csi.sh │ │ │ │ ├── enable-ingress-controller.sh │ │ │ │ ├── enable-ingress-octavia.sh │ │ │ │ ├── enable-ingress-traefik.sh │ │ │ │ ├── enable-keystone-auth.sh │ │ │ │ ├── enable-prometheus-monitoring.sh │ │ │ │ ├── enable-services-master.sh │ │ │ │ ├── 
enable-services-minion.sh │ │ │ │ ├── flannel-service.sh │ │ │ │ ├── install-clients.sh │ │ │ │ ├── install-cri.sh │ │ │ │ ├── install-helm-modules.sh │ │ │ │ ├── install-helm.sh │ │ │ │ ├── kube-apiserver-to-kubelet-role.sh │ │ │ │ ├── kube-dashboard-service.sh │ │ │ │ ├── make-cert-client.sh │ │ │ │ ├── make-cert.sh │ │ │ │ ├── rotate-kubernetes-ca-certs-master.sh │ │ │ │ ├── rotate-kubernetes-ca-certs-worker.sh │ │ │ │ ├── start-container-agent.sh │ │ │ │ ├── upgrade-kubernetes.sh │ │ │ │ ├── wc-notify-master.sh │ │ │ │ ├── write-heat-params-master.sh │ │ │ │ ├── write-heat-params.sh │ │ │ │ └── write-kube-os-config.sh │ │ │ └── helm │ │ │ │ ├── ingress-nginx.sh │ │ │ │ ├── metrics-server.sh │ │ │ │ ├── prometheus-adapter.sh │ │ │ │ └── prometheus-operator.sh │ │ │ ├── lb_api.yaml │ │ │ ├── lb_etcd.yaml │ │ │ └── network.yaml │ ├── heat │ │ ├── __init__.py │ │ ├── driver.py │ │ ├── k8s_coreos_template_def.py │ │ ├── k8s_fedora_template_def.py │ │ ├── k8s_template_def.py │ │ └── template_def.py │ └── k8s_fedora_coreos_v1 │ │ ├── __init__.py │ │ ├── driver.py │ │ ├── template_def.py │ │ ├── templates │ │ ├── COPYING │ │ ├── fcct-config.yaml │ │ ├── kubecluster.yaml │ │ ├── kubemaster.yaml │ │ ├── kubeminion.yaml │ │ └── user_data.json │ │ └── version.py ├── hacking │ ├── __init__.py │ └── checks.py ├── i18n.py ├── objects │ ├── __init__.py │ ├── base.py │ ├── certificate.py │ ├── cluster.py │ ├── cluster_template.py │ ├── federation.py │ ├── fields.py │ ├── magnum_service.py │ ├── nodegroup.py │ ├── quota.py │ ├── stats.py │ └── x509keypair.py ├── service │ ├── __init__.py │ └── periodic.py ├── servicegroup │ ├── __init__.py │ └── magnum_service_periodic.py ├── tests │ ├── __init__.py │ ├── base.py │ ├── conf_fixture.py │ ├── contrib │ │ ├── copy_instance_logs.sh │ │ ├── gate_hook.sh │ │ └── post_test_hook.sh │ ├── fake_notifier.py │ ├── fakes.py │ ├── functional │ │ ├── __init__.py │ │ ├── api │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ └── v1 │ │ │ │ ├── __init__.py │ │ │ │ ├── clients │ │ │ │ ├── __init__.py │ │ │ │ ├── cert_client.py │ │ │ │ ├── cluster_client.py │ │ │ │ ├── cluster_template_client.py │ │ │ │ └── magnum_service_client.py │ │ │ │ └── models │ │ │ │ ├── __init__.py │ │ │ │ ├── cert_model.py │ │ │ │ ├── cluster_id_model.py │ │ │ │ ├── cluster_model.py │ │ │ │ ├── cluster_template_model.py │ │ │ │ ├── cluster_templatepatch_model.py │ │ │ │ ├── clusterpatch_model.py │ │ │ │ └── magnum_service_model.py │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── client.py │ │ │ ├── config.py │ │ │ ├── datagen.py │ │ │ ├── manager.py │ │ │ ├── models.py │ │ │ └── utils.py │ │ ├── k8s │ │ │ ├── __init__.py │ │ │ ├── test_k8s_python_client.py │ │ │ └── test_magnum_python_client.py │ │ ├── k8s_fcos │ │ │ ├── __init__.py │ │ │ └── test_k8s_python_client.py │ │ ├── k8s_ironic │ │ │ ├── __init__.py │ │ │ └── test_k8s_python_client.py │ │ └── python_client_base.py │ ├── output_fixture.py │ ├── policy_fixture.py │ ├── releasenotes │ │ └── notes │ │ │ └── separated-ca-certs-299c95eea1ffd9b1.yaml │ ├── unit │ │ ├── __init__.py │ │ ├── api │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── controllers │ │ │ │ ├── __init__.py │ │ │ │ ├── auth-paste.ini │ │ │ │ ├── auth-root-access.ini │ │ │ │ ├── auth-v1-access.ini │ │ │ │ ├── noauth-paste.ini │ │ │ │ ├── test_base.py │ │ │ │ ├── test_root.py │ │ │ │ └── v1 │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_certificate.py │ │ │ │ │ ├── test_cluster.py │ │ │ │ │ ├── test_cluster_actions.py │ │ │ │ │ ├── test_cluster_template.py │ │ │ │ │ 
├── test_federation.py │ │ │ │ │ ├── test_magnum_service.py │ │ │ │ │ ├── test_nodegroup.py │ │ │ │ │ ├── test_quota.py │ │ │ │ │ ├── test_stats.py │ │ │ │ │ ├── test_types.py │ │ │ │ │ └── test_utils.py │ │ │ ├── test_app.py │ │ │ ├── test_attr_validator.py │ │ │ ├── test_expose.py │ │ │ ├── test_hooks.py │ │ │ ├── test_servicegroup.py │ │ │ ├── test_validation.py │ │ │ └── utils.py │ │ ├── cmd │ │ │ ├── __init__.py │ │ │ ├── test_api.py │ │ │ ├── test_conductor.py │ │ │ ├── test_db_manage.py │ │ │ ├── test_driver_manage.py │ │ │ └── test_status.py │ │ ├── common │ │ │ ├── __init__.py │ │ │ ├── cert_manager │ │ │ │ ├── __init__.py │ │ │ │ ├── test_barbican.py │ │ │ │ ├── test_cert_manager.py │ │ │ │ ├── test_local.py │ │ │ │ └── test_x509keypair_cert_manager.py │ │ │ ├── policies │ │ │ │ ├── __init__.py │ │ │ │ ├── base.py │ │ │ │ ├── test_certificate_policy.py │ │ │ │ ├── test_cluster_policy.py │ │ │ │ ├── test_cluster_template_policy.py │ │ │ │ ├── test_federation_policy.py │ │ │ │ ├── test_magnum_service_policy.py │ │ │ │ ├── test_nodegroup_policy.py │ │ │ │ ├── test_quota_policy.py │ │ │ │ └── test_stats_policy.py │ │ │ ├── test_clients.py │ │ │ ├── test_context.py │ │ │ ├── test_exception.py │ │ │ ├── test_keystone.py │ │ │ ├── test_neutron.py │ │ │ ├── test_octavia.py │ │ │ ├── test_policy.py │ │ │ ├── test_profiler.py │ │ │ ├── test_rpc.py │ │ │ ├── test_service.py │ │ │ ├── test_short_id.py │ │ │ ├── test_urlfetch.py │ │ │ ├── test_utils.py │ │ │ └── x509 │ │ │ │ ├── __init__.py │ │ │ │ ├── test_operations.py │ │ │ │ ├── test_sign.py │ │ │ │ └── test_validator.py │ │ ├── conductor │ │ │ ├── __init__.py │ │ │ ├── handlers │ │ │ │ ├── __init__.py │ │ │ │ ├── common │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_cert_manager.py │ │ │ │ │ └── test_trust_manager.py │ │ │ │ ├── test_ca_conductor.py │ │ │ │ ├── test_cluster_conductor.py │ │ │ │ ├── test_conductor_listener.py │ │ │ │ ├── test_federation_conductor.py │ │ │ │ ├── test_indirection_api.py │ │ │ │ ├── test_k8s_cluster_conductor.py │ │ │ │ └── test_nodegroup_conductor.py │ │ │ ├── tasks │ │ │ │ ├── __init__.py │ │ │ │ └── test_heat_tasks.py │ │ │ ├── test_k8s_api.py │ │ │ ├── test_monitors.py │ │ │ ├── test_rpcapi.py │ │ │ ├── test_scale_manager.py │ │ │ └── test_utils.py │ │ ├── conf │ │ │ ├── __init__.py │ │ │ └── test_conf.py │ │ ├── db │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── sqlalchemy │ │ │ │ ├── __init__.py │ │ │ │ └── test_types.py │ │ │ ├── test_cluster.py │ │ │ ├── test_cluster_template.py │ │ │ ├── test_federation.py │ │ │ ├── test_magnum_service.py │ │ │ ├── test_nodegroup.py │ │ │ ├── test_quota.py │ │ │ ├── test_x509keypair.py │ │ │ └── utils.py │ │ ├── drivers │ │ │ ├── __init__.py │ │ │ ├── test_heat_driver.py │ │ │ └── test_template_definition.py │ │ ├── objects │ │ │ ├── __init__.py │ │ │ ├── test_cluster.py │ │ │ ├── test_cluster_template.py │ │ │ ├── test_federation.py │ │ │ ├── test_fields.py │ │ │ ├── test_magnum_service.py │ │ │ ├── test_nodegroup.py │ │ │ ├── test_objects.py │ │ │ ├── test_x509keypair.py │ │ │ └── utils.py │ │ ├── service │ │ │ ├── __init__.py │ │ │ └── test_periodic.py │ │ ├── servicegroup │ │ │ ├── __init__.py │ │ │ └── test_magnum_service.py │ │ ├── template │ │ │ ├── __init__.py │ │ │ └── test_template.py │ │ └── test_hacking.py │ └── utils.py ├── version.py └── wsgi │ └── api.py ├── playbooks ├── container-builder-copy-logs.yaml ├── container-builder-setup-gate.yaml ├── container-builder-vars.yaml ├── container-builder.yaml ├── container-publish.yaml ├── post │ └── 
upload-logs.yaml └── pre │ ├── prepare-workspace-images.yaml │ └── prepare-workspace.yaml ├── releasenotes ├── notes │ ├── .placeholder │ ├── CVE-2016-7404-f53e62a4a40e4d30.yaml │ ├── Deploy-traefik-from-the-heat-agent-0bb32f0f2c97405d.yaml │ ├── RBAC-and-client-incompatibility-fdfeab326dfda3bf.yaml │ ├── add-boot-volume-size-check-0262c2b61abc7ccf.yaml │ ├── add-cilium-network-driver-8715190b14cb4f89.yaml │ ├── add-container_infra_prefix-516cc43fbc5a0617.yaml │ ├── add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml │ ├── add-federation-api-cf55d04f96772b0f.yaml │ ├── add-hostgw-backend-option-1d1f9d8d95ec374f.yaml │ ├── add-information-about-cluster-in-event-notifications-a3c992ab24b32fbd.yaml │ ├── add-k8s-label-for-portal-network-cidr-a09edab29da6e7da.yaml │ ├── add-kubelet-to-master-nodes-da2d4ea0d3a332cd.yaml │ ├── add-master_lb_enabled-to-cluster-c773fac9086b2531.yaml │ ├── add-octavia-client-4e5520084eae3c2b.yaml │ ├── add-opensuse-driver-f69b6d346ca82b87.yaml │ ├── add-overlay-networks-to-swarm-4467986d7853fcd8.yaml │ ├── add-upgrade-check-framework-5057ad67a7690a14.yaml │ ├── add_cluster_template_observations_db_and_api_objects-d7350c8193da9470.yaml │ ├── affinity-policy-for-mesos-template-def-82627eb231aa4d28.yaml │ ├── allow-cluster-template-being-renamed-82f7d5d1f33a7957.yaml │ ├── allow-empty-node_groups-ec16898bfc82aec0.yaml │ ├── allow-multimaster-no-fip-b11520485012d949.yaml │ ├── allow-setting-network-subnet-FIP-when-creating-cluster-ae0cda35ade28a9f.yaml │ ├── allow_admin_perform_acitons-cc988655bb72b3f3.yaml │ ├── altered_grafanaUI_dashboards_persistency-1106b2e259a769b0.yaml │ ├── async-bay-operations-support-9819bd06122ea9e5.yaml │ ├── availability_zone-2d73671f5ea065d8.yaml │ ├── boot-from-volume-7c73df68d7f325aa.yaml │ ├── bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml │ ├── bp-auto-generate-name-052ea3fdf05fdbbf.yaml │ ├── bp-barbican-alternative-store-35ec3eda0abb0e25.yaml │ ├── bp-container-monitoring-d4bb1cbd0a4e44cc.yaml │ ├── bp-decouple-lbaas-c8f2d73313c40b98.yaml │ ├── bp-keypair-override-on-create-ca8f12ffca41cd62.yaml │ ├── bp-magnum-notifications-8bd44cfe9e80f82b.yaml │ ├── bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml │ ├── bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml │ ├── broken-kuberenetes-client-d2d1da6029825208.yaml │ ├── bug-1580704-32a0e91e285792ea.yaml │ ├── bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml │ ├── bug-1663757-198e1aa8fa810984.yaml │ ├── bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml │ ├── bug-1718947-0d4e67529e2817d7.yaml │ ├── bug-1722522-d94743c6362a5e48.yaml │ ├── bug-1766284-k8s-fedora-admin-user-e760f9b0edf49391.yaml │ ├── bug-2002728-kube-os-conf-region-46cd60537bdabdb2.yaml │ ├── bug-2002981-trustee-auth-region-name-37796a4e6a274fb8.yaml │ ├── bug-2004942-052321df27529562.yaml │ ├── calico-3.21.2-193c895134e9c3c1.yaml │ ├── calico-configuration-label-ae0b43a7c7123f02.yaml │ ├── calico-network-driver-0199c2459041ae81.yaml │ ├── cert-manager-api-ee0cf7f3b767bb5d.yaml │ ├── change-bay-to-cluster-in-config-1f2b95d1176d7231.yaml │ ├── change-service-name-ce5c72642fe1d3d1.yaml │ ├── cinder-csi-enabled-label-ab2b8ade63c57cf3.yaml │ ├── client-embed-certs-322701471e4d6e1d.yaml │ ├── cluster_template_update_labels-10ce66c87795f11c.yaml │ ├── configurable-k8s-health-polling-interval-75bb83b4701d48c5.yaml │ ├── configure-etcd-auth-bug-1759813-baac5e0fe8a2e97f.yaml │ ├── configure_monitoring_app_endpoints-f00600c244a76cf4.yaml │ ├── containerd-598761bb536af6ba.yaml │ ├── 
control-plane-taint-c6194f968f0817e8.yaml │ ├── coredns-update-9b03da4b89be18ad.yaml │ ├── default-admission-controller-04398548cf63597c.yaml │ ├── default-ng-worker-node-count-a88911a0b7a760a7.yaml │ ├── default-policy-k8s-keystone-auth-fa74aa03dcc12ef3.yaml │ ├── deploy-tiller-in-k8s-df12ee41d00dd7ff.yaml │ ├── deprecate-coreos-8240e173af9fd931.yaml │ ├── deprecate-docker-swarm-b506a766b91fe98e.yaml │ ├── deprecate-fedora-atomic-a5e7e361053253b7.yaml │ ├── deprecate-heapster-7e8dea0bab06aa51.yaml │ ├── deprecate-heat-driver-930d999afde1eece.yaml │ ├── deprecate-in-tree-cinder-c781a5c160d45ab6.yaml │ ├── deprecate-json-formatted-policy-file-b52d805359bc73b7.yaml │ ├── deprecate-k8s-fedora-ironic-f806cbdb090431e2.yaml │ ├── deprecate-magnum-api-wsgi-entrypoint-25878b2d8b7d30b3.yaml │ ├── deprecate-send_cluster_metrics-8adaac64a979f720.yaml │ ├── devicemapper-deprecation-46a59adbf131bde1.yaml │ ├── disable-mesos-from-api-0087ef02ba0477df.yaml │ ├── disable-ssh-password-authn-f2baf619710e52aa.yaml │ ├── dns-autoscale-90b63e3d71d7794e.yaml │ ├── docker-volume-type-46044734f5a27661.yaml │ ├── drop-calico-v3-3-7d47eb04fcb392dc.yaml │ ├── drop-fedora-atomic-driver-76da9f0ea0cf20bb.yaml │ ├── drop-k8s-coreos-9604dd23b0e884b6.yaml │ ├── drop-k8s-fedora-ironic-6c9750a0913435e2.yaml │ ├── drop-py27-support-7e2c4300341f9719.yaml │ ├── drop-python-3-6-and-3-7-68ad47ae9d14dca7.yaml │ ├── drop-tiller-5b98862961003df8.yaml │ ├── drop_mesos-DzAlnyYHjbQC6IfMq.yaml │ ├── drop_mesos_driver-pBmrJ9gAqX3EUROBS2g.yaml │ ├── drop_swarm_driver-3a2e1927053cf372.yaml │ ├── enable-enforce-scope-and-new-defaults-572730ea8804a843.yaml │ ├── enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml │ ├── enable_cloud_provider_label-ed79295041bc46a8.yaml │ ├── ensure-delete-complete-2f9bb53616e1e02b.yaml │ ├── expose_autoscaler_metrics-0ea9c61660409efe.yaml │ ├── expose_traefik_metrics-aebbde99d4ecc231.yaml │ ├── fedora_coreos-e66b44d86dea380f.yaml │ ├── fix-cert-apimanager-527352622c5a9c3b.yaml │ ├── fix-cluster-floating-ip-enabled-default-value-4e24d4bf09fc08c8.yaml │ ├── fix-cluster-update-886bd2d1156bef88.yaml │ ├── fix-driver-token-scope-a2c2b4b4ef813ec7.yaml │ ├── fix-fedora-proxy-a4b8d5fc4ec65e80.yaml │ ├── fix-global-stack-list-7a3a66169f5c4aa8.yaml │ ├── fix-k8s-coe-version-a8ea38f327ea6bb3.yaml │ ├── fix-label-fixed_network_cidr-95d6a2571b58a8fc.yaml │ ├── fix-nginx-getting-oom-killed-76139fd8b57e6c15.yaml │ ├── fix-proxy-of-grafana-script-8b408d9d103dfc06.yaml │ ├── fix-race-condition-for-k8s-multi-masters-29bd36de57df355a.yaml │ ├── fix-serveraddressoutputmapping-for-private-clusters-73a874bb4827d568.yaml │ ├── fix-volume-api-version-908c3f1cf154b231.yaml │ ├── flannel-cni-4a5c9f574325761e.yaml │ ├── flannel-reboot-fix-f1382818daed4fa8.yaml │ ├── grafana_prometheus_tag_label-78540ea106677485.yaml │ ├── heapster-enabled-label-292ca1ddac68a156.yaml │ ├── heat-container-agent-for-train-e63bc1559750fe9c.yaml │ ├── heat-container-agent-tag-92848c1062c16c76.yaml │ ├── heat-container-agent-tag-fe7cec6b890329af.yaml │ ├── helm-install-ingress-nginx-fe2acec1dd3032e3.yaml │ ├── helm-install-metrics-service-cd18be76c4ed0e5f.yaml │ ├── helm-install-metrics-service-e7a5459417504a75.yaml │ ├── helm-install-prometheus-operator-ea87752bc57a0945.yaml │ ├── helm_client_label-1d6e70dfcf8ecd0d.yaml │ ├── hyperkube-prefix-01b9a5f4664edc90.yaml │ ├── ignore-calico-devices-in-network-manager-e1bdb052834e11e9.yaml │ ├── improve-driver-discovery-df61e03c8749a34d.yaml │ ├── improve-k8s-master-kubelet-taint-0c56ffede270116d.yaml │ 
├── ingress-controller-552ea956ceabdd25.yaml │ ├── ingress-ngnix-de3c70ca48552833.yaml │ ├── integrate-osprofiler-79bdf2d0cd8a39fb.yaml │ ├── k8s-cluster-creation-speedup-21b5b368184d7bf0.yaml │ ├── k8s-dashboard-v2.0.0-771ce78b527209d3.yaml │ ├── k8s-delete-vip-fip-b2ddf61ddbc080bc.yaml │ ├── k8s-fcos-version-bumps-ca89507d2cf15384.yaml │ ├── k8s-fedora-atomic-rolling-upgrade-3d8edcdd91fa1529.yaml │ ├── k8s-improve-floating-ip-enabled-84cd00224d6b7bc1.yaml │ ├── k8s-keystone-auth-6c88c1a2d406fb61.yaml │ ├── k8s-nodes-security-group-9d8dbb91b006d9dd.yaml │ ├── k8s-octavia-ingress-controller-32c0b97031fd0dd4.yaml │ ├── k8s-prometheus-clusterip-b191fa163e3f1125.yaml │ ├── k8s-volumes-az-fix-85ad48998d2c12aa.yaml │ ├── k8s_fedora_atomic_apply_cluster_role-8a46c881de1a1fa3.yaml │ ├── k8s_fedora_protect_kubelet-8468ddcb92c2a624.yaml │ ├── keystone-auth-repo-6970c05f44299326.yaml │ ├── keystone_trustee_interface-6d63b74616dda1d4.yaml │ ├── kubelet-nfs-b51e572adfb56378.yaml │ ├── kubernetes-cloud-config-6c9a4bfec47e3bb4.yaml │ ├── lb-algorithm-36a15eb21fd5c4b1.yaml │ ├── make-keypair-optional-fcf4a17e440d0879.yaml │ ├── master-lb-allowed-cidrs-cc599da4eb96e983.yaml │ ├── merge-labels-9ba7deffc5bb3c7f.yaml │ ├── migrations-1.3.20-60e5f990422f2ca5.yaml │ ├── missing-ip-in-api-address-c25eef757d5336aa.yaml │ ├── monitoring_persistent_storage-c5857fc099bd2f65.yaml │ ├── monitoring_scrape_ca_and_traefik-5544d8dd5ab7c234.yaml │ ├── monitoring_scrape_internal-6697e50f091b0c9c.yaml │ ├── no-cinder-volume-87b9339e066c30a0.yaml │ ├── nodegroup-limit-89930d45ee06c621.yaml │ ├── octavia-provider-3984ee3bf381ced1.yaml │ ├── periodic-logs-use-uuid-65b257ab9c227494.yaml │ ├── podsecuritypolicy-2400063d73524e06.yaml │ ├── pre-delete-all-loadbalancers-350a69ec787e11ea.yaml │ ├── pre-delete-cluster-5e27cfdf45e25805.yaml │ ├── prometheus-adapter-15fba9d739676e70.yaml │ ├── prometheus-operator-compatible-with-k8s-1-16-f8be99cf527075b8.yaml │ ├── quota-api-182cd1bc9e706b17.yaml │ ├── remove-container-endpoint-3494eb8bd2406e87.yaml │ ├── remove-podsecuritypolicy-5851f4009f1a166c.yaml │ ├── remove-send_cluster_metrics-2a09eba8627c7ceb.yaml │ ├── rename-minion-to-node-9d32fe77d765f149.yaml │ ├── resize-api-2bf1fb164484dea9.yaml │ ├── return-clusterid-for-resize-upgrade-6e841c7b568fa807.yaml │ ├── return-server-id-in-kubeminion-cb33f5141e0b7fa9.yaml │ ├── rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml │ ├── rotate-cluster-cert-9f84deb0adf9afb1.yaml │ ├── server-groups-for-both-master-and-workder-bdd491e4323955d4.yaml │ ├── set-traefik-tag-7d4aca5685147970.yaml │ ├── stats-api-68bc66147ac027e6.yaml │ ├── story-2008548-65a571ad15451937.yaml │ ├── strip-ca-certificate-a09d0c31c45973df.yaml │ ├── support-all-tenants-for-admin-a042f5c520d35837.yaml │ ├── support-auto-healing-3e07c16c55209b0a.yaml │ ├── support-auto-healing-controller-333d1266918111e9.yaml │ ├── support-docker-storage-driver-for-fedora-coreos-697ffcc47e7e8359.yaml │ ├── support-dockershim-removal-cad104d069f1a50b.yaml │ ├── support-fedora-atomic-os-upgrade-9f47182b21c6c028.yaml │ ├── support-helm-v3-5c68eca89fc9446b.yaml │ ├── support-multi-dns-server-0528be20f0e6aa62.yaml │ ├── support-octavia-for-k8s-service-d5d7fd041f9d76fa.yaml │ ├── support-policy-and-doc-in-code-0c19e479dbd953c9.yaml │ ├── support-post-install-file-1fe7afe7698dd7b2.yaml │ ├── support-rotate-ca-certs-913a6ef1b571733c.yaml │ ├── support-selinux-mode-5bd2a3ece23a2caa.yaml │ ├── support-sha256-verification-for-hyperkube-fb2292c6a8bb00ba.yaml │ ├── 
support-updating-k8s-cluster-health-via-api-b8a3cac3031c50a5.yaml │ ├── support-upgrade-on-behalf-of-user-c04994831360f8c1.yaml │ ├── support_nodes_affinity_policy-22253fb9cf6739ec.yaml │ ├── swarm-integration-with-cinder-e3068138a3f75dbe.yaml │ ├── swarm-live-restore-b03ad192367abced.yaml │ ├── sync-service-account-keys-for-multi-masters-71217c4cf4dd472c.yaml │ ├── traefik-compatible-with-k8s-1-16-9a9ef6d3ccc92fb4.yaml │ ├── update-certificate-api-policy-rules-027c80f2c9ff4598.yaml │ ├── update-cloud-provider-openstack-repo-e6209ce2e3986e12.yaml │ ├── update-containerd-version-url-c095c0ee3c1a538b.yaml │ ├── update-flannel-version.yaml │ ├── update-kubernetes-dashboard-5196831c32d55aee.yaml │ ├── update-swarm-73d4340a881bff2f.yaml │ ├── update-to-f27-cc8aa873cdf111bc.yaml │ ├── update-traefik-min-tls-protocol-de7e36de90c1a2f3.yaml │ ├── update_prometheus_monitoring-342a86f826be6579.yaml │ ├── upgrade-api-975233ab93c0c092.yaml │ ├── upgrade-api-heat-removal-300f15d863515257.yaml │ ├── upgrade-calico-6912a6f4fb5c21de.yaml │ ├── upgrade-coredns-25f3879c3a658309.yaml │ ├── upgrade-etcd-and-use-quay-io-coreos-etcd-1cb8e38e974f5975.yaml │ ├── upgrade-flannel-db5ef049e23fc4a8.yaml │ ├── upgrade-to-k8s-v1.11.1-8065fd768873295d.yaml │ ├── upgrade_api-1fecc206e5b0ef99.yaml │ ├── use_podman-39532143be2296c2.yaml │ └── using-vxlan-for-flannel-backend-8d82a290ca97d6e2.yaml └── source │ ├── 2023.1.rst │ ├── 2023.2.rst │ ├── 2024.1.rst │ ├── 2024.2.rst │ ├── 2025.1.rst │ ├── _static │ └── .placeholder │ ├── _templates │ └── .placeholder │ ├── conf.py │ ├── index.rst │ ├── liberty.rst │ ├── locale │ ├── en_GB │ │ └── LC_MESSAGES │ │ │ └── releasenotes.po │ ├── fr │ │ └── LC_MESSAGES │ │ │ └── releasenotes.po │ └── ja │ │ └── LC_MESSAGES │ │ └── releasenotes.po │ ├── mitaka.rst │ ├── newton.rst │ ├── ocata.rst │ ├── pike.rst │ ├── queens.rst │ ├── rocky.rst │ ├── stein.rst │ ├── train.rst │ ├── unreleased.rst │ ├── ussuri.rst │ ├── victoria.rst │ ├── wallaby.rst │ ├── xena.rst │ ├── yoga.rst │ └── zed.rst ├── requirements.txt ├── setup.cfg ├── setup.py ├── specs ├── async-container-operation.rst ├── bay-drivers.rst ├── container-networking-model.rst ├── container-volume-integration-model.rst ├── containers-service.rst ├── create-trustee-user-for-each-bay.rst ├── flatten_attributes.rst ├── magnum-horizon-plugin.rst ├── open-dcos.rst ├── resource-quotas.rst ├── stats-api-spec.rst └── tls-support-magnum.rst ├── test-requirements.txt ├── tools ├── cover.sh ├── flake8wrap.sh └── sync │ └── cinder-csi └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = magnum 4 | omit = magnum/tests/* 5 | 6 | [report] 7 | ignore_errors = True 8 | exclude_lines = 9 | pass 10 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=review.opendev.org 3 | port=29418 4 | project=openstack/magnum.git 5 | -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | # Format is: 2 | # 3 | # 4 | -------------------------------------------------------------------------------- /.stestr.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | test_path=${OS_TEST_PATH:-./magnum/tests/unit} 3 | top_dir=./ 4 | 5 | 
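The two dotfiles above scope the test tooling: .coveragerc restricts coverage measurement to the magnum package while omitting magnum/tests/*, and .stestr.conf points the stestr runner at ./magnum/tests/unit. As a minimal sketch of how they are typically exercised — assuming stestr is installed, and assuming tox.ini (contents not shown here) wires up the usual environments:

    $ stestr run                            # discovers and runs ./magnum/tests/unit per .stestr.conf
    $ stestr run magnum.tests.unit.common   # regex filter: run only the common unit tests
    $ tox -e py3                            # assumed tox environment wrapping the same runner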
-------------------------------------------------------------------------------- /api-ref/source/index.rst: -------------------------------------------------------------------------------- 1 | :tocdepth: 2 2 | 3 | ======================================== 4 | Container Infrastructure Management API 5 | ======================================== 6 | 7 | .. rest_expand_all:: 8 | 9 | .. include:: versions.inc 10 | .. include:: urls.inc 11 | .. include:: clusters.inc 12 | .. include:: clustertemplates.inc 13 | .. include:: certificates.inc 14 | .. include:: mservices.inc 15 | .. include:: stats.inc 16 | .. include:: quotas.inc 17 | -------------------------------------------------------------------------------- /api-ref/source/samples/bay-create-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "uuid":"746e779a-751a-456b-a3e9-c883d734946f" 3 | } -------------------------------------------------------------------------------- /api-ref/source/samples/bay-update-req.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "path":"/node_count", 4 | "value":2, 5 | "op":"replace" 6 | } 7 | ] -------------------------------------------------------------------------------- /api-ref/source/samples/baymodel-create-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "labels":{ 3 | 4 | }, 5 | "fixed_subnet":null, 6 | "master_flavor_id":null, 7 | "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", 8 | "https_proxy":"http://10.164.177.169:8080", 9 | "tls_disabled":false, 10 | "keypair_id":"kp", 11 | "public":false, 12 | "http_proxy":"http://10.164.177.169:8080", 13 | "docker_volume_size":3, 14 | "server_type":"vm", 15 | "external_network_id":"public", 16 | "image_id":"fedora-atomic-latest", 17 | "volume_driver":"cinder", 18 | "registry_enabled":false, 19 | "docker_storage_driver":"devicemapper", 20 | "name":"k8s-bm2", 21 | "network_driver":"flannel", 22 | "fixed_network":null, 23 | "coe":"kubernetes", 24 | "flavor_id":"m1.small", 25 | "master_lb_enabled":true, 26 | "dns_nameserver":"8.8.8.8" 27 | } -------------------------------------------------------------------------------- /api-ref/source/samples/baymodel-update-req.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "path":"/master_lb_enabled", 4 | "value":"True", 5 | "op":"replace" 6 | }, 7 | { 8 | "path":"/registry_enabled", 9 | "value":"True", 10 | "op":"replace" 11 | } 12 | ] -------------------------------------------------------------------------------- /api-ref/source/samples/certificates-ca-show-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", 3 | "pem":"-----BEGIN CERTIFICATE-----\nMIICzDCCAbSgAwIBAgIQOOkVcEN7TNa9E80GoUs4xDANBgkqhkiG9w0BAQsFADAO\n-----END CERTIFICATE-----\n", 4 | "links":[ 5 | { 6 | "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", 7 | "rel":"self" 8 | }, 9 | { 10 | "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", 11 | "rel":"bookmark" 12 | } 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /api-ref/source/samples/certificates-ca-sign-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", 3 | 
"csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" 4 | } 5 | -------------------------------------------------------------------------------- /api-ref/source/samples/certificates-ca-sign-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "pem":"-----BEGIN CERTIFICATE-----\nMIIDxDCCAqygAwIBAgIRALgUbIjdKUy8lqErJmCxVfkwDQYJKoZIhvcNAQELBQAw\n-----END CERTIFICATE-----\n", 3 | "cluster_uuid":"0b4b766f-1500-44b3-9804-5a6e12fe6df4", 4 | "links":[ 5 | { 6 | "href":"http://10.164.180.104:9511/v1/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", 7 | "rel":"self" 8 | }, 9 | { 10 | "href":"http://10.164.180.104:9511/certificates/0b4b766f-1500-44b3-9804-5a6e12fe6df4", 11 | "rel":"bookmark" 12 | } 13 | ], 14 | "csr":"-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n" 15 | } 16 | -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-create-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "name":"k8s", 3 | "discovery_url":null, 4 | "master_count":2, 5 | "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", 6 | "node_count":2, 7 | "create_timeout":60, 8 | "keypair":"my_keypair", 9 | "master_flavor_id":null, 10 | "labels":{ 11 | }, 12 | "flavor_id":null 13 | } 14 | -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-create-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "uuid":"746e779a-751a-456b-a3e9-c883d734946f" 3 | } -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-get-all-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "clusters":[ 3 | { 4 | "status":"CREATE_IN_PROGRESS", 5 | "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", 6 | "uuid":"731387cf-a92b-4c36-981e-3271d63e5597", 7 | "links":[ 8 | { 9 | "href":"http://10.164.180.104:9511/v1/clusters/731387cf-a92b-4c36-981e-3271d63e5597", 10 | "rel":"self" 11 | }, 12 | { 13 | "href":"http://10.164.180.104:9511/clusters/731387cf-a92b-4c36-981e-3271d63e5597", 14 | "rel":"bookmark" 15 | } 16 | ], 17 | "stack_id":"31c1ee6c-081e-4f39-9f0f-f1d87a7defa1", 18 | "keypair":"my_keypair", 19 | "master_count":1, 20 | "create_timeout":60, 21 | "node_count":1, 22 | "name":"k8s" 23 | } 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-resize-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "node_count": 3, 3 | "nodes_to_remove": ["e74c40e0-d825-11e2-a28f-0800200c9a66"], 4 | "nodegroup": "production_group" 5 | } -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-resize-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "uuid":"746e779a-751a-456b-a3e9-c883d734946f" 3 | } -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-update-req.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "path":"/node_count", 4 | "value":2, 5 | 
"op":"replace" 6 | } 7 | ] -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-upgrade-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster_template": "e74c40e0-d825-11e2-a28f-0800200c9a66", 3 | "max_batch_size": 1, 4 | "nodegroup": "production_group" 5 | } -------------------------------------------------------------------------------- /api-ref/source/samples/cluster-upgrade-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "uuid":"746e779a-751a-456b-a3e9-c883d734946f" 3 | } 4 | -------------------------------------------------------------------------------- /api-ref/source/samples/clustertemplate-create-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "labels":{ 3 | 4 | }, 5 | "fixed_subnet":null, 6 | "master_flavor_id":null, 7 | "no_proxy":"10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", 8 | "https_proxy":"http://10.164.177.169:8080", 9 | "tls_disabled":false, 10 | "keypair_id":"kp", 11 | "public":false, 12 | "http_proxy":"http://10.164.177.169:8080", 13 | "docker_volume_size":3, 14 | "server_type":"vm", 15 | "external_network_id":"public", 16 | "image_id":"fedora-atomic-latest", 17 | "volume_driver":"cinder", 18 | "registry_enabled":false, 19 | "docker_storage_driver":"devicemapper", 20 | "name":"k8s-bm2", 21 | "network_driver":"flannel", 22 | "fixed_network":null, 23 | "coe":"kubernetes", 24 | "flavor_id":"m1.small", 25 | "master_lb_enabled":true, 26 | "dns_nameserver":"8.8.8.8" 27 | } -------------------------------------------------------------------------------- /api-ref/source/samples/clustertemplate-update-req.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "path":"/master_lb_enabled", 4 | "value":"True", 5 | "op":"replace" 6 | }, 7 | { 8 | "path":"/registry_enabled", 9 | "value":"True", 10 | "op":"replace" 11 | } 12 | ] -------------------------------------------------------------------------------- /api-ref/source/samples/mservice-get-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "mservices":[ 3 | { 4 | "binary":"magnum-conductor", 5 | "created_at":"2016-08-23T10:52:13+00:00", 6 | "state":"up", 7 | "report_count":2179, 8 | "updated_at":"2016-08-25T01:13:16+00:00", 9 | "host":"magnum-manager", 10 | "disabled_reason":null, 11 | "id":1 12 | } 13 | ] 14 | } -------------------------------------------------------------------------------- /api-ref/source/samples/quota-create-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", 3 | "resource": "Cluster", 4 | "hard_limit": 10 5 | } -------------------------------------------------------------------------------- /api-ref/source/samples/quota-create-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource": "Cluster", 3 | "created_at": "2017-01-17T17:35:48+00:00", 4 | "updated_at": null, 5 | "hard_limit": 1, 6 | "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", 7 | "id": 26 8 | } -------------------------------------------------------------------------------- /api-ref/source/samples/quota-delete-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", 3 | "resource": 
"Cluster" 4 | } -------------------------------------------------------------------------------- /api-ref/source/samples/quota-get-all-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "quotas": [ 3 | { 4 | "resource": "Cluster", 5 | "created_at": "2017-01-17T17:35:49+00:00", 6 | "updated_at": "2017-01-17T17:38:21+00:00", 7 | "hard_limit": 10, 8 | "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", 9 | "id": 26 10 | } 11 | ] 12 | } -------------------------------------------------------------------------------- /api-ref/source/samples/quota-get-one-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource": "Cluster", 3 | "created_at": "2017-01-17T17:35:49+00:00", 4 | "updated_at": "2017-01-17T17:38:20+00:00", 5 | "hard_limit": 10, 6 | "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", 7 | "id": 26 8 | } -------------------------------------------------------------------------------- /api-ref/source/samples/quota-update-req.json: -------------------------------------------------------------------------------- 1 | { 2 | "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", 3 | "resource": "Cluster", 4 | "hard_limit": 10 5 | } -------------------------------------------------------------------------------- /api-ref/source/samples/quota-update-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "resource": "Cluster", 3 | "created_at": "2017-01-17T17:35:49+00:00", 4 | "updated_at": "2017-01-17T17:38:20+00:00", 5 | "hard_limit": 10, 6 | "project_id": "aa5436ab58144c768ca4e9d2e9f5c3b2", 7 | "id": 26 8 | } -------------------------------------------------------------------------------- /api-ref/source/samples/stats-get-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "clusters": 1, 3 | "nodes": 2 4 | } 5 | -------------------------------------------------------------------------------- /api-ref/source/samples/versions-get-resp.json: -------------------------------------------------------------------------------- 1 | { 2 | "versions":[ 3 | { 4 | "status":"CURRENT", 5 | "min_version":"1.1", 6 | "max_version":"1.4", 7 | "id":"v1", 8 | "links":[ 9 | { 10 | "href":"http://10.164.180.104:9511/v1/", 11 | "rel":"self" 12 | } 13 | ] 14 | } 15 | ], 16 | "name":"OpenStack Magnum API", 17 | "description":"Magnum is an OpenStack project which aims to provide container management." 18 | } -------------------------------------------------------------------------------- /bindep.txt: -------------------------------------------------------------------------------- 1 | # This is a cross-platform list tracking distribution packages needed by tests; 2 | # see http://docs.openstack.org/infra/bindep/ for additional information. 3 | graphviz [doc test] 4 | 5 | # PDF Docs package dependencies 6 | tex-gyre [doc platform:dpkg] 7 | -------------------------------------------------------------------------------- /contrib/drivers/k8s_opensuse_v1/README.md: -------------------------------------------------------------------------------- 1 | # Magnum openSUSE K8s driver 2 | 3 | This is openSUSE Kubernetes driver for Magnum, which allow to deploy Kubernetes cluster on openSUSE. 4 | 5 | ## Installation 6 | 7 | ### 1. Install the openSUSE K8s driver in Magnum 8 | 9 | - To install the driver, from this directory run: 10 | 11 | `python ./setup.py install` 12 | 13 | ### 2. 
Enable driver in magnum.conf 14 | 15 | enabled_definitions = ...,magnum_vm_opensuse_k8s 16 | 17 | ### 2. Restart Magnum 18 | 19 | Both Magnum services has to restarted `magnum-api` and `magnum-conductor` 20 | -------------------------------------------------------------------------------- /contrib/drivers/k8s_opensuse_v1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/contrib/drivers/k8s_opensuse_v1/__init__.py -------------------------------------------------------------------------------- /contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . /etc/sysconfig/heat-params 4 | 5 | myip="$KUBE_NODE_IP" 6 | 7 | sed -i ' 8 | /ETCD_NAME=/c ETCD_NAME="'$myip'" 9 | /ETCD_DATA_DIR=/c ETCD_DATA_DIR="/var/lib/etcd/default.etcd" 10 | /ETCD_LISTEN_CLIENT_URLS=/c ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379" 11 | /ETCD_LISTEN_PEER_URLS=/c ETCD_LISTEN_PEER_URLS="http://'$myip':2380" 12 | /ETCD_ADVERTISE_CLIENT_URLS=/c ETCD_ADVERTISE_CLIENT_URLS="http://'$myip':2379" 13 | /ETCD_INITIAL_ADVERTISE_PEER_URLS=/c ETCD_INITIAL_ADVERTISE_PEER_URLS="http://'$myip':2380" 14 | /ETCD_DISCOVERY=/c ETCD_DISCOVERY="'$ETCD_DISCOVERY_URL'" 15 | ' /etc/sysconfig/etcd 16 | 17 | echo "activating etcd service" 18 | systemctl enable etcd 19 | 20 | echo "starting etcd service" 21 | systemctl --no-block start etcd 22 | -------------------------------------------------------------------------------- /contrib/drivers/k8s_opensuse_v1/templates/fragments/configure-flanneld-minion.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . /etc/sysconfig/heat-params 4 | 5 | if [ "$NETWORK_DRIVER" != "flannel" ]; then 6 | exit 0 7 | fi 8 | 9 | sed -i ' 10 | /^FLANNEL_ETCD_ENDPOINTS=/ s|=.*|="http://'"$ETCD_SERVER_IP"':2379"| 11 | /^#FLANNEL_OPTIONS=/ s//FLANNEL_OPTIONS="-iface eth0 --ip-masq"/ 12 | ' /etc/sysconfig/flanneld 13 | 14 | cat >> /etc/sysconfig/flanneld <=2.2.1 # Apache-2.0 2 | osprofiler>=1.4.0 # Apache-2.0 3 | os-api-ref>=1.4.0 # Apache-2.0 4 | sphinx>=2.0.0,!=2.1.0 # BSD 5 | reno>=3.1.0 # Apache-2.0 6 | -------------------------------------------------------------------------------- /doc/source/admin/index.rst: -------------------------------------------------------------------------------- 1 | Administrator's Guide 2 | ===================== 3 | 4 | Installation & Operations 5 | ------------------------- 6 | 7 | If you are a system administrator running Magnum, this section contains 8 | information that should help you understand how to deploy, operate, and upgrade 9 | the services. 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | Magnum Proxy 15 | gmr 16 | Troubleshooting FAQ 17 | 18 | Configuration 19 | ------------- 20 | 21 | Following pages will be helpful in configuring specific aspects 22 | of Magnum that may or may not be suitable to every situation. 23 | 24 | .. toctree:: 25 | :maxdepth: 1 26 | 27 | configuring 28 | -------------------------------------------------------------------------------- /doc/source/cli/index.rst: -------------------------------------------------------------------------------- 1 | Magnum CLI Documentation 2 | ======================== 3 | 4 | In this section you will find information on Magnum’s command line 5 | interface. 6 | 7 | .. 
toctree:: 8 | :maxdepth: 1 9 | 10 | magnum-status 11 | -------------------------------------------------------------------------------- /doc/source/configuration/index.rst: -------------------------------------------------------------------------------- 1 | Sample Configuration and Policy File 2 | ------------------------------------ 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | 7 | sample-config.rst 8 | sample-policy.rst 9 | samples/index.rst 10 | -------------------------------------------------------------------------------- /doc/source/configuration/sample-config.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Magnum Configuration Options 3 | ============================ 4 | 5 | The following is a sample Magnum configuration for adaptation and use. It is 6 | auto-generated from Magnum when this documentation is built, so 7 | if you are having issues with an option, please compare your version of 8 | Magnum with the version of this documentation. 9 | 10 | .. only:: html 11 | 12 | The sample configuration can also be viewed in :download:`file form 13 | `. 14 | 15 | .. literalinclude:: /_static/magnum.conf.sample 16 | 17 | .. only:: latex 18 | 19 | See the online version of this documentation for the full example config 20 | file. 21 | -------------------------------------------------------------------------------- /doc/source/configuration/sample-policy.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | Policy configuration 3 | ==================== 4 | 5 | Configuration 6 | ~~~~~~~~~~~~~ 7 | 8 | .. warning:: 9 | 10 | JSON formatted policy file is deprecated since Magnum 12.0.0 (Wallaby). 11 | This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing 12 | JSON-formatted policy file to YAML in a backward-compatible way. 13 | 14 | .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html 15 | 16 | The following is an overview of all available policies in Magnum. For a sample 17 | configuration file, refer to :doc:`samples/policy-yaml`. 18 | 19 | .. show-policy:: 20 | :config-file: ../../etc/magnum/magnum-policy-generator.conf 21 | -------------------------------------------------------------------------------- /doc/source/configuration/samples/index.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Sample configuration files 3 | ========================== 4 | 5 | Configuration files can alter how Magnum behaves at runtime and by default 6 | are located in ``/etc/magnum/``. Links to sample configuration files can be 7 | found below: 8 | 9 | .. toctree:: 10 | 11 | policy-yaml.rst 12 | -------------------------------------------------------------------------------- /doc/source/configuration/samples/policy-yaml.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | policy.yaml 3 | =========== 4 | 5 | .. warning:: 6 | 7 | JSON formatted policy file is deprecated since Magnum 12.0.0 (Wallaby). 8 | This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing 9 | JSON-formatted policy file to YAML in a backward-compatible way. 10 | 11 | .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html 12 | 13 | Use the ``policy.yaml`` file to define additional access controls that apply to 14 | the Container Infrastructure Management service: 15 | 16 | .. 
literalinclude:: ../../_static/magnum.policy.yaml.sample 17 | 18 | -------------------------------------------------------------------------------- /doc/source/contributor/api-microversion-history.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../../magnum/api/rest_api_version_history.rst 2 | -------------------------------------------------------------------------------- /doc/source/contributor/contributing.rst: -------------------------------------------------------------------------------- 1 | .. include:: ../../../CONTRIBUTING.rst 2 | -------------------------------------------------------------------------------- /doc/source/contributor/index.rst: -------------------------------------------------------------------------------- 1 | Contributor's Guide 2 | =================== 3 | 4 | Getting Started 5 | --------------- 6 | 7 | If you are new to Magnum, this section contains information that should help 8 | you get started as a developer working on the project or contributing to the 9 | project. 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | Developer Contribution Guide 15 | Setting Up Your Development Environment 16 | Running Tempest Tests 17 | Developer Troubleshooting Guide 18 | 19 | There are some other important documents also that helps new contributors to 20 | contribute effectively towards code standards to the project. 21 | 22 | .. toctree:: 23 | :maxdepth: 1 24 | 25 | Writing a Release Note 26 | Adding a New API Method 27 | Changing Magnum DB Objects 28 | api-microversion-history 29 | policies 30 | -------------------------------------------------------------------------------- /doc/source/images/MagnumVolumeIntegration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/doc/source/images/MagnumVolumeIntegration.png -------------------------------------------------------------------------------- /doc/source/images/cluster-create.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/doc/source/images/cluster-create.png -------------------------------------------------------------------------------- /doc/source/images/cluster-template-details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/doc/source/images/cluster-template-details.png -------------------------------------------------------------------------------- /doc/source/images/cluster-template.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/doc/source/images/cluster-template.png -------------------------------------------------------------------------------- /doc/source/install/common/configure_3_populate_database.rst: -------------------------------------------------------------------------------- 1 | 3. Populate Magnum database: 2 | 3 | .. 
code-block:: console 4 | 5 | # su -s /bin/sh -c "magnum-db-manage upgrade" magnum 6 | -------------------------------------------------------------------------------- /doc/source/install/get_started.rst: -------------------------------------------------------------------------------- 1 | ==================================================== 2 | Container Infrastructure Management service overview 3 | ==================================================== 4 | 5 | The Container Infrastructure Management service consists of the 6 | following components: 7 | 8 | ``magnum`` command-line client 9 | A CLI that communicates with the ``magnum-api`` to create and manage 10 | container clusters. End developers can directly use the magnum 11 | REST API. 12 | 13 | ``magnum-api`` service 14 | An OpenStack-native REST API that processes API requests by sending 15 | them to the ``magnum-conductor`` via AMQP. 16 | 17 | ``magnum-conductor`` service 18 | Runs on a controller machine and connects to heat to orchestrate a 19 | cluster. Additionally, it connects to a Kubernetes API endpoint. 20 | -------------------------------------------------------------------------------- /doc/source/install/index.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Magnum Installation Guide 3 | ========================= 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | get_started.rst 9 | install.rst 10 | verify.rst 11 | launch-instance.rst 12 | next-steps.rst 13 | 14 | The Container Infrastructure Management service codenamed (magnum) is an 15 | OpenStack API service developed by the OpenStack Containers Team making 16 | container orchestration engines (COE) such as Kubernetes 17 | available as first class resources in OpenStack. Magnum uses 18 | Heat to orchestrate an OS image which contains Docker and Kubernetes and 19 | runs that image in either virtual machines or bare metal in a cluster 20 | configuration. 21 | 22 | This chapter assumes a working setup of OpenStack following `OpenStack 23 | Installation Tutorial `_. 24 | -------------------------------------------------------------------------------- /doc/source/install/install-debian-manual.rst: -------------------------------------------------------------------------------- 1 | .. _install-debian-manual: 2 | 3 | Install and configure for Debian 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | This section describes how to install and configure the Container 7 | Infrastructure Management service for Debian. 8 | 9 | .. include:: common/prerequisites.rst 10 | 11 | Install and configure components 12 | -------------------------------- 13 | 14 | #. Install the common and library packages: 15 | 16 | .. code-block:: console 17 | 18 | # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor 19 | 20 | .. include:: common/configure_2_edit_magnum_conf.rst 21 | 22 | .. include:: common/configure_3_populate_database.rst 23 | 24 | Finalize installation 25 | --------------------- 26 | 27 | * Restart the Container Infrastructure Management services: 28 | 29 | .. code-block:: console 30 | 31 | # service magnum-api restart 32 | # service magnum-conductor restart 33 | -------------------------------------------------------------------------------- /doc/source/install/install-ubuntu.rst: -------------------------------------------------------------------------------- 1 | .. 
_install-ubuntu: 2 | 3 | Install and configure for Ubuntu 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | This section describes how to install and configure the Container 7 | Infrastructure Management service for Ubuntu 14.04 (LTS). 8 | 9 | .. include:: common/prerequisites.rst 10 | 11 | Install and configure components 12 | -------------------------------- 13 | 14 | #. Install the common and library packages: 15 | 16 | .. code-block:: console 17 | 18 | # DEBIAN_FRONTEND=noninteractive apt-get install magnum-api magnum-conductor python3-magnumclient 19 | 20 | .. include:: common/configure_2_edit_magnum_conf.rst 21 | 22 | .. include:: common/configure_3_populate_database.rst 23 | 24 | Finalize installation 25 | --------------------- 26 | 27 | * Restart the Container Infrastructure Management services: 28 | 29 | .. code-block:: console 30 | 31 | # service magnum-api restart 32 | # service magnum-conductor restart 33 | -------------------------------------------------------------------------------- /doc/source/install/next-steps.rst: -------------------------------------------------------------------------------- 1 | .. _next-steps: 2 | 3 | Next steps 4 | ~~~~~~~~~~ 5 | 6 | Your OpenStack environment now includes the magnum service. 7 | 8 | To add more services, see the `additional documentation on installing OpenStack 9 | `_ . 10 | -------------------------------------------------------------------------------- /doc/source/user/heat-templates.rst: -------------------------------------------------------------------------------- 1 | Heat Stack Templates are what Magnum passes to Heat to generate a cluster. For 2 | each ClusterTemplate resource in Magnum, a Heat stack is created to arrange all 3 | of the cloud resources needed to support the container orchestration 4 | environment. These Heat stack templates provide a mapping of Magnum object 5 | attributes to Heat template parameters, along with Magnum consumable stack 6 | outputs. Magnum passes the Heat Stack Template to the Heat service to create a 7 | Heat stack. The result is a full Container Orchestration Environment. 8 | 9 | .. 
list-plugins:: magnum.template_definitions 10 | :detailed: 11 | -------------------------------------------------------------------------------- /dockerfiles/cluster-autoscaler/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.4 as builder 2 | 3 | ARG AUTOSCALER_VERSION 4 | 5 | ENV GOPATH=/go 6 | 7 | WORKDIR $GOPATH/src/k8s.io/ 8 | RUN git clone -b ${AUTOSCALER_VERSION} --single-branch http://github.com/kubernetes/autoscaler.git autoscaler 9 | WORKDIR autoscaler/cluster-autoscaler 10 | RUN CGO_ENABLED=0 GO111MODULE=off GOOS=linux go build -o cluster-autoscaler --ldflags=-s --tags magnum 11 | 12 | FROM gcr.io/distroless/static:latest 13 | 14 | COPY --from=builder /go/src/k8s.io/autoscaler/cluster-autoscaler/cluster-autoscaler /cluster-autoscaler 15 | CMD ["/cluster-autoscaler"] 16 | -------------------------------------------------------------------------------- /dockerfiles/heat-container-agent/launch: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /opt/heat-container-agent/configure_container_agent.sh 4 | 5 | export LC_ALL=C 6 | 7 | exec os-collect-config --debug 8 | -------------------------------------------------------------------------------- /dockerfiles/heat-container-agent/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "defaultValues": {}, 3 | "version": "1.0" 4 | } -------------------------------------------------------------------------------- /dockerfiles/heat-container-agent/service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Heat Container Agent system image 3 | 4 | [Service] 5 | ExecStart=$EXEC_START 6 | ExecStop=$EXEC_STOP 7 | WorkingDirectory=$DESTDIR 8 | Restart=always 9 | StartLimitInterval=0 10 | RestartSec=10 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /dockerfiles/heat-container-agent/tmpfiles.template: -------------------------------------------------------------------------------- 1 | d /var/lib/heat-container-agent - - - - - 2 | Z /var/lib/heat-container-agent - - - - - 3 | d /var/run/heat-config - - - - - 4 | Z /var/run/heat-config - - - - - 5 | d /var/run/os-collect-config - - - - - 6 | Z /var/run/os-collect-config - - - - - 7 | d /opt/stack/os-config-refresh - - - - - 8 | Z /opt/stack/os-config-refresh - - - - - 9 | d /srv/magnum - - - - - 10 | Z /srv/magnum - - - - - 11 | -------------------------------------------------------------------------------- /dockerfiles/helm-client/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG HELM_VERSION=v3.2.0 2 | FROM debian:buster-slim 3 | 4 | ARG HELM_VERSION 5 | 6 | RUN apt-get update \ 7 | && apt-get install -y \ 8 | curl \ 9 | bash \ 10 | && curl -o helm.tar.gz https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz \ 11 | && mkdir -p helm \ 12 | && tar zxvf helm.tar.gz -C helm \ 13 | && cp helm/linux-amd64/helm /usr/local/bin \ 14 | && chmod +x /usr/local/bin/helm \ 15 | && rm -rf helm* 16 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-apiserver/apiserver: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure the kube-apiserver 5 | # 6 | 7 | # The 
address on the local server to listen to. 8 | KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" 9 | 10 | # The port on the local server to listen on. 11 | # KUBE_API_PORT="--port=8080" 12 | 13 | # Port minions listen on 14 | # KUBELET_PORT="--kubelet-port=10250" 15 | 16 | # Comma separated list of nodes in the etcd cluster 17 | KUBE_ETCD_SERVERS="--etcd-servers=http://127.0.0.1:2379,http://127.0.0.1:4001" 18 | 19 | # Address range to use for services 20 | KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16" 21 | 22 | # default admission control policies 23 | KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" 24 | 25 | # Add your own! 26 | KUBE_API_ARGS="" 27 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-apiserver/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v=0" 17 | 18 | # How the controller-manager, scheduler, and proxy find the apiserver 19 | KUBE_MASTER="--master=http://127.0.0.1:8080" 20 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-apiserver/launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . /etc/kubernetes/apiserver 4 | . 
/etc/kubernetes/config 5 | 6 | ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_ETCD_SERVERS $KUBE_API_ADDRESS $KUBE_API_PORT $KUBELET_PORT $KUBE_ALLOW_PRIV $KUBE_SERVICE_ADDRESSES $KUBE_ADMISSION_CONTROL $KUBE_API_ARGS" 7 | 8 | ARGS=$(echo $ARGS | sed s#--tls-ca-file=/etc/kubernetes/certs/ca.crt##) 9 | # KubeletPluginsWatcher=true, 10 | ARGS=$(echo $ARGS | sed s/KubeletPluginsWatcher=true,//) 11 | 12 | exec /usr/local/bin/kube-apiserver $ARGS 13 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-apiserver/service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubernetes-apiserver 3 | After=network-online.target 4 | 5 | [Service] 6 | ExecStart=$EXEC_START 7 | ExecStop=$EXEC_STOP 8 | WorkingDirectory=$DESTDIR 9 | Restart=always 10 | StartLimitInterval=0 11 | RestartSec=10 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | 16 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-apiserver/sources: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/dockerfiles/kubernetes-apiserver/sources -------------------------------------------------------------------------------- /dockerfiles/kubernetes-controller-manager/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v=0" 17 | 18 | # How the controller-manager, scheduler, and proxy find the apiserver 19 | KUBE_MASTER="--master=http://127.0.0.1:8080" 20 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-controller-manager/controller-manager: -------------------------------------------------------------------------------- 1 | ### 2 | # The following values are used to configure the kubernetes controller-manager 3 | 4 | # defaults from config and apiserver should be adequate 5 | 6 | # Add your own! 7 | KUBE_CONTROLLER_MANAGER_ARGS="" 8 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-controller-manager/launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . /etc/kubernetes/controller-manager 4 | . 
/etc/kubernetes/config 5 | 6 | ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_CONTROLLER_MANAGER_ARGS" 7 | 8 | ARGS="${ARGS} --secure-port=0" 9 | # KubeletPluginsWatcher=true, 10 | ARGS=$(echo $ARGS | sed s/KubeletPluginsWatcher=true,//) 11 | 12 | exec /usr/local/bin/kube-controller-manager $ARGS 13 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-controller-manager/service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubernetes-controller-manager 3 | 4 | [Service] 5 | ExecStart=$EXEC_START 6 | ExecStop=$EXEC_STOP 7 | WorkingDirectory=$DESTDIR 8 | Restart=always 9 | StartLimitInterval=0 10 | RestartSec=10 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | 15 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-controller-manager/sources: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/dockerfiles/kubernetes-controller-manager/sources -------------------------------------------------------------------------------- /dockerfiles/kubernetes-kubelet/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v=0" 17 | 18 | # How the controller-manager, scheduler, and proxy find the apiserver 19 | KUBE_MASTER="--master=http://127.0.0.1:8080" 20 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-kubelet/kubelet: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes kubelet (minion) config 3 | 4 | # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) 5 | KUBELET_ADDRESS="--address=127.0.0.1" 6 | 7 | # The port for the info server to serve on 8 | # KUBELET_PORT="--port=10250" 9 | 10 | # You may leave this blank to use the actual hostname 11 | KUBELET_HOSTNAME="--hostname-override=127.0.0.1" 12 | 13 | # Edit the kubelet.kubeconfig to have correct cluster server address 14 | KUBELET_KUBECONFIG=/etc/kubernetes/kubelet.kubeconfig 15 | 16 | # Add your own! 17 | KUBELET_ARGS="--cgroup-driver=systemd --fail-swap-on=false" 18 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-kubelet/launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . /etc/kubernetes/kubelet 4 | . 
/etc/kubernetes/config 5 | 6 | TEMP_KUBELET_ARGS='--cgroups-per-qos=false --enforce-node-allocatable=' 7 | 8 | ARGS="$@ $TEMP_KUBELET_ARGS $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBELET_API_SERVER $KUBELET_ADDRESS $KUBELET_PORT $KUBELET_HOSTNAME $KUBE_ALLOW_PRIV $KUBELET_ARGS" 9 | 10 | ARGS=$(echo $ARGS | sed s/--cadvisor-port=0//) 11 | ARGS=$(echo $ARGS | sed s/--require-kubeconfig//) 12 | ARGS=$(echo $ARGS | sed s/node-role/node/) 13 | 14 | exec /hyperkube kubelet $ARGS --containerized 15 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-kubelet/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0", 3 | "defaultValues": { 4 | "ADDTL_MOUNTS": "" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-kubelet/service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubernetes-kubelet 3 | After=docker.service 4 | 5 | [Service] 6 | ExecStart=$EXEC_START 7 | ExecStop=$EXEC_STOP 8 | WorkingDirectory=$DESTDIR 9 | Restart=always 10 | StartLimitInterval=0 11 | RestartSec=10 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | 16 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-kubelet/sources: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/dockerfiles/kubernetes-kubelet/sources -------------------------------------------------------------------------------- /dockerfiles/kubernetes-kubelet/tmpfiles.template: -------------------------------------------------------------------------------- 1 | d ${STATE_DIRECTORY}/kubelet - - - - - 2 | d /var/lib/cni - - - - - 3 | d /var/run/secrets - - - - - 4 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-proxy/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v=0" 17 | 18 | # How the controller-manager, scheduler, and proxy find the apiserver 19 | KUBE_MASTER="--master=http://127.0.0.1:8080" 20 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-proxy/launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . /etc/kubernetes/proxy 4 | . /etc/kubernetes/config 5 | 6 | ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_PROXY_ARGS" 7 | 8 | exec /usr/local/bin/kube-proxy $ARGS 9 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-proxy/proxy: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes proxy config 3 | 4 | # default config should be adequate 5 | 6 | # Add your own! 
7 | KUBE_PROXY_ARGS="" 8 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-proxy/service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubernetes-proxy 3 | 4 | [Service] 5 | ExecStart=$EXEC_START 6 | ExecStop=$EXEC_STOP 7 | WorkingDirectory=$DESTDIR 8 | Restart=always 9 | StartLimitInterval=0 10 | RestartSec=10 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | 15 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-proxy/sources: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/dockerfiles/kubernetes-proxy/sources -------------------------------------------------------------------------------- /dockerfiles/kubernetes-scheduler/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v=0" 17 | 18 | # How the controller-manager, scheduler, and proxy find the apiserver 19 | KUBE_MASTER="--master=http://127.0.0.1:8080" 20 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-scheduler/launch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | . /etc/kubernetes/scheduler 4 | . /etc/kubernetes/config 5 | 6 | ARGS="$@ $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_MASTER $KUBE_SCHEDULER_ARGS" 7 | 8 | exec /usr/local/bin/kube-scheduler $ARGS 9 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-scheduler/scheduler: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes scheduler config 3 | 4 | # default config should be adequate 5 | 6 | # Add your own! 
7 | KUBE_SCHEDULER_ARGS="" 8 | -------------------------------------------------------------------------------- /dockerfiles/kubernetes-scheduler/service.template: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubernetes-scheduler 3 | 4 | [Service] 5 | ExecStart=$EXEC_START 6 | ExecStop=$EXEC_STOP 7 | WorkingDirectory=$DESTDIR 8 | Restart=always 9 | StartLimitInterval=0 10 | RestartSec=10 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | 15 | -------------------------------------------------------------------------------- /etc/magnum/README-magnum.conf.txt: -------------------------------------------------------------------------------- 1 | To generate the sample magnum.conf file, run the following 2 | command from the top level of the magnum directory: 3 | 4 | tox -egenconfig 5 | 6 | -------------------------------------------------------------------------------- /etc/magnum/magnum-config-generator.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | output_file = etc/magnum/magnum.conf.sample 3 | wrap_width = 79 4 | 5 | namespace = magnum.conf 6 | namespace = oslo.concurrency 7 | namespace = oslo.db 8 | namespace = oslo.log 9 | namespace = oslo.messaging 10 | namespace = oslo.middleware.cors 11 | namespace = oslo.policy 12 | namespace = oslo.reports 13 | namespace = oslo.service.periodic_task 14 | namespace = oslo.service.service 15 | namespace = oslo.versionedobjects 16 | namespace = keystonemiddleware.auth_token 17 | -------------------------------------------------------------------------------- /etc/magnum/magnum-policy-generator.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | output_file = etc/magnum/policy.yaml.sample 3 | namespace = magnum -------------------------------------------------------------------------------- /functional_creds.conf.sample: -------------------------------------------------------------------------------- 1 | # Credentials for functional testing 2 | [auth] 3 | auth_url = http://127.0.0.1:5000/v3 4 | magnum_url = http://127.0.0.1:9511/v1 5 | username = demo 6 | project_name = demo 7 | project_domain_id = default 8 | user_domain_id = default 9 | password = password 10 | auth_version = v3 11 | insecure=False 12 | [admin] 13 | user = admin 14 | project_name = admin 15 | pass = password 16 | project_domain_id = default 17 | user_domain_id = default 18 | [magnum] 19 | image_id = fedora-atomic-latest 20 | nic_id = public 21 | keypair_id = default 22 | flavor_id = s1.magnum 23 | master_flavor_id = m1.magnum 24 | -------------------------------------------------------------------------------- /magnum/__init__.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
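The ``README-magnum.conf.txt`` and the two generator configuration files above are what produce the sample ``magnum.conf`` and ``policy.yaml`` referenced by the configuration documentation earlier in this tree. A minimal sketch of generating both by hand from a source checkout, assuming ``oslo.config`` and ``oslo.policy`` are installed; the ``tox -egenconfig`` target from the README wraps the first generator call:

.. code-block:: console

   $ # via tox, as documented in README-magnum.conf.txt
   $ tox -egenconfig

   $ # or by calling the oslo generators directly against the generator
   $ # configs shown above; the output paths come from their output_file
   $ # settings (etc/magnum/magnum.conf.sample, etc/magnum/policy.yaml.sample)
   $ oslo-config-generator --config-file etc/magnum/magnum-config-generator.conf
   $ oslopolicy-sample-generator --config-file etc/magnum/magnum-policy-generator.conf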
12 | 13 | import threading 14 | 15 | import pbr.version 16 | 17 | 18 | __version__ = pbr.version.VersionInfo( 19 | 'magnum').version_string() 20 | 21 | # Make a project global TLS trace storage repository 22 | TLS = threading.local() 23 | -------------------------------------------------------------------------------- /magnum/api/__init__.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | import paste.urlmap 13 | 14 | 15 | def root_app_factory(loader, global_conf, **local_conf): 16 | return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf) 17 | -------------------------------------------------------------------------------- /magnum/api/controllers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/api/controllers/__init__.py -------------------------------------------------------------------------------- /magnum/api/expose.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); 2 | # you may not use this file except in compliance with the License. 3 | # You may obtain a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | 13 | import wsmeext.pecan as wsme_pecan 14 | 15 | 16 | def expose(*args, **kwargs): 17 | """Ensure that only JSON, and not XML, is supported.""" 18 | if 'rest_content_types' not in kwargs: 19 | kwargs['rest_content_types'] = ('json',) 20 | return wsme_pecan.wsexpose(*args, **kwargs) 21 | -------------------------------------------------------------------------------- /magnum/api/middleware/__init__.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
12 | 13 | from magnum.api.middleware import auth_token 14 | from magnum.api.middleware import parsable_error 15 | 16 | 17 | AuthTokenMiddleware = auth_token.AuthTokenMiddleware 18 | ParsableErrorMiddleware = parsable_error.ParsableErrorMiddleware 19 | 20 | __all__ = (AuthTokenMiddleware, 21 | ParsableErrorMiddleware) 22 | -------------------------------------------------------------------------------- /magnum/cmd/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Fujitsu Ltd. 2 | # All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | 16 | # NOTE(hieulq): we monkey patch all eventlet services for easier tracking/debug 17 | 18 | import eventlet 19 | 20 | eventlet.monkey_patch() 21 | -------------------------------------------------------------------------------- /magnum/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/common/__init__.py -------------------------------------------------------------------------------- /magnum/common/x509/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/common/x509/__init__.py -------------------------------------------------------------------------------- /magnum/conductor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/conductor/__init__.py -------------------------------------------------------------------------------- /magnum/conductor/handlers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/conductor/handlers/__init__.py -------------------------------------------------------------------------------- /magnum/conductor/handlers/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/conductor/handlers/common/__init__.py -------------------------------------------------------------------------------- /magnum/conductor/tasks/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 NEC Corporation. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. 
You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import taskflow.task as task 16 | 17 | 18 | class OSBaseTask(task.Task): 19 | def __init__(self, os_client, name=None, **kwargs): 20 | self.os_client = os_client 21 | 22 | super(OSBaseTask, self).__init__(name=name, **kwargs) 23 | -------------------------------------------------------------------------------- /magnum/conf/profiler.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not 2 | # use this file except in compliance with the License. You may obtain a copy 3 | # of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, 9 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 | # See the License for the specific language governing permissions and 11 | # limitations under the License. 12 | 13 | from oslo_utils import importutils 14 | 15 | 16 | profiler_opts = importutils.try_import('osprofiler.opts') 17 | 18 | 19 | def register_opts(conf): 20 | if profiler_opts: 21 | profiler_opts.set_defaults(conf) 22 | 23 | 24 | def list_opts(): 25 | return { 26 | profiler_opts._profiler_opt_group: profiler_opts._PROFILER_OPTS 27 | } 28 | -------------------------------------------------------------------------------- /magnum/db/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/db/__init__.py -------------------------------------------------------------------------------- /magnum/db/sqlalchemy/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/db/sqlalchemy/__init__.py -------------------------------------------------------------------------------- /magnum/db/sqlalchemy/alembic/README: -------------------------------------------------------------------------------- 1 | Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation 2 | 3 | To create alembic migrations use: 4 | $ magnum-db-manage revision --message "description of revision" --autogenerate 5 | 6 | Stamp db with most recent migration version, without actually running migrations 7 | $ magnum-db-manage stamp head 8 | 9 | Upgrade can be performed by: 10 | $ magnum-db-manage upgrade 11 | $ magnum-db-manage upgrade head 12 | -------------------------------------------------------------------------------- /magnum/db/sqlalchemy/alembic/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | 9 | # revision identifiers, used by Alembic. 
10 | revision = ${repr(up_revision)} 11 | down_revision = ${repr(down_revision)} 12 | 13 | from alembic import op 14 | import sqlalchemy as sa 15 | ${imports if imports else ""} 16 | 17 | def upgrade(): 18 | ${upgrades if upgrades else "pass"} 19 | -------------------------------------------------------------------------------- /magnum/drivers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/drivers/__init__.py -------------------------------------------------------------------------------- /magnum/drivers/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/drivers/common/__init__.py -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/disable_floating_ip.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to disable FloatingIP in a Kubernetes cluster by mapping 2 | # FloatingIP-related resource types to OS::Heat::None 3 | resource_registry: 4 | "Magnum::FloatingIPAddressSwitcher": "../fragments/floating_ip_address_switcher_private.yaml" 5 | 6 | # kubemaster.yaml 7 | "Magnum::Optional::KubeMaster::Neutron::FloatingIP": "OS::Heat::None" 8 | 9 | # kubeminion.yaml 10 | "Magnum::Optional::KubeMinion::Neutron::FloatingIP": "OS::Heat::None" 11 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/disable_lb_floating_ip.yaml: -------------------------------------------------------------------------------- 1 | # disables the use of floating ip on the load balancer 2 | 3 | resource_registry: 4 | "Magnum::Optional::Neutron::LBaaS::FloatingIP": "OS::Heat::None" 5 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/enable_floating_ip.yaml: -------------------------------------------------------------------------------- 1 | resource_registry: 2 | "Magnum::FloatingIPAddressSwitcher": "../fragments/floating_ip_address_switcher_public.yaml" 3 | 4 | # kubemaster.yaml 5 | "Magnum::Optional::KubeMaster::Neutron::FloatingIP": "OS::Neutron::FloatingIP" 6 | 7 | # kubeminion.yaml 8 | "Magnum::Optional::KubeMinion::Neutron::FloatingIP": "OS::Neutron::FloatingIP" 9 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/enable_lb_floating_ip.yaml: -------------------------------------------------------------------------------- 1 | # enables the use of floating ip on the load balancer 2 | 3 | resource_registry: 4 | "Magnum::Optional::Neutron::LBaaS::FloatingIP": "OS::Neutron::FloatingIP" 5 | 6 | "Magnum::FloatingIPAddressSwitcher": "../fragments/floating_ip_address_switcher_public.yaml" 7 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/no_etcd_volume.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to not use a cinder volume for etcd storage 2 | resource_registry: 3 | "Magnum::Optional::Etcd::Volume": "OS::Heat::None" 4 | "Magnum::Optional::Etcd::VolumeAttachment": "OS::Heat::None" 5 | 
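Each of the environment files above (and those that follow) works the same way: a ``Magnum::Optional::...`` alias in the ``resource_registry`` is remapped either to a concrete Heat, Neutron or Cinder resource type or to ``OS::Heat::None``, so the driver can switch a feature on or off without editing the main template. A rough sketch of exercising the same mechanism directly with the orchestration CLI; the template path, parameter and stack name are placeholders, and a real ``kubecluster.yaml`` requires many more parameters than shown here:

.. code-block:: console

   $ # hypothetical manual test: later -e files override earlier
   $ # resource_registry entries, just as the driver composes them
   $ openstack stack create k8s-env-test \
       -t kubecluster.yaml \
       -e environments/no_etcd_volume.yaml \
       -e environments/disable_lb_floating_ip.yaml \
       --parameter ssh_key_name=default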
-------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/no_master_lb.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to disable LBaaS in a cluster by mapping 2 | # LBaaS-related resource types to OS::Heat::None 3 | resource_registry: 4 | "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_master.yaml 5 | 6 | # Cluster template 7 | "Magnum::Optional::Neutron::LBaaS::LoadBalancer": "OS::Heat::None" 8 | "Magnum::Optional::Neutron::LBaaS::Listener": "OS::Heat::None" 9 | "Magnum::Optional::Neutron::LBaaS::Pool": "OS::Heat::None" 10 | "Magnum::Optional::Neutron::LBaaS::HealthMonitor": "OS::Heat::None" 11 | "Magnum::Optional::Neutron::LBaaS::FloatingIP": "OS::Heat::None" 12 | 13 | # Master node template 14 | "Magnum::Optional::Neutron::LBaaS::PoolMember": "OS::Heat::None" 15 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/no_private_network.yaml: -------------------------------------------------------------------------------- 1 | resource_registry: 2 | "Magnum::NetworkSwitcher": ../fragments/network_switcher_existing.yaml 3 | 4 | # Cluster template 5 | "Magnum::Optional::Neutron::Subnet": "OS::Heat::None" 6 | "Magnum::Optional::Neutron::Net": "OS::Heat::None" 7 | "Magnum::Optional::Neutron::Router": "OS::Heat::None" 8 | "Magnum::Optional::Neutron::RouterInterface": "OS::Heat::None" 9 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/no_volume.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to NOT use a cinder volume to store containers 2 | resource_registry: 3 | "Magnum::Optional::Cinder::Volume": "OS::Heat::None" 4 | "Magnum::Optional::Cinder::VolumeAttachment": "OS::Heat::None" 5 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/with_etcd_volume.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to use a volume for etcd storage 2 | resource_registry: 3 | "Magnum::Optional::Etcd::Volume": "OS::Cinder::Volume" 4 | "Magnum::Optional::Etcd::VolumeAttachment": "OS::Cinder::VolumeAttachment" 5 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/with_master_lb.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to enable LBaaS in a cluster by mapping 2 | # LBaaS-related resource types to the real LBaaS resource types. 
3 | resource_registry: 4 | "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_pool.yaml 5 | 6 | # Cluster template 7 | "Magnum::Optional::Neutron::LBaaS::LoadBalancer": "OS::Neutron::LBaaS::LoadBalancer" 8 | "Magnum::Optional::Neutron::LBaaS::Listener": "OS::Neutron::LBaaS::Listener" 9 | "Magnum::Optional::Neutron::LBaaS::Pool": "OS::Neutron::LBaaS::Pool" 10 | "Magnum::Optional::Neutron::LBaaS::HealthMonitor": "OS::Neutron::LBaaS::HealthMonitor" 11 | 12 | # Master node template 13 | "Magnum::Optional::Neutron::LBaaS::PoolMember": "OS::Neutron::LBaaS::PoolMember" 14 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/with_master_lb_octavia.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to enable LBaaS in a cluster by mapping 2 | # LBaaS-related resource types to the real Octavia resource types. 3 | resource_registry: 4 | "Magnum::ApiGatewaySwitcher": ../fragments/api_gateway_switcher_pool.yaml 5 | 6 | # Cluster template 7 | "Magnum::Optional::Neutron::LBaaS::LoadBalancer": "OS::Octavia::LoadBalancer" 8 | "Magnum::Optional::Neutron::LBaaS::Listener": "OS::Octavia::Listener" 9 | "Magnum::Optional::Neutron::LBaaS::Pool": "OS::Octavia::Pool" 10 | "Magnum::Optional::Neutron::LBaaS::HealthMonitor": "OS::Octavia::HealthMonitor" 11 | 12 | # Master node template 13 | "Magnum::Optional::Neutron::LBaaS::PoolMember": "OS::Octavia::PoolMember" 14 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/with_private_network.yaml: -------------------------------------------------------------------------------- 1 | resource_registry: 2 | "Magnum::NetworkSwitcher": ../fragments/network_switcher_private.yaml 3 | 4 | # Cluster template 5 | "Magnum::Optional::Neutron::Subnet": "OS::Neutron::Subnet" 6 | "Magnum::Optional::Neutron::Net": "OS::Neutron::Net" 7 | "Magnum::Optional::Neutron::Router": "OS::Neutron::Router" 8 | "Magnum::Optional::Neutron::RouterInterface": "OS::Neutron::RouterInterface" 9 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/environments/with_volume.yaml: -------------------------------------------------------------------------------- 1 | # Environment file to use a cinder volume to store containers 2 | resource_registry: 3 | "Magnum::Optional::Cinder::Volume": "OS::Cinder::Volume" 4 | "Magnum::Optional::Cinder::VolumeAttachment": "OS::Cinder::VolumeAttachment" 5 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/fragments/api_gateway_switcher_master.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | description: > 4 | This is a template resource that accepts public and private IPs from both 5 | a Neutron LBaaS Pool and a master node. It connects the master inputs 6 | to its outputs, essentially acting as one state of a multiplexer. 
7 | 8 | parameters: 9 | 10 | pool_public_ip: 11 | type: string 12 | default: "" 13 | 14 | pool_private_ip: 15 | type: string 16 | default: "" 17 | 18 | master_public_ip: 19 | type: string 20 | default: "" 21 | 22 | master_private_ip: 23 | type: string 24 | default: "" 25 | 26 | outputs: 27 | 28 | public_ip: 29 | value: {get_param: master_public_ip} 30 | 31 | private_ip: 32 | value: {get_param: master_private_ip} 33 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/fragments/api_gateway_switcher_pool.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | description: > 4 | This is a template resource that accepts public and private IPs from both 5 | a Neutron LBaaS Pool and a master node. It connects the pool inputs 6 | to its outputs, essentially acting as one state of a multiplexer. 7 | 8 | parameters: 9 | 10 | pool_public_ip: 11 | type: string 12 | default: "" 13 | 14 | pool_private_ip: 15 | type: string 16 | default: "" 17 | 18 | master_public_ip: 19 | type: string 20 | default: "" 21 | 22 | master_private_ip: 23 | type: string 24 | default: "" 25 | 26 | outputs: 27 | 28 | public_ip: 29 | value: {get_param: pool_public_ip} 30 | 31 | private_ip: 32 | value: {get_param: pool_private_ip} 33 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/fragments/atomic-install-openstack-ca.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -ux 2 | 3 | CA_FILE=/etc/pki/ca-trust/source/anchors/openstack-ca.pem 4 | 5 | if [ -n "$OPENSTACK_CA" ] ; then 6 | cat >> $CA_FILE < 4 | This is a template resource that accepts public and private IPs. 5 | It connects private ip address to its outputs, essentially acting as 6 | one state of a multiplexer. 7 | 8 | parameters: 9 | 10 | public_ip: 11 | type: string 12 | default: "" 13 | 14 | private_ip: 15 | type: string 16 | default: "" 17 | 18 | outputs: 19 | 20 | ip_address: 21 | value: {get_param: private_ip} 22 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/fragments/floating_ip_address_switcher_public.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | description: > 4 | This is a template resource that accepts public and private IPs. 5 | It connects public ip address to its outputs, essentially acting as 6 | one state of a multiplexer. 
7 | 8 | parameters: 9 | 10 | public_ip: 11 | type: string 12 | default: "" 13 | 14 | private_ip: 15 | type: string 16 | default: "" 17 | 18 | outputs: 19 | 20 | ip_address: 21 | value: {get_param: public_ip} 22 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/fragments/network_switcher_existing.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | parameters: 4 | 5 | private_network: 6 | type: string 7 | default: "" 8 | 9 | existing_network: 10 | type: string 11 | default: "" 12 | 13 | private_subnet: 14 | type: string 15 | default: "" 16 | 17 | existing_subnet: 18 | type: string 19 | default: "" 20 | 21 | outputs: 22 | 23 | network: 24 | value: {get_param: existing_network} 25 | 26 | subnet: 27 | value: {get_param: existing_subnet} 28 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/fragments/network_switcher_private.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2014-10-16 2 | 3 | parameters: 4 | 5 | private_network: 6 | type: string 7 | default: "" 8 | 9 | existing_network: 10 | type: string 11 | default: "" 12 | 13 | private_subnet: 14 | type: string 15 | default: "" 16 | 17 | existing_subnet: 18 | type: string 19 | default: "" 20 | 21 | outputs: 22 | 23 | network: 24 | value: {get_param: private_network} 25 | 26 | subnet: 27 | value: {get_param: private_subnet} 28 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/kubernetes/fragments/disable-selinux.sh: -------------------------------------------------------------------------------- 1 | #cloud-boothook 2 | 3 | setenforce `[[ "$SELINUX_MODE" == "enforcing" ]] && echo 1 || echo 0` 4 | sed -i ' 5 | /^SELINUX=/ s/=.*/=$SELINUX_MODE/ 6 | ' /etc/selinux/config 7 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/kubernetes/fragments/enable-cert-api-manager.sh: -------------------------------------------------------------------------------- 1 | step="enable-cert-api-manager" 2 | printf "Starting to run ${step}\n" 3 | 4 | . /etc/sysconfig/heat-params 5 | 6 | if [ "$(echo "${CERT_MANAGER_API}" | tr '[:upper:]' '[:lower:]')" = "true" ]; then 7 | cert_dir=/etc/kubernetes/certs 8 | 9 | echo -e "${CA_KEY}" > ${cert_dir}/ca.key 10 | 11 | # chown kube:kube ${cert_dir}/ca.key 12 | chmod 400 ${cert_dir}/ca.key 13 | fi 14 | 15 | printf "Finished running ${step}\n" 16 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/kubernetes/fragments/enable-ingress-controller.sh: -------------------------------------------------------------------------------- 1 | step="enable-ingress-controller" 2 | printf "Starting to run ${step}\n" 3 | 4 | . /etc/sysconfig/heat-params 5 | 6 | function writeFile { 7 | # $1 is filename 8 | # $2 is file content 9 | 10 | [ -f ${1} ] || { 11 | echo "Writing File: $1" 12 | mkdir -p $(dirname ${1}) 13 | cat << EOF > ${1} 14 | $2 15 | EOF 16 | } 17 | } 18 | 19 | ingress_controller=$(echo $INGRESS_CONTROLLER | tr '[:upper:]' '[:lower:]') 20 | case "$ingress_controller" in 21 | "") 22 | echo "No ingress controller configured." 
23 | ;; 24 | "traefik") 25 | $enable-ingress-traefik 26 | ;; 27 | "octavia") 28 | $enable-ingress-octavia 29 | ;; 30 | *) 31 | echo "Ingress controller $ingress_controller not supported." 32 | ;; 33 | esac 34 | 35 | printf "Finished running ${step}\n" 36 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/kubernetes/fragments/enable-services-minion.sh: -------------------------------------------------------------------------------- 1 | set -x 2 | 3 | ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" 4 | 5 | # docker is already enabled and possibly running on centos atomic host 6 | # so we need to stop it first and delete the docker0 bridge (which will 7 | # be re-created using the flannel-provided subnet). 8 | echo "stopping docker" 9 | if [ ${CONTAINER_RUNTIME} != "containerd" ] ; then 10 | $ssh_cmd systemctl stop docker 11 | fi 12 | 13 | # make sure we pick up any modified unit files 14 | $ssh_cmd systemctl daemon-reload 15 | 16 | if [ ${CONTAINER_RUNTIME} = "containerd" ] ; then 17 | container_runtime_service="containerd" 18 | else 19 | container_runtime_service="docker" 20 | fi 21 | for action in enable restart; do 22 | for service in ${container_runtime_service} kubelet kube-proxy; do 23 | echo "$action service $service" 24 | $ssh_cmd systemctl $action $service 25 | done 26 | done 27 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/kubernetes/fragments/rotate-kubernetes-ca-certs-worker.sh: -------------------------------------------------------------------------------- 1 | echo "START: rotate CA certs on worker" 2 | 3 | set +x 4 | . /etc/sysconfig/heat-params 5 | set -x 6 | 7 | set -eu -o pipefail 8 | 9 | ssh_cmd="ssh -F /srv/magnum/.ssh/config root@localhost" 10 | 11 | service_account_key=$kube_service_account_key_input 12 | service_account_private_key=$kube_service_account_private_key_input 13 | 14 | if [ ! -z "$service_account_key" ] && [ ! -z "$service_account_private_key" ] ; then 15 | 16 | for service in kubelet kube-proxy; do 17 | echo "restart service $service" 18 | $ssh_cmd systemctl restart $service 19 | done 20 | fi 21 | 22 | echo "END: rotate CA certs on worker" 23 | -------------------------------------------------------------------------------- /magnum/drivers/common/templates/kubernetes/helm/metrics-server.sh: -------------------------------------------------------------------------------- 1 | set +x 2 | . 
/etc/sysconfig/heat-params 3 | set -ex 4 | 5 | CHART_NAME="metrics-server" 6 | 7 | if [ "$(echo ${METRICS_SERVER_ENABLED} | tr '[:upper:]' '[:lower:]')" = "true" ]; then 8 | echo "Writing ${CHART_NAME} config" 9 | 10 | HELM_CHART_DIR="/srv/magnum/kubernetes/helm/magnum" 11 | mkdir -p ${HELM_CHART_DIR} 12 | 13 | cat << EOF >> ${HELM_CHART_DIR}/requirements.yaml 14 | - name: ${CHART_NAME} 15 | version: ${METRICS_SERVER_CHART_TAG} 16 | repository: https://kubernetes-sigs.github.io/metrics-server/ 17 | EOF 18 | 19 | cat << EOF >> ${HELM_CHART_DIR}/values.yaml 20 | metrics-server: 21 | image: 22 | repository: ${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/metrics-server/}metrics-server 23 | resources: 24 | requests: 25 | cpu: 100m 26 | memory: 200Mi 27 | args: 28 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 29 | EOF 30 | fi 31 | -------------------------------------------------------------------------------- /magnum/drivers/heat/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/drivers/heat/__init__.py -------------------------------------------------------------------------------- /magnum/drivers/k8s_fedora_coreos_v1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/drivers/k8s_fedora_coreos_v1/__init__.py -------------------------------------------------------------------------------- /magnum/drivers/k8s_fedora_coreos_v1/version.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 - Rackspace Hosting 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
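The fragment and Helm scripts above read their switches (``METRICS_SERVER_ENABLED``, ``METRICS_SERVER_CHART_TAG``, ``CONTAINER_INFRA_PREFIX``, ``INGRESS_CONTROLLER`` and so on) from ``/etc/sysconfig/heat-params``, which Magnum populates from cluster template labels. A hedged sketch of setting the corresponding labels at template-creation time; the image, network and flavor names are placeholders:

.. code-block:: console

   $ # placeholder image/network/flavor names; the labels feed the
   $ # heat-params consumed by metrics-server.sh and
   $ # enable-ingress-controller.sh above
   $ openstack coe cluster template create k8s-template \
       --coe kubernetes \
       --image fedora-coreos-latest \
       --external-network public \
       --flavor m1.small \
       --labels metrics_server_enabled=true,ingress_controller=octavia,container_infra_prefix=registry.example.com/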
14 | 15 | version = '1.0.0' 16 | driver = 'k8s_fedora_coreos_v1' 17 | container_version = '1.12.6' 18 | -------------------------------------------------------------------------------- /magnum/hacking/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/hacking/__init__.py -------------------------------------------------------------------------------- /magnum/service/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/service/__init__.py -------------------------------------------------------------------------------- /magnum/servicegroup/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/servicegroup/__init__.py -------------------------------------------------------------------------------- /magnum/tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/__init__.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
12 | 13 | import logging 14 | 15 | logging.basicConfig( 16 | filename='functional-tests.log', 17 | filemode='w', 18 | level=logging.DEBUG, 19 | ) 20 | -------------------------------------------------------------------------------- /magnum/tests/functional/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/api/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/api/v1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/api/v1/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/api/v1/clients/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/api/v1/clients/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/api/v1/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/api/v1/models/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/common/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/k8s/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/k8s/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/k8s/test_magnum_python_client.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
12 | 13 | 14 | from magnum.tests.functional.python_client_base import BaseMagnumClient 15 | 16 | 17 | class TestListResources(BaseMagnumClient): 18 | def test_cluster_model_list(self): 19 | self.assertIsNotNone(self.cs.cluster_templates.list()) 20 | 21 | def test_cluster_list(self): 22 | self.assertIsNotNone(self.cs.clusters.list()) 23 | -------------------------------------------------------------------------------- /magnum/tests/functional/k8s_fcos/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/k8s_fcos/__init__.py -------------------------------------------------------------------------------- /magnum/tests/functional/k8s_ironic/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/functional/k8s_ironic/__init__.py -------------------------------------------------------------------------------- /magnum/tests/releasenotes/notes/separated-ca-certs-299c95eea1ffd9b1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Support creating different CA for kubernetes, etcd and front-proxy. 5 | -------------------------------------------------------------------------------- /magnum/tests/unit/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/api/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/api/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/api/controllers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/api/controllers/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/api/controllers/auth-paste.ini: -------------------------------------------------------------------------------- 1 | [composite:main] 2 | paste.composite_factory = magnum.api:root_app_factory 3 | /: api 4 | /healthcheck: healthcheck 5 | 6 | [pipeline:api] 7 | pipeline = cors request_id authtoken api_v1 8 | 9 | [app:api_v1] 10 | paste.app_factory = magnum.api.app:app_factory 11 | 12 | [filter:authtoken] 13 | paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory 14 | 15 | [filter:request_id] 16 | paste.filter_factory = oslo_middleware:RequestId.factory 17 | 18 | [filter:cors] 19 | paste.filter_factory = oslo_middleware.cors:filter_factory 20 | oslo_config_project = magnum 21 | 22 | [app:healthcheck] 23 | paste.app_factory = oslo_middleware:Healthcheck.app_factory 24 | backends = disable_by_file 25 | disable_by_file_path = /tmp/magnum_healthcheck_disable 26 | -------------------------------------------------------------------------------- /magnum/tests/unit/api/controllers/auth-root-access.ini: 
-------------------------------------------------------------------------------- 1 | [composite:main] 2 | paste.composite_factory = magnum.api:root_app_factory 3 | /: api 4 | /healthcheck: healthcheck 5 | 6 | [pipeline:api] 7 | pipeline = cors request_id authtoken api_v1 8 | 9 | [app:api_v1] 10 | paste.app_factory = magnum.api.app:app_factory 11 | 12 | [filter:authtoken] 13 | acl_public_routes = / 14 | paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory 15 | 16 | [filter:request_id] 17 | paste.filter_factory = oslo_middleware:RequestId.factory 18 | 19 | [filter:cors] 20 | paste.filter_factory = oslo_middleware.cors:filter_factory 21 | oslo_config_project = magnum 22 | 23 | [app:healthcheck] 24 | paste.app_factory = oslo_middleware:Healthcheck.app_factory 25 | backends = disable_by_file 26 | disable_by_file_path = /tmp/magnum_healthcheck_disable 27 | -------------------------------------------------------------------------------- /magnum/tests/unit/api/controllers/auth-v1-access.ini: -------------------------------------------------------------------------------- 1 | [composite:main] 2 | paste.composite_factory = magnum.api:root_app_factory 3 | /: api 4 | /healthcheck: healthcheck 5 | 6 | [pipeline:api] 7 | pipeline = cors request_id authtoken api_v1 8 | 9 | [app:api_v1] 10 | paste.app_factory = magnum.api.app:app_factory 11 | 12 | [filter:authtoken] 13 | acl_public_routes = /v1 14 | paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory 15 | 16 | [filter:request_id] 17 | paste.filter_factory = oslo_middleware:RequestId.factory 18 | 19 | [filter:cors] 20 | paste.filter_factory = oslo_middleware.cors:filter_factory 21 | oslo_config_project = magnum 22 | 23 | [app:healthcheck] 24 | paste.app_factory = oslo_middleware:Healthcheck.app_factory 25 | backends = disable_by_file 26 | disable_by_file_path = /tmp/magnum_healthcheck_disable 27 | -------------------------------------------------------------------------------- /magnum/tests/unit/api/controllers/noauth-paste.ini: -------------------------------------------------------------------------------- 1 | [pipeline:main] 2 | pipeline = cors request_id api_v1 3 | 4 | [app:api_v1] 5 | paste.app_factory = magnum.api.app:app_factory 6 | 7 | [filter:authtoken] 8 | acl_public_routes = / 9 | paste.filter_factory = magnum.api.middleware.auth_token:AuthTokenMiddleware.factory 10 | 11 | [filter:request_id] 12 | paste.filter_factory = oslo_middleware:RequestId.factory 13 | 14 | [filter:cors] 15 | paste.filter_factory = oslo_middleware.cors:filter_factory 16 | oslo_config_project = magnum 17 | -------------------------------------------------------------------------------- /magnum/tests/unit/api/controllers/v1/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/api/controllers/v1/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/cmd/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/cmd/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/common/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/common/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/common/cert_manager/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/common/cert_manager/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/common/policies/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/common/policies/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/common/x509/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/common/x509/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/conductor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/conductor/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/conductor/handlers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/conductor/handlers/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/conductor/handlers/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/conductor/handlers/common/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/conductor/tasks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/conductor/tasks/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/conf/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/conf/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/db/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/db/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/db/sqlalchemy/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/db/sqlalchemy/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/drivers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/drivers/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/objects/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/objects/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/service/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/service/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/servicegroup/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/servicegroup/__init__.py -------------------------------------------------------------------------------- /magnum/tests/unit/template/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/magnum/tests/unit/template/__init__.py -------------------------------------------------------------------------------- /magnum/tests/utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013 - Red Hat, Inc. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | from magnum.common import context as magnum_context 16 | 17 | 18 | def dummy_context(user='test_username', project_id='test_tenant_id'): 19 | return magnum_context.RequestContext(user=user, project_id=project_id) 20 | -------------------------------------------------------------------------------- /magnum/version.py: -------------------------------------------------------------------------------- 1 | # Copyright 2013 - Noorul Islam K M 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. 
You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import pbr.version 16 | 17 | 18 | version_info = pbr.version.VersionInfo('magnum') 19 | version_string = version_info.version_string 20 | -------------------------------------------------------------------------------- /magnum/wsgi/api.py: -------------------------------------------------------------------------------- 1 | # -*- mode: python -*- 2 | # 3 | # Copyright 2017 SUSE Linux GmbH 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 6 | # not use this file except in compliance with the License. You may obtain 7 | # a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 14 | # License for the specific language governing permissions and limitations 15 | # under the License. 16 | 17 | import sys 18 | 19 | from magnum.api import app as api_app 20 | from magnum.common import service 21 | 22 | service.prepare_service(sys.argv) 23 | 24 | application = api_app.load_app() 25 | -------------------------------------------------------------------------------- /playbooks/container-builder-setup-gate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | roles: 4 | - configure-swap 5 | - ensure-docker 6 | tasks: 7 | - name: Create logging folders 8 | file: 9 | path: "/tmp/logs/{{ item }}" 10 | state: directory 11 | with_items: 12 | - ansible 13 | - build 14 | 15 | - name: Link logs output folder 16 | file: 17 | src: /tmp/logs 18 | dest: "{{ zuul.project.src_dir }}/logs" 19 | state: link 20 | 21 | - name: Install python3-docker 22 | become: true 23 | package: 24 | name: python3-docker 25 | state: present 26 | -------------------------------------------------------------------------------- /playbooks/post/upload-logs.yaml: -------------------------------------------------------------------------------- 1 | - hosts: primary 2 | tasks: 3 | - name: Copy files from {{ ansible_user_dir }}/workspace/ on node 4 | synchronize: 5 | src: '{{ ansible_user_dir }}/workspace/' 6 | dest: '{{ zuul.executor.log_root }}' 7 | mode: pull 8 | copy_links: true 9 | verify_host: true 10 | rsync_opts: 11 | - --include=/logs/** 12 | - --include=*/ 13 | - --exclude=* 14 | - --prune-empty-dirs 15 | -------------------------------------------------------------------------------- /playbooks/pre/prepare-workspace-images.yaml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | roles: 3 | - bindep 4 | 5 | tasks: 6 | 7 | - name: Ensure legacy workspace directory 8 | file: 9 | path: '{{ ansible_user_dir }}/workspace' 10 | state: directory 11 | -------------------------------------------------------------------------------- /playbooks/pre/prepare-workspace.yaml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | name: 
magnum-prepare-workspace 3 | tasks: 4 | - name: Ensure workspace directory exists 5 | file: 6 | path: '{{ ansible_user_dir }}/workspace' 7 | state: directory 8 | 9 | - shell: 10 | cmd: | 11 | set -e 12 | set -x 13 | cat > clonemap.yaml << EOF 14 | clonemap: 15 | - name: openstack/devstack-gate 16 | dest: devstack-gate 17 | EOF 18 | /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ 19 | https://opendev.org \ 20 | openstack/devstack-gate 21 | executable: /bin/bash 22 | chdir: '{{ ansible_user_dir }}/workspace' 23 | environment: '{{ zuul | zuul_legacy_vars }}' 24 | -------------------------------------------------------------------------------- /releasenotes/notes/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/releasenotes/notes/.placeholder -------------------------------------------------------------------------------- /releasenotes/notes/Deploy-traefik-from-the-heat-agent-0bb32f0f2c97405d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Deploy traefik from the heat-agent 5 | 6 | Use kubectl from the heat agent to apply the 7 | traefik deployment. Current behaviour was to 8 | create a systemd unit to send the manifests 9 | to the API. 10 | 11 | This way we will have only one way for applying 12 | manifests to the API. 13 | 14 | This change is triggered to adddress the kubectl 15 | change [0] that is not using 127.0.0.1:8080 as 16 | the default kubernetes API. 17 | 18 | [0] https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.18.md#kubectl 19 | -------------------------------------------------------------------------------- /releasenotes/notes/add-boot-volume-size-check-0262c2b61abc7ccf.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add a validation for the case when boot_volume_size 5 | label and flavor's disk are both zero. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/add-cilium-network-driver-8715190b14cb4f89.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add Cilium as a supported network driver of Kubernetes 5 | -------------------------------------------------------------------------------- /releasenotes/notes/add-container_infra_prefix-516cc43fbc5a0617.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Prefix of all container images used in the cluster (kubernetes components, 5 | coredns, kubernetes-dashboard, node-exporter). For example, 6 | kubernetes-apiserver is pulled from 7 | docker.io/openstackmagnum/kubernetes-apiserver, with this label it can be 8 | changed to myregistry.example.com/mycloud/kubernetes-apiserver. Similarly, 9 | all other components used in the cluster will be prefixed with this label, 10 | which assumes an operator has cloned all expected images in 11 | myregistry.example.com/mycloud. 
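As an illustrative sketch of the rewrite this label implies (the helper below is hypothetical, not Magnum's implementation)::

    # Hypothetical helper: mirrors the behaviour described in the note above.
    def prefixed_image(image, prefix=None,
                       default_prefix="docker.io/openstackmagnum/"):
        """Return the fully qualified image name for a cluster component."""
        return (prefix or default_prefix) + image

    # Without the label, images come from the default registry:
    assert (prefixed_image("kubernetes-apiserver")
            == "docker.io/openstackmagnum/kubernetes-apiserver")
    # With container_infra_prefix=myregistry.example.com/mycloud/ they are rewritten:
    assert (prefixed_image("kubernetes-apiserver", "myregistry.example.com/mycloud/")
            == "myregistry.example.com/mycloud/kubernetes-apiserver")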
12 | -------------------------------------------------------------------------------- /releasenotes/notes/add-docker-storage-driver-to-baymodel-1ed9ba8d43ecfea1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Add docker-storage-driver parameter to baymodel to 4 | allow users to select from the supported drivers. Until 5 | now, only devicemapper was supported. This release 6 | adds support for OverlayFS on Fedora Atomic hosts with 7 | kernel version >= 3.18 (Fedora 22 or higher), resulting 8 | in a significant performance improvement. To use OverlayFS, 9 | SELinux must be enabled and in enforcing mode on the 10 | physical machine, but must be disabled in the container. 11 | Thus, if you select overlay for docker-storage-driver, 12 | SELinux will be disabled inside the containers. 13 | -------------------------------------------------------------------------------- /releasenotes/notes/add-federation-api-cf55d04f96772b0f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | This release introduces a 'federations' endpoint 5 | to the Magnum API, which allows an admin to create 6 | and manage federations of clusters through Magnum. 7 | As the feature is still under development, 8 | the endpoints are not bound to any driver yet. 9 | For more details, please refer to bp/federation-api [1]. 10 | 11 | [1] https://review.openstack.org/#/q/topic:bp/federation-api 12 | -------------------------------------------------------------------------------- /releasenotes/notes/add-hostgw-backend-option-1d1f9d8d95ec374f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Add flannel's host-gw backend option. Magnum deploys clusters over a 4 | dedicated neutron private network by using flannel. Flannel's host-gw 5 | backend gives the best performance in this topology (private layer 2) since 6 | there is no packet processing overhead, no reduction to MTU, and it scales to many 7 | hosts as well as the alternatives. The label "flannel_use_vxlan" was 8 | repurposed when the network driver is flannel. First, rename the label 9 | flannel_use_vxlan to flannel_backend. Second, redefine the value of this 10 | label from "yes/no" to "udp/vxlan/host-gw". 11 | -------------------------------------------------------------------------------- /releasenotes/notes/add-information-about-cluster-in-event-notifications-a3c992ab24b32fbd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add information about the cluster in magnum event notifications. 5 | Previously the CADF notification's target ID was randomly generated and 6 | no other relevant info about the cluster was sent. Cluster details are 7 | now included in the notifications. This is useful for other OpenStack 8 | projects like Searchlight or third-party projects that cache information 9 | regarding OpenStack objects or have custom actions running on 10 | notification. Caching systems can now efficiently update one single 11 | object (e.g. cluster), while without notifications they need to 12 | periodically retrieve the object list, which is inefficient.
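A consumer of these enriched notifications could be wired up with oslo.messaging's notification listener; the endpoint below is only a hypothetical sketch, and the 'notifications' topic is an assumption (use whatever topic your deployment is configured with)::

    # Hypothetical notification consumer; only the documented oslo.messaging
    # notification-listener API is used here.
    from oslo_config import cfg
    import oslo_messaging

    class ClusterEventEndpoint(object):
        def info(self, ctxt, publisher_id, event_type, payload, metadata):
            # The payload now carries cluster details (uuid, name, status, ...)
            # rather than a randomly generated target ID.
            print(event_type, payload)

    transport = oslo_messaging.get_notification_transport(cfg.CONF)
    targets = [oslo_messaging.Target(topic='notifications')]
    listener = oslo_messaging.get_notification_listener(
        transport, targets, [ClusterEventEndpoint()], executor='threading')
    listener.start()
    listener.wait()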
13 | -------------------------------------------------------------------------------- /releasenotes/notes/add-k8s-label-for-portal-network-cidr-a09edab29da6e7da.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Add a new label `service_cluster_ip_range` for kubernetes so that users can 5 | set the IP range for service portals to avoid conflicts with the pod IP range. 6 | 7 | -------------------------------------------------------------------------------- /releasenotes/notes/add-kubelet-to-master-nodes-da2d4ea0d3a332cd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Deploy kubelet on master nodes for the k8s_fedora_atomic driver. 5 | Previously this was done only for calico; now kubelet will run in all 6 | cases. This is really useful for monitoring the master nodes (e.g. deploying fluentd) 7 | or running the kubernetes control plane self-hosted. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/add-master_lb_enabled-to-cluster-c773fac9086b2531.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Users can enable or disable master_lb_enabled when creating 5 | a cluster. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/add-octavia-client-4e5520084eae3c2b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | This adds the octavia client code so that the client can interact with 5 | the Octavia component of OpenStack. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/add-opensuse-driver-f69b6d346ca82b87.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Add support for a new OpenSUSE driver for running a k8s cluster on OpenSUSE. 4 | This driver is experimental for now, and operators need to get it from 5 | the /contrib folder. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/add-overlay-networks-to-swarm-4467986d7853fcd8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Add configuration for overlay networks for the docker 4 | network driver in swarm. To use this feature, users need 5 | to create a swarm cluster with network_driver set to 'docker'. 6 | After the cluster is created, users can create an overlay network 7 | (docker network create -d overlay mynetwork) and use it when 8 | launching a new container (docker run --net=mynetwork ...). 9 | 10 | -------------------------------------------------------------------------------- /releasenotes/notes/add-upgrade-check-framework-5057ad67a7690a14.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | prelude: > 3 | Added new tool ``magnum-status upgrade check``. 4 | features: 5 | - | 6 | A new framework for the ``magnum-status upgrade check`` command is added. 7 | This framework allows adding various checks which can be run before a 8 | Magnum upgrade to verify that the upgrade can be performed safely.
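For orientation, upgrade checks of this kind are typically registered through the oslo.upgradecheck framework; the check below is a placeholder sketch, not an actual Magnum check::

    # Placeholder upgrade check using the oslo.upgradecheck framework.
    from oslo_upgradecheck import upgradecheck

    class Checks(upgradecheck.UpgradeCommands):
        def _sample_check(self):
            # A real check would inspect configuration or database state here
            # and return WARNING/FAILURE with an explanation when needed.
            return upgradecheck.Result(upgradecheck.Code.SUCCESS)

        # (display name, callable) pairs run by "magnum-status upgrade check".
        _upgrade_checks = (('Sample Check', _sample_check),)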
9 | -------------------------------------------------------------------------------- /releasenotes/notes/add_cluster_template_observations_db_and_api_objects-d7350c8193da9470.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | When creating a cluster template the administrator can 5 | use the --tags argument to add any information 6 | that they consider important. The received text is a 7 | comma-separated list with the intended tags. 8 | This information is also shown when the user lists all 9 | the available cluster templates. 10 | upgrade: 11 | - | 12 | A new column was added to the cluster_templates DB table. 13 | -------------------------------------------------------------------------------- /releasenotes/notes/affinity-policy-for-mesos-template-def-82627eb231aa4d28.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fixes the problem with Mesos cluster creation where the 5 | nodes_affinity_policy was not properly conveyed, as it is required 6 | in order to create the corresponding server group in Nova. 7 | https://storyboard.openstack.org/#!/story/2005116 8 | -------------------------------------------------------------------------------- /releasenotes/notes/allow-cluster-template-being-renamed-82f7d5d1f33a7957.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | To get better cluster template versioning and relieve the pain 5 | of maintaining public cluster templates, the name of a cluster template 6 | can now be changed. 7 | 8 | -------------------------------------------------------------------------------- /releasenotes/notes/allow-empty-node_groups-ec16898bfc82aec0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Clusters can now be created with empty nodegroups. Existing nodegroups 5 | can be set to node_count = 0. min_node_count defaults to 0. 6 | This is useful for HA or special hardware clusters with multiple 7 | nodegroups managed by the cluster auto-scaler. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/allow-multimaster-no-fip-b11520485012d949.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | This allows no floating IP to be used with a multimaster 5 | configuration in terms of load balancers. -------------------------------------------------------------------------------- /releasenotes/notes/allow-setting-network-subnet-FIP-when-creating-cluster-ae0cda35ade28a9f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | When using a public cluster template, users still need the capability 5 | to reuse their existing network/subnet, and they also need to be 6 | able to turn on/off the floating IP to overwrite the setting in the 7 | public template. Now this is supported by adding those three 8 | items as parameters when creating a cluster. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/allow_admin_perform_acitons-cc988655bb72b3f3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | To better preserve backward compatibility, 5 | we set a specific rule to allow admin to perform all actions.
6 | This will apply on part of APIs in 7 | * Cluster 8 | * Cluster Template 9 | * federation 10 | -------------------------------------------------------------------------------- /releasenotes/notes/altered_grafanaUI_dashboards_persistency-1106b2e259a769b0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add persistency for grafana UI altered dashboards. To enable this use 5 | monitoring_storage_class_name label. 6 | It is recommended that dashboards be persisted by other means, mainly 7 | by using kubernetes configMaps. More info [0]. 8 | 9 | [0] https://github.com/helm/charts/tree/master/stable/grafana#sidecar-for-dashboards 10 | -------------------------------------------------------------------------------- /releasenotes/notes/async-bay-operations-support-9819bd06122ea9e5.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Current implementation of magnum bay operations are 4 | synchronous and as a result API requests are blocked 5 | until response from HEAT service is received. This release 6 | adds support for asynchronous bay operations (bay-create, 7 | bay-update, and bay-delete). Please note that with this 8 | change, bay-create, bay-update API calls will return bay uuid 9 | instead of bay object and also return HTTP status code 202 10 | instead of 201. Microversion 1.2 is added for new behavior. 11 | 12 | upgrade: 13 | - Magnum bay operations API default behavior changed from 14 | synchronous to asynchronous. User can specify 15 | OpenStack-API-Version 1.1 in request header for synchronous 16 | bay operations. 17 | -------------------------------------------------------------------------------- /releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Support passing an availability zone where all cluster nodes should be 5 | deployed, via the new availability_zone label. Both swarm_fedora_atomic_v2 6 | and k8s_fedora_atomic_v1 support this new label. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/boot-from-volume-7c73df68d7f325aa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Support boot from volume for Kubernetes all nodes (master and worker) 5 | so that user can create a big size root volume, which could be more 6 | flexible than using docker_volume_size. And user can specify the 7 | volume type so that user can leverage high performance storage, e.g. 8 | NVMe etc. And a new label etcd_volme_type is added as well so that 9 | user can set volume type for etcd volume. If the boot_volume_type 10 | or etcd_volume_type are not passed by labels, Magnum will try to 11 | read them from config option default_boot_volume_type and 12 | default_etcd_volume_type. A random volume type from Cinder will 13 | be used if those options are not set. 14 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-add-kube-dashboard-8a9f7d7c73c2debd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Include kubernetes dashboard in kubernetes cluster by default. Users 5 | can use this kubernetes dashboard to manage the kubernetes cluster. 
6 | Dashboard can be disabled by setting the label 'kube_dashboard_enabled' 7 | to false. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-auto-generate-name-052ea3fdf05fdbbf.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Auto generate name for cluster and cluster-template. If users create 4 | a cluster/cluster-template without specifying a name, the name will be 5 | auto-generated. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-barbican-alternative-store-35ec3eda0abb0e25.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Decouple the hard requirement on barbican. Introduce a new certificate 4 | store called x509keypair. If x509keypair is used, TLS certificates will 5 | be stored at magnum's database instead of barbican. To do that, set the 6 | value of the config ``cert_manager_type`` as ``x509keypair``. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-container-monitoring-d4bb1cbd0a4e44cc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Includes a monitoring stack based on cAdvisor, node-exporter, Prometheus 5 | and Grafana. Users can enable this stack through the label 6 | prometheus_monitoring. Prometheus scrapes metrics from the Kubernetes 7 | cluster and then serves them to Grafana through Grafana's Prometheus 8 | data source. Upon completion, a default Grafana dashboard is provided. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-decouple-lbaas-c8f2d73313c40b98.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Decouple the hard requirement on neutron-lbaas. Introduce a new property 4 | master_lb_enabled in cluster template. This property will determines if 5 | a cluster's master nodes should be load balanced. Set the value to false 6 | if neutron-lbaas is not installed. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-keypair-override-on-create-ca8f12ffca41cd62.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | prelude: > 3 | Magnum's keypair-override-on-create blueprint [1] 4 | allows for optional keypair value in ClusterTemplates 5 | and the ability to specify a keypair value during 6 | cluster creation. 7 | features: 8 | - Added parameter in cluster-create to specify the 9 | keypair. If keypair is not provided, the default 10 | value from the matching ClusterTemplate will be used. 11 | - Keypair is now optional for ClusterTemplate, in order 12 | to allow Clusters to use keypairs separate from their 13 | parent ClusterTemplate. 14 | deprecations: 15 | - --keypair-id parameter in magnum CLI 16 | cluster-template-create has been renamed to 17 | --keypair. 18 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-magnum-notifications-8bd44cfe9e80f82b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Emit notifications when there is an event on a cluster. An event could be 4 | a status change of the cluster due to an operation issued by end-users 5 | (i.e. 
users create, update or delete the cluster). Notifications are sent 6 | by using oslo.notify and PyCADF. Ceilometer can capture the events and 7 | generate samples for auditing, billing, monitoring, or quota purposes. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-mesos-slave-flags-de6cf8c4d2c3c916.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - > 4 | Enable Mesos cluster to export more slave flags via labels in 5 | cluster template. Add the following labels: mesos_slave_isolation, 6 | mesos_slave_image_providers, mesos_slave_work_dir, 7 | and mesos_slave_executor_environment_variables. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/bp-secure-etcd-cluster-coe-5abd22546f05a85b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Secure etcd cluster for swarm and k8s. Etcd cluster is 5 | secured using TLS by default. TLS can be disabled by 6 | passing --tls-disabled during cluster template creation. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-1580704-32a0e91e285792ea.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | security: 3 | - | 4 | Add new configuration option `openstack_ca_file` in the `drivers` section 5 | to pass the CA bundle used for the OpenStack API. Setting this file and 6 | setting `verify_ca` to `true` will result to all requests from the cluster 7 | nodes to the OpenStack APIs to be verified. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-1614596-support-ssl-magnum-api-e4896928c6562e03.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - Magnum now support SSL for API service. User can enable SSL for API 4 | via new 3 config options 'enabled_ssl', 'ssl_cert_file' and 'ssl_key_file'. 5 | - Change default API development service from wsgiref simple_server to 6 | werkzeug for better supporting SSL. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-1663757-198e1aa8fa810984.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | [`bug 1663757 `_] 5 | A configuration parameter, verify_ca, was added to magnum.conf 6 | with a default value of True and passed to the heat templates to indicate 7 | whether the cluster nodes validate the Certificate Authority when making 8 | requests to the OpenStack APIs (Keystone, Magnum, Heat). This parameter 9 | can be set to False to disable CA validation if you have self-signed 10 | certificates for the OpenStack APIs or you have your own Certificate 11 | Authority and you have not installed the Certificate Authority to all 12 | nodes. 13 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-1697655-add-etcd-volume-size-label-abde0060595bbbeb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add support to store the etcd configuration in a cinder volume. 5 | k8s_fedora_atomic accepts a new label etcd_volume_size defining the size 6 | of the volume. 
A value of 0 or leaving the label unset means no volume 7 | should be used, and the data will go to the instance local storage. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-1718947-0d4e67529e2817d7.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | From now on, server names are prefixed with the cluster name. 5 | The cluster name is truncated to 30 characters, ('_', '.') are mapped to '-' 6 | and non alpha-numeric characters are removed to ensure FQDN compatibility. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-1722522-d94743c6362a5e48.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Allow any value to be passed on the docker_storage_driver field by turning it 5 | into a StringField (was EnumField), and remove the constraints limiting the 6 | values to 'devicemapper' and 'overlay'. 7 | upgrade: 8 | - | 9 | Requires a db upgrade to change the docker_storage_driver 10 | field to be a string instead of an enum. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-1766284-k8s-fedora-admin-user-e760f9b0edf49391.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | security: 3 | - | 4 | k8s_fedora Remove cluster role from the kubernetes-dashboard account. When 5 | accessing the dashboard and skip authentication, users login with the 6 | kunernetes-dashboard service account, if that service account has the 7 | cluster role, users have admin access without authentication. Create an 8 | admin service account for this use case and others. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-2002728-kube-os-conf-region-46cd60537bdabdb2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Add `region` parameter to the Global configuration section of the 5 | Kubernetes configuration file. Setting this parameter will allow Magnum 6 | cluster to be created in the multi-regional OpenStack installation. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-2002981-trustee-auth-region-name-37796a4e6a274fb8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Add `trustee_keystone_region_name` optional parameter to the `trust` 5 | section. This parameter is useful for multi-regional OpenStack 6 | installations with different Identity service for every region. 7 | In such installation it is necessary to specify a region when searching 8 | for `auth_url` to authenticate a trustee user. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/bug-2004942-052321df27529562.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Allow overriding cluster template labels for swarm mode clusters - this 5 | functionality was missed from this COE when it was introduced. 
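The override behaviour is effectively a dictionary merge in which cluster labels take precedence over cluster template labels; a minimal sketch with assumed label values::

    # Illustrative merge only; the label names and values are examples.
    template_labels = {'swarm_strategy': 'spread', 'availability_zone': 'nova'}
    cluster_labels = {'availability_zone': 'zone-2'}

    effective_labels = dict(template_labels)
    effective_labels.update(cluster_labels)
    # -> {'swarm_strategy': 'spread', 'availability_zone': 'zone-2'}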
6 | -------------------------------------------------------------------------------- /releasenotes/notes/calico-3.21.2-193c895134e9c3c1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Upgrade to calico_tag=v3.21.2. Additionally, use fixed subnet CIDR for 5 | IP_AUTODETECTION_METHOD supported from v3.16.x onwards. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/calico-configuration-label-ae0b43a7c7123f02.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added calico_ipv4pool_ipip label for configuring calico network_driver 5 | IPIP Mode to use for the IPv4 POOL created at start up. 6 | Allowed_values: Always, CrossSubnet, Never, Off. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/calico-network-driver-0199c2459041ae81.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | issues: 3 | - | 4 | Adding 'calico' as network driver for Kubernetes so as to support network 5 | isolation between namespace with k8s network policy. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/cert-manager-api-ee0cf7f3b767bb5d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add new label 'cert_manager_api' enabling the kubernetes certificate 5 | manager api. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/change-service-name-ce5c72642fe1d3d1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - Magnum default service type changed from "container" to 4 | "container-infra". It is recommended to update the service 5 | type at Keystone service catalog accordingly. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/cinder-csi-enabled-label-ab2b8ade63c57cf3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add cinder_csi_enabled label to support out of tree Cinder CSI. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/client-embed-certs-322701471e4d6e1d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Embed certificates in kubernetes config file when issuing 'cluster config', 4 | instead of generating additional files with the certificates. This is now 5 | the default behavior. To get the old behavior and still generate cert 6 | files, pass --output-certs. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/cluster_template_update_labels-10ce66c87795f11c.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Now user can update labels in cluster-template. Previously string is 5 | passed as a value to labels, but we know that labels can only hold 6 | dictionary values. Now we are parsing the string and storing it as 7 | dictionary for labels in cluster-template. 
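A minimal sketch of the parsing this implies, assuming the usual "key=value,key=value" form (illustrative only, not the actual Magnum code)::

    def parse_labels(label_string):
        """Turn 'k1=v1,k2=v2' into the dict form labels are stored in."""
        labels = {}
        for item in label_string.split(','):
            if not item.strip():
                continue
            key, _, value = item.partition('=')
            labels[key.strip()] = value.strip()
        return labels

    assert parse_labels('kube_tag=v1.18.2,availability_zone=nova') == {
        'kube_tag': 'v1.18.2', 'availability_zone': 'nova'}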
8 | -------------------------------------------------------------------------------- /releasenotes/notes/configurable-k8s-health-polling-interval-75bb83b4701d48c5.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | The default 10 seconds health polling interval is too frequent for most of 5 | the cases. Now it has been changed to 60s. A new config 6 | `health_polling_interval` is supported to make the interval configurable. 7 | Cloud admin can totally disable the health polling by set a negative value 8 | for the config. 9 | upgrade: 10 | - | 11 | If it's still preferred to have 10s health polling interval for Kubernetes 12 | cluster. It can be set by config `health_polling_interval` under 13 | `kubernetes` section. -------------------------------------------------------------------------------- /releasenotes/notes/configure-etcd-auth-bug-1759813-baac5e0fe8a2e97f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fix etcd configuration in k8s_fedora_atomic driver. Explicitly enable 5 | client and peer authentication and set trusted CA (ETCD_TRUSTED_CA_FILE, 6 | ETCD_PEER_TRUSTED_CA_FILE, ETCD_CLIENT_CERT_AUTH, 7 | ETCD_PEER_CLIENT_CERT_AUTH). Only new clusters will benefit from the fix. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/configure_monitoring_app_endpoints-f00600c244a76cf4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added monitoring_ingress_enabled magnum label to set up ingress with 5 | path based routing for all the configured services 6 | {alertmanager,grafana,prometheus}. When using this, 7 | cluster_root_domain_name magnum label must be used to setup base path 8 | where this services are available. 9 | Added cluster_basic_auth_secret magnum label to configure basic auth 10 | on unprotected services {alertmanager and prometheus}. This is only 11 | in effect when app access is routed by ingress. 12 | upgrade: 13 | - | 14 | Configured {alertmanager,grafana,prometheus} services logFormat to 15 | json to enable easier machine log parsing. 16 | -------------------------------------------------------------------------------- /releasenotes/notes/containerd-598761bb536af6ba.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | New labels to support containerd as a runtime. 5 | 6 | container_runtime 7 | The container runtime to use. Empty value means, use docker from the 8 | host. Since ussuri, apart from empty (host-docker), containerd is also 9 | an option. 10 | 11 | containerd_version 12 | The containerd version to use as released in 13 | https://github.com/containerd/containerd/releases and 14 | https://storage.googleapis.com/cri-containerd-release/ 15 | 16 | containerd_tarball_url 17 | Url with the tarball of containerd's binaries. 18 | 19 | containerd_tarball_sha256 20 | sha256 of the tarball fetched with containerd_tarball_url or from 21 | https://storage.googleapis.com/cri-containerd-release/. 
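For illustration, the four labels above might be combined as follows when building a --labels argument for a cluster template; the version and empty URL/checksum values here are assumed examples, not recommended settings::

    # Example (assumed) values selecting containerd as the container runtime.
    labels = {
        'container_runtime': 'containerd',
        'containerd_version': '1.4.3',      # example release tag
        'containerd_tarball_url': '',       # optional explicit tarball URL
        'containerd_tarball_sha256': '',    # optional tarball checksum
    }
    # Rendered as: container_runtime=containerd,containerd_version=1.4.3
    label_arg = ','.join('%s=%s' % (k, v) for k, v in labels.items() if v)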
22 | -------------------------------------------------------------------------------- /releasenotes/notes/control-plane-taint-c6194f968f0817e8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Adds initial support for Kubernetes v1.28 5 | upgrade: 6 | - | 7 | The taint for control plane nodes has been updated from 8 | 'node-role.kubernetes.io/master' to 9 | 'node-role.kubernetes.io/control-plane', in line with upstream. Starting 10 | from v1.28, the old taint no longer passes conformance. 11 | New clusters from existing cluster templates will have this change. 12 | Existing clusters are not affected. 13 | This will be a breaking change for Kubernetes `_ 9 | for more info. 10 | -------------------------------------------------------------------------------- /releasenotes/notes/deprecate-send_cluster_metrics-8adaac64a979f720.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | Currently, Magnum is running periodic tasks to collect k8s cluster 5 | metrics and send them to the message bus. Unfortunately, it's collecting pod info 6 | only from the "default" namespace, which makes this function useless. 7 | What's more, even if Magnum could get all pods from all namespaces, it 8 | doesn't make much sense to keep this function in Magnum, because 9 | operators only care about the health of cluster nodes. If they 10 | want to know the status of pods, they can use heapster or other 11 | tools to get that. So the feature is being deprecated now and will be 12 | removed in the Stein release. The default value is changed to False, which 13 | means the metrics won't be sent. 14 | 15 | -------------------------------------------------------------------------------- /releasenotes/notes/devicemapper-deprecation-46a59adbf131bde1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | The devicemapper and overlay storage drivers are deprecated in favor 5 | of overlay2 in docker, and will be removed from docker in a future 6 | release. Users of the devicemapper and overlay storage drivers are 7 | recommended to migrate to a different storage driver, such as overlay2. 8 | overlay2 will be set as the default storage driver from the Victoria cycle. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/disable-mesos-from-api-0087ef02ba0477df.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | Remove mesos from the API. This means new clusters of coe type 'mesos' 5 | cannot be created. 6 | The mesos driver will be removed in the next release. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/disable-ssh-password-authn-f2baf619710e52aa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | security: 3 | - | 4 | Passwords could be guessed if there is no 5 | fail2ban-like solution, so it is better to disable password authentication for security 6 | reasons. This only affects fedora atomic images. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/dns-autoscale-90b63e3d71d7794e.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | issues: 3 | - | 4 | Currently, the number of replicas of the coreDNS pod is hardcoded as 1.
It's not a 5 | reasonable number for such a critical service. Without DNS, probably all 6 | workloads running on the k8s cluster will be broken. Now Magnum is making 7 | the coreDNS pod autoscaling based on the nodes and cores number. 8 | 9 | -------------------------------------------------------------------------------- /releasenotes/notes/docker-volume-type-46044734f5a27661.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Support different volume types for the drivers that support docker storage 5 | in cinder volumes. swarm_fedora_atomic and k8s_fedora_atomic accept a new 6 | label to specify a docker_volume_type. 7 | upgrade: 8 | - | 9 | A new section is created in magnum.conf named cinder. In this cinder 10 | section, you need to set a value for the key default_docker_volume_type, 11 | which should be a valid type for cinder volumes in your cinder deployment. 12 | This default value will be used if no volume_type is provided by the user 13 | when using a cinder volume for container storage. The suggested default 14 | value the one set in cinder.conf of your cinder deployment. 15 | -------------------------------------------------------------------------------- /releasenotes/notes/drop-calico-v3-3-7d47eb04fcb392dc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Support for deploying ``Calico v3.3`` has been dropped. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/drop-fedora-atomic-driver-76da9f0ea0cf20bb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | ``k8s_fedora_atomic_v1`` driver has been dropped. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/drop-k8s-coreos-9604dd23b0e884b6.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | ``k8s_coreos_v1`` driver has been dropped. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/drop-k8s-fedora-ironic-6c9750a0913435e2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | ``k8s_fedora_ironic_v1`` driver has been dropped. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/drop-py27-support-7e2c4300341f9719.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Python 2.7 support has been dropped. Last release magnum support py2.7 5 | is OpenStack Train. The minimum version of Python now supported by magnum 6 | is Python 3.6. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/drop-python-3-6-and-3-7-68ad47ae9d14dca7.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now 5 | supported is Python 3.8. 
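As a sketch of the ``docker_volume_type`` label described in the docker-volume-type note above (the template name, image name, size and volume type value are illustrative; the volume type must exist in your cinder deployment)::

    openstack coe cluster template create k8s-cinder-template \
      --coe kubernetes --image fedora-atomic-latest --external-network public \
      --docker-volume-size 20 --labels docker_volume_type=lvmdriver-1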
-------------------------------------------------------------------------------- /releasenotes/notes/drop-tiller-5b98862961003df8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | ``Tiller`` support has been dropped, following labels are not functional 5 | anymore: 6 | * ``tiller_enabled`` 7 | * ``tiller_tag`` 8 | * ``tiller_namespace`` 9 | -------------------------------------------------------------------------------- /releasenotes/notes/drop_mesos-DzAlnyYHjbQC6IfMq.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | other: 3 | - | 4 | We are dropping mesos for the lack of support/test 5 | and no usage from the community. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/drop_mesos_driver-pBmrJ9gAqX3EUROBS2g.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | Removed mesos driver. Mesos is no longer supported in Magnum. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/drop_swarm_driver-3a2e1927053cf372.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Dropped swarm drivers, Docker Swarm is not supported in Magnum anymore. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/enable-enforce-scope-and-new-defaults-7e6e503f74283071.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | The Magnum service now allows enables policies (RBAC) new defaults 5 | and scope checks. These are controlled by the following (default) config 6 | options in ``magnum.conf`` file:: 7 | 8 | [oslo_policy] 9 | enforce_new_defaults=False 10 | enforce_scope=False 11 | 12 | We will change the default to True in 2024.1 (Caracal) cycle. 13 | If you want to enable them then modify both values to True. 14 | -------------------------------------------------------------------------------- /releasenotes/notes/enable_cloud_provider_label-ed79295041bc46a8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add 'cloud_provider_enabled' label for the k8s_fedora_atomic driver. 5 | Defaults to true. For specific kubernetes versions if 'cinder' is 6 | selected as a 'volume_driver', it is implied that the cloud provider 7 | will be enabled since they are combined. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/ensure-delete-complete-2f9bb53616e1e02b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fixes a regression which left behind trustee user accounts and certificates 5 | when a cluster is deleted. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/expose_autoscaler_metrics-0ea9c61660409efe.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Expose autoscaler prometheus metrics on pod port metrics (8085). 
5 | -------------------------------------------------------------------------------- /releasenotes/notes/expose_traefik_metrics-aebbde99d4ecc231.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Expose traefik prometheus metrics. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/fedora_coreos-e66b44d86dea380f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add fedora coreos driver. To deploy clusters with fedora coreos operators 5 | or users need to add os_distro=fedora-coreos to the image. The scripts 6 | to deploy kubernetes on top are the same with fedora atomic. Note that 7 | this driver has selinux enabled. 8 | issues: 9 | - | 10 | The startup of the heat-container-agent uses a workaround to copy the 11 | SoftwareDeployment credentials to /var/lib/cloud/data/cfn-init-data. 12 | The fedora coreos driver requires heat train to support ignition. 13 | fixes: 14 | - | 15 | For k8s_coreos set REQUESTS_CA for heat-agent. The heat-agent as a python 16 | service needs to use the ca bundle of the host. 17 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-cert-apimanager-527352622c5a9c3b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fixed the usage of cert_manager_api=true making cluster creation fail 5 | due to a logic lock between kubemaster.yaml and kubecluster.yaml 6 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-cluster-floating-ip-enabled-default-value-4e24d4bf09fc08c8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | There shouldn't be a default value for floating_ip_enabled when creating 5 | cluster. By default, when it's not set, the cluster's floating_ip_enabled 6 | attribute should be set with the value of cluster template. It's fixed 7 | by removing the default value from Magnum API. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-cluster-update-886bd2d1156bef88.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | When doing a cluster update magnum is now passing the existing parameter 5 | to heat which will use the heat templates stored in the heat db. This 6 | change will prevent heat from replacacing all nodes when the heat 7 | templates change, for example after an upgrade of the magnum server code. 8 | https://storyboard.openstack.org/#!/story/1722573 9 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-driver-token-scope-a2c2b4b4ef813ec7.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | We have corrected the authentication scope in Magnum drivers when 5 | authenticating to create certs, so that trusts can work properly. This will 6 | change the authenticated user from trustee to trustor (as trusts designed 7 | for). This change affects all drivers that inherit from common Magnum 8 | drivers (Heat drivers). 9 | If you have custom policies that checks for trustee user, you will need to 10 | update them to trustor. 
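To use the Fedora CoreOS driver described in the fedora_coreos note above, the ``os_distro`` property is added to the Glance image, for example (the image name is illustrative)::

    openstack image set --property os_distro=fedora-coreos fedora-coreos-35.20220116.3.0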
11 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-fedora-proxy-a4b8d5fc4ec65e80.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | A regression issue about downloading images has been fixed. Now both Fedora 5 | Atomic driver and Fedora CoreOS driver can support using proxy in template 6 | to create cluster. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-global-stack-list-7a3a66169f5c4aa8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | security: 3 | - Fix global stack list in periodic task. In before, magnum's periodic task 4 | performs a `stack-list` operation across all tenants. This is disabled 5 | by Heat by default since it causes a security issue. At this release, 6 | magnum performs a `stack-get` operation on each Heat stack by default. 7 | This might not be scalable and operators have an option to fall back to 8 | `stack-list` by setting the config `periodic_global_stack_list` to 9 | `True` (`False` by default) and updating the heat policy file (usually 10 | /etc/heat/policy.json) to allow magnum list stacks. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-k8s-coe-version-a8ea38f327ea6bb3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | The coe_version was out of sync with the k8s version deployed 5 | for the cluster. Now it is fixed by making sure the kube_version is 6 | consistent with the kube_tag when creating the cluster and upgrading 7 | the cluster. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-label-fixed_network_cidr-95d6a2571b58a8fc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Now the label `fixed_network_cidr` have been renamed with 5 | `fixed_subnet_cidr`. And it can be passed in and set 6 | correctly. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-nginx-getting-oom-killed-76139fd8b57e6c15.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | nginx-ingress-controller QoS changed from Guaranteed to Burstable. 5 | Priority class 'system-cluster-critical' or higher for 6 | nginx-ingress-controller. 7 | fixes: 8 | - | 9 | nginx-ingress-controller requests.memory increased to 256MiB. This is a 10 | result of tests that showed the pod getting oom killed by the node on 11 | a relatively generic use case. 12 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-proxy-of-grafana-script-8b408d9d103dfc06.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | This proxy issue of Prometheus/Grafana script has been fixed. 
5 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-race-condition-for-k8s-multi-masters-29bd36de57df355a.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | When creating a multi-master cluster, all master nodes will attempt to 5 | create kubernetes resources in the cluster at the same time, like 6 | coredns, the dashboard, calico etc. This race condition shouldn't be 7 | a problem when doing declarative calls instead of imperative (kubectl 8 | apply instead of create). However, due to [1], kubectl fails to apply 9 | the changes and the deployment scripts fail, causing cluster creation 10 | to fail in the case of Heat SoftwareDeployments. This patch passes the 11 | ResourceGroup index of every master so that resource creation will be 12 | attempted only from the first master node. 13 | [1] https://github.com/kubernetes/kubernetes/issues/44165 14 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-serveraddressoutputmapping-for-private-clusters-73a874bb4827d568.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fix an issue with private clusters getting stuck in CREATE_IN_PROGRESS 5 | status where floating_ip_enabled=True in the cluster template but this is 6 | disabled when the cluster is created. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/fix-volume-api-version-908c3f1cf154b231.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Default value of ``[cinder_client] api_version`` has been updated from 5 | ``2`` to ``3``, because volume v2 API is no longer available. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/flannel-cni-4a5c9f574325761e.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | For k8s_fedora_atomic, run flannel as a cni plugin. The deployment method 5 | is taken from the flannel upstream documentation. One more label for the 6 | cni tag is added, `flannel_cni_tag`, for the container 7 | quay.io/repository/coreos/flannel-cni. The flannel container is taken 8 | from flannel upstream as well: quay.io/repository/coreos/flannel. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/flannel-reboot-fix-f1382818daed4fa8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Add iptables -P FORWARD ACCEPT unit. On node reboot, kubelet and kube-proxy 5 | set iptables -P FORWARD DROP which doesn't work with flannel in the way we 6 | use it. Add a systemd unit to set the rule to ACCEPT after flannel, 7 | docker, kubelet and kube-proxy. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/grafana_prometheus_tag_label-78540ea106677485.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add 'grafana_tag' and 'prometheus_tag' labels for the k8s_fedora_atomic driver. 5 | Grafana defaults to 5.1.5 and Prometheus defaults to v1.8.2.
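A minimal sketch of the ``grafana_tag`` and ``prometheus_tag`` labels introduced above, using the stated defaults as example values (the template and image names are illustrative)::

    openstack coe cluster template create k8s-monitoring-template \
      --coe kubernetes --image fedora-atomic-latest --external-network public \
      --labels grafana_tag=5.1.5,prometheus_tag=v1.8.2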
6 | -------------------------------------------------------------------------------- /releasenotes/notes/heapster-enabled-label-292ca1ddac68a156.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added label heapster_enabled to control heapster installation in the 5 | cluster. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/heat-container-agent-for-train-e63bc1559750fe9c.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | other: 3 | - | 4 | Now the heat-container-agent default tag for Train release is train-dev. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/heat-container-agent-tag-92848c1062c16c76.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add heat_container_agent_tag label to allow users select the heat-agent 5 | tag. Stein default: stein-dev 6 | -------------------------------------------------------------------------------- /releasenotes/notes/heat-container-agent-tag-fe7cec6b890329af.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add heat container agent into Kubernetes cluster worker nodes to support 5 | cluster rolling upgrade. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/helm-install-ingress-nginx-fe2acec1dd3032e3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add nginx as an additional Ingress controller option for Kubernetes. 5 | Installation is done via the upstream nginx-ingress helm chart, and 6 | selection can be done via label ingress_controller=nginx. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/helm-install-metrics-service-cd18be76c4ed0e5f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Installs the metrics-server service that is replacing kubernetes deprecated 5 | heapster as a cluster wide metrics reporting service used by schedulling, 6 | HPA and others. This service is installed and configured using helm and so 7 | tiller_enabled flag must be True. Heapster service is maintained active to 8 | allow compatibility. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/helm-install-metrics-service-e7a5459417504a75.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Installs the metrics-server service that is replacing kubernetes 5 | deprecated heapster as a cluster wide metrics reporting service used by 6 | schedulling, HPA and others. This service is installed and configured 7 | using helm and so tiller_enabled flag must be True. The label 8 | metrics_server_chart_tag can be used to specify the stable/metrics-server 9 | chart tag to be used. The label metrics_server_enabled is used to enable 10 | disable the installation of the metrics server (default: true). 
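For the nginx ingress controller option described above, selection is done via the ``ingress_controller`` label, for example (template and image names are illustrative)::

    openstack coe cluster template create k8s-nginx-ingress-template \
      --coe kubernetes --image fedora-atomic-latest --external-network public \
      --labels ingress_controller=nginx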
11 | -------------------------------------------------------------------------------- /releasenotes/notes/helm-install-prometheus-operator-ea87752bc57a0945.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added monitoring_enabled to install prometheus-operator monitoring 5 | solution by means of helm stable/prometheus-operator public chart. 6 | Defaults to false. grafana_admin_passwd label can be used to set 7 | grafana dashboard admin access password. If grafana_admin_passwd 8 | is not set the password defaults to admin. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/helm_client_label-1d6e70dfcf8ecd0d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added label helm_client_tag to allow user to specify helm client container version. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/hyperkube-prefix-01b9a5f4664edc90.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Support `hyperkube_prefix` label which defaults to k8s.gcr.io/. Users now 5 | have the option to define alternative hyperkube image source since the 6 | default source has discontinued publication of hyperkube images for 7 | `kube_tag` greater than 1.18.x. Note that if `container_infra_prefix` label 8 | is define, it still takes precedence over this label. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/ignore-calico-devices-in-network-manager-e1bdb052834e11e9.yaml: -------------------------------------------------------------------------------- 1 | fixes: 2 | - Fixed an issue that applications running on master nodes which rely on 3 | network connection keep restarting because of timeout or connection lost, 4 | by making calico devices unmanaged in NetworkManager config on master 5 | nodes. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/improve-driver-discovery-df61e03c8749a34d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add a feature to prevent drivers clashing when multiple drivers are able to 5 | provide the same functionality. 6 | 7 | Drivers used to be selected based on a tuple of (server_type, os, coe). This 8 | can be a problem if multiple drivers provides the same functionality, e.g. a 9 | tuple like (vm, ubuntu, kubernetes). 10 | 11 | To allow for this, it is now possible to explicitly specify a driver name, 12 | instead of relying on the lookup. The driver name is the same as the 13 | entrypoint name, and can be specified by a Cluster Template through the 14 | Glance image property "magnum_driver". 15 | -------------------------------------------------------------------------------- /releasenotes/notes/improve-k8s-master-kubelet-taint-0c56ffede270116d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | The taint of master node kubelet has been improved to get the 5 | conformance test (sonobuoy) passed. 
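As a sketch of the ``magnum_driver`` Glance image property used for explicit driver selection in the improve-driver-discovery note above (the image name and driver entrypoint name are illustrative)::

    openstack image set --property magnum_driver=k8s_fedora_coreos_v1 fedora-coreos-35.20220116.3.0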
6 | -------------------------------------------------------------------------------- /releasenotes/notes/ingress-controller-552ea956ceabdd25.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add new labels 'ingress_controller' and 'ingress_controller_role' enabling 5 | the deployment of a Kubernetes Ingress Controller backend for clusters. 6 | Default for 'ingress_controller' is '' (meaning no controller deployed), 7 | with possible values being 'traefik'. 8 | Default for 'ingress_controller_role' is 'ingress'. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/ingress-ngnix-de3c70ca48552833.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Upgrade of ingress controler. Chart name nginx-ingress has been changed to 5 | ingress-nginx. Chart repository also has been changed. More details about 6 | why this change take place can be found in github repository 7 | https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx 8 | -------------------------------------------------------------------------------- /releasenotes/notes/integrate-osprofiler-79bdf2d0cd8a39fb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Magnum now support OSProfiler for HTTP, RPC and DB request tracing. 4 | User can enable OSProfiler via Magnum configuration file in 'profiler' 5 | section. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-cluster-creation-speedup-21b5b368184d7bf0.yaml: -------------------------------------------------------------------------------- 1 | features: 2 | - | 3 | Start Kubernetes workers installation right after the master instances are 4 | created rather than waiting for all the services inside masters, which 5 | could decrease the Kubernetes cluster launch time significantly. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-dashboard-v2.0.0-771ce78b527209d3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | The default version of Kubernetes dashboard has been upgraded to v2.0.0 and 5 | metrics-server is supported by k8s dashboard now. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-delete-vip-fip-b2ddf61ddbc080bc.yaml: -------------------------------------------------------------------------------- 1 | fixes: 2 | - | 3 | In kubernetes cluster, a floating IP is created and associated with the vip 4 | of a load balancer which is created corresponding to the service of 5 | LoadBalancer type inside kubernetes, it should be deleted when the cluster 6 | is deleted. 
7 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-fcos-version-bumps-ca89507d2cf15384.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Bump up default versions for fedora-coreos driver 5 | kube_tag: v1.18.2 6 | autoscaler_tag: v1.18.1 7 | cloud_provider_tag: v1.18.0 8 | cinder_csi_plugin_tag: v1.18.0 9 | k8s_keystone_auth_tag: v1.18.0 10 | magnum_auto_healer_tag: v1.18.0 11 | octavia_ingress_controller_tag: v1.18.0 12 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-fedora-atomic-rolling-upgrade-3d8edcdd91fa1529.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Now the fedora atomic Kubernetes driver can support rolling upgrade for k8s 5 | version change or the image change. User can call command 6 | `openstack coe cluster upgrade ` to 7 | upgrade current cluster to the new version defined in the new cluster 8 | template. At this moment, only the image change and the kube_tag change 9 | are supported. 10 | issues: 11 | - | 12 | There is a known issue when doing image(operating system) upgrade for k8s 13 | cluster. Because when doing image change for a server resource, Heat will 14 | trigger the Nova rebuild to rebuild the instnace and there is no chance to 15 | call kubectl drain to drain the node, so there could be a very minior 16 | downtime when doing(starting to do) the rebuild and meanwhile a request 17 | is routed to that node. 18 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-improve-floating-ip-enabled-84cd00224d6b7bc1.yaml: -------------------------------------------------------------------------------- 1 | upgrade: 2 | - The etcd service for Kubernetes cluster is no longer allocated a floating 3 | IP. 4 | features: 5 | - A new label named ``master_lb_floating_ip_enabled`` is introduced which 6 | controls if Magnum allocates floating IP for the load balancer of master 7 | nodes. This label only takes effect when the ``master_lb_enabled`` is set. 8 | The default value is the same as ``floating_ip_enabled``. The 9 | ``floating_ip_enabled`` property now only controls if Magnum should 10 | allocate the floating IPs for the master and worker nodes. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-keystone-auth-6c88c1a2d406fb61.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Now cloud-provider-openstack of Kubernetes has a webhook to support 5 | Keystone authorization and authentication. With this feature, user can use 6 | a new label 'keystone-auth-enabled' to enable the keystone authN and authZ. 7 | 8 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-nodes-security-group-9d8dbb91b006d9dd.yaml: -------------------------------------------------------------------------------- 1 | security: 2 | - | 3 | Defines more strict security group rules for kubernetes worker nodes. The 4 | ports that are open by default: default port range(30000-32767) for 5 | external service ports; kubelet healthcheck port; Calico BGP network ports; 6 | flannel overlay network ports. The cluster admin should manually config the 7 | security group on the nodes where Traefik is allowed. 
To allow traffic to 8 | the default ports (80, 443) that the traefik ingress controller exposes 9 | users will need to create additional rules or expose traefik with a 10 | kubernetes service with type: LoadBalaner. Finally, the ssh port in worker 11 | nodes is closed as well. If ssh access is required, users will need to 12 | create a rule for port 22 as well. 13 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-octavia-ingress-controller-32c0b97031fd0dd4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add a new option 'octavia' for the label 'ingress_controller' and a new 5 | label 'octavia_ingress_controller_tag' to enable the deployment of 6 | `octavia-ingress-controller `_ 7 | in the kubernetes cluster. The 'ingress_controller_role' label is not used 8 | for this option. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-prometheus-clusterip-b191fa163e3f1125.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Use ClusterIP as the default Prometheus service type, because the NodePort 5 | type service has the requirement that extra security group rule is properly 6 | configured. Kubernetes cluster administrator could still change the service 7 | type after the cluster creation. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s-volumes-az-fix-85ad48998d2c12aa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | In a multi availability zone (AZ) environment, if Nova doesn't support 5 | cross AZ volume mount, then the cluster creation may fail because Nova can 6 | not mount volume in different AZ. This issue only impact Fedora Atomic and 7 | Fedora CoreOS drivers. Now this issue is fixed by passing in the AZ info 8 | when creating volumes. 9 | 10 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s_fedora_atomic_apply_cluster_role-8a46c881de1a1fa3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Create admin cluster role for k8s_fedora_atomic, it is defined in 5 | the configuration but it wasn't applied. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/k8s_fedora_protect_kubelet-8468ddcb92c2a624.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fix bug #1758672 [1] to protect kubelet in the k8s_fedora_atomic driver. 5 | Before this patch kubelet was listening to 0.0.0.0 and for clusters with 6 | floating IPs the kubelet was exposed. Also, even on clusters without fips 7 | the kubelet was exposed inside the cluster. This patch allows access to 8 | the kubelet only over https and with the appropriate roles. The apiserver 9 | and heapster have the appropriate roles to access it. Finally, all 10 | read-only ports have been closed to not expose any cluster data. The only 11 | remaining open ports without authentication are for healthz. 
12 | [1] https://bugs.launchpad.net/magnum/+bug/1758672 13 | -------------------------------------------------------------------------------- /releasenotes/notes/keystone-auth-repo-6970c05f44299326.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | k8s-keystone-auth now uses the upstream k8scloudprovider docker repo instead 5 | of the openstackmagnum repo. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/keystone_trustee_interface-6d63b74616dda1d4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Keystone URL used by Cluster Templates instances to authenticate is now 4 | configurable with the ``trustee_keystone_interface`` parameter 5 | which default to ``public``. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/kubelet-nfs-b51e572adfb56378.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | For fcos-kubelet, add rpc-statd dependency. 5 | To mount nfs volumes with the embedded volume 6 | pkg [0], rpc-statd is required and should be 7 | started by mount.nfs. When running kubelet 8 | in a chroot this fails. With atomic containers 9 | it used to work. 10 | [0] https://github.com/kubernetes/kubernetes/tree/master/pkg/volume/nfs 11 | -------------------------------------------------------------------------------- /releasenotes/notes/kubernetes-cloud-config-6c9a4bfec47e3bb4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Use the external cloud provider in k8s_fedora_atomic. The 5 | cloud_provider_tag label can be used to select the container tag for it, 6 | together with the cloud_provider_enabled label. The cloud provider runs 7 | as a DaemonSet on all master nodes. 8 | upgrade: 9 | - | 10 | The cloud config for kubernets has been renamed from 11 | /etc/kubernetes/kube_openstack_config to /etc/kubernetes/cloud-config as 12 | the kubelet expects this exact name when the external cloud provider is 13 | used. A copy of /etc/kubernetes/kube_openstack_config is in place for 14 | applications developed for previous versions of magnum. 15 | -------------------------------------------------------------------------------- /releasenotes/notes/lb-algorithm-36a15eb21fd5c4b1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added support for choosing Octavia LB algorithm by using 5 | ``octavia_lb_algorithm`` tag. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/make-keypair-optional-fcf4a17e440d0879.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | This makes the keypair optional. The user should not have to include 5 | the keypair because they may use some other method of security such 6 | as using SSSD, preconfigured on the image. -------------------------------------------------------------------------------- /releasenotes/notes/master-lb-allowed-cidrs-cc599da4eb96e983.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add a new label named `master_lb_allowed_cidrs` to control the IP ranges 5 | which can access the k8s API and etcd load balancers of master. 
To get 6 | this feature, the minimum version of Heat is stable/ussuri and the minimum 7 | version of Octavia is stable/train. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/merge-labels-9ba7deffc5bb3c7f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | A new boolean flag is introduced in the Cluster and Nodegroup create API 5 | calls. Using this flag, users can override label values when clusters or 6 | nodegroups are created without having to specify all the inherited values. 7 | To do that, users have to specify the labels with their new values and use 8 | the flag --merge-labels. At the same time, three new fields are added in 9 | the cluster and nodegroup show outputs, showing the differences between the 10 | actual and the inherited labels. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/migrations-1.3.20-60e5f990422f2ca5.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fixes database migrations with SQLAlchemy 1.3.20. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/missing-ip-in-api-address-c25eef757d5336aa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | There was a corner case where floating_ip_enabled=False, 5 | master_lb_enabled=True and master_lb_floating_ip_enabled=False were set in the 6 | cluster template, but floating_ip_enabled=True was set when 7 | creating the cluster, which caused a missing IP address in the 8 | api_address of the cluster. Now the issue has been fixed. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/monitoring_persistent_storage-c5857fc099bd2f65.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added the metrics_retention_days magnum label allowing the user to specify 5 | the prometheus server scraped metrics retention days (default: 14). 6 | Added the metrics_retention_size_gi magnum label allowing the user to specify 7 | the prometheus server metrics storage maximum size in Gi (default: 14). 8 | Added metrics_interval_seconds allowing the user to specify the prometheus 9 | scrape frequency in seconds (default: 30). 10 | Added metrics_storage_class_name allowing the user to specify the 11 | storageClass to use as external retention for pod fail-over data 12 | persistence. 13 | -------------------------------------------------------------------------------- /releasenotes/notes/monitoring_scrape_ca_and_traefik-5544d8dd5ab7c234.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | The Prometheus server now scrapes metrics from the traefik proxy. 5 | The Prometheus server now scrapes metrics from the cluster autoscaler. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/monitoring_scrape_internal-6697e50f091b0c9c.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Scrape metrics from kube-{controller-manager,scheduler}. 5 | Disable PrometheusRule for etcd.
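A sketch of the ``--merge-labels`` flag described in the merge-labels note above, overriding a single inherited label at cluster creation time (the cluster name, template name and label value are illustrative)::

    openstack coe cluster create my-cluster \
      --cluster-template k8s-template --node-count 3 \
      --labels monitoring_enabled=true --merge-labels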
6 | -------------------------------------------------------------------------------- /releasenotes/notes/no-cinder-volume-87b9339e066c30a0.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | prelude: > 3 | Currently, the swarm and the kubernetes drivers use 4 | a dedicated cinder volume to store the container 5 | images. It was been observed that one cinder volume 6 | per node is a bottleneck for large clusters. 7 | fixes: 8 | - Make the dedicated cinder volume per node an opt-in 9 | option. By default, no cinder volumes will be created 10 | unless the user passes the docker-volume-size argument. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/nodegroup-limit-89930d45ee06c621.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fixes the next url in the list nodegroups API response. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/octavia-provider-3984ee3bf381ced1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added support for choosing Octavia provider driver by using 5 | ``octavia_provider`` tag. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/periodic-logs-use-uuid-65b257ab9c227494.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | other: 3 | - | 4 | When debug logging is enabled, periodic update logging will 5 | now output Cluster UUID instead of database ID to better 6 | identify the object being updated. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/podsecuritypolicy-2400063d73524e06.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | k8s_fedora_atomic_v1 Add PodSecurityPolicy for privileged pods. Use 5 | privileged PSP for calico and node-problem-detector. Add PSP for flannel 6 | from upstream. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/pre-delete-all-loadbalancers-350a69ec787e11ea.yaml: -------------------------------------------------------------------------------- 1 | features: 2 | - | 3 | Magnum now cascade deletes all the load balancers before deleting the 4 | cluster, not only including load balancers for the cluster services and 5 | ingresses, but also those for Kubernetes API/etcd endpoints. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/pre-delete-cluster-5e27cfdf45e25805.yaml: -------------------------------------------------------------------------------- 1 | features: 2 | - | 3 | Add Kubernetes cluster pre-delete support to remove the cloud resources 4 | before deleting the cluster. For now, only load balancers for Kubernetes 5 | services of LoadBalancer type are deleted. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/prometheus-adapter-15fba9d739676e70.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added custom.metrics.k8s.io API installer by means of stable/prometheus-adapter 5 | helm chart. The label prometheus_adapter_enabled (default: true) controls 6 | configuration. 
You can also use prometheus_adapter_chart_tag to select helm 7 | chart version, and prometheus_adapter_configmap if you would like to setup 8 | your own metrics (specifying this other than default overwrites default 9 | configurations). 10 | This feature requires the usage of label monitoring_enabled=true. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/prometheus-operator-compatible-with-k8s-1-16-f8be99cf527075b8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Bump up prometheus operator chart version to 8.2.2 so that it is compatible 5 | with k8s 1.16.x. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/quota-api-182cd1bc9e706b17.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - This release introduces 'quota' endpoint that enable admin 4 | users to set, update and show quota for a given tenant. 5 | A non-admin user can get self quota limits. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/remove-podsecuritypolicy-5851f4009f1a166c.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | PodSecurityPolicy has been removed in Kubernetes v1.25 [1]. To allow Magnum 5 | to support Kubernetes v1.25 and above, PodSecurityPolicy Admission 6 | Controller has has been removed. 7 | 8 | This means that there is a behaviour change in Cluster Templates created 9 | after this change, where new Clusters with such Cluster Templates will not 10 | have PodSecurityPolicy. Please be aware of the subsequent impact on Helm 11 | Charts, etc. 12 | 13 | [1] https://kubernetes.io/docs/concepts/security/pod-security-policy/ 14 | -------------------------------------------------------------------------------- /releasenotes/notes/remove-send_cluster_metrics-2a09eba8627c7ceb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | Remove period job send_cluster_metrics. This job has been deprecated since 5 | Rocky. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/rename-minion-to-node-9d32fe77d765f149.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | issues: 3 | - | 4 | Minion is not a good name for k8s worker node anymore, now it has been 5 | replaced in the fedora atomic driver with 'node' to align with the k8s 6 | terminologies. So the server name of a worker will be something like 7 | `k8s-1-lnveovyzpreg-node-0` instead of `k8s-1-lnveovyzpreg-worker-0`. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/resize-api-2bf1fb164484dea9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Now an OpenStack driver for Kubernetes Cluster Autoscaler is being 5 | proposed to support autoscaling when running k8s cluster on top of 6 | OpenStack. However, currently there is no way in Magnum to let 7 | the external consumer to control which node will be removed. The 8 | alternative option is calling Heat API directly but obviously it 9 | is not the best solution and it's confusing k8s community. 
So this 10 | new API is being added to Magnum: POST /actions/resize 11 | 12 | -------------------------------------------------------------------------------- /releasenotes/notes/return-clusterid-for-resize-upgrade-6e841c7b568fa807.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Now the resize and upgrade actions of a cluster return the cluster ID, to be 5 | consistent with other actions of a Magnum cluster. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/return-server-id-in-kubeminion-cb33f5141e0b7fa9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Return the instance ID of the worker node in the k8s minion template so that 5 | a consumer can send an API request to Heat to remove a particular 6 | node with removal_policies. Otherwise, the consumer (e.g. AutoScaler) 7 | has to use the node index to do the removal, which is confusing outside of the 8 | OpenStack world. 9 | https://storyboard.openstack.org/#!/story/2005054 10 | 11 | 12 | -------------------------------------------------------------------------------- /releasenotes/notes/rollback-bay-on-update-failure-83e5ff8a7904d5c4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Add Microversion 1.3 to support Magnum bay rollback. 4 | Users can enable rollback on bay update failure by 5 | setting 'OpenStack-API-Version' to 'container-infra 1.3' 6 | in the request header and passing the 'rollback=True' param 7 | in the bay update request. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/rotate-cluster-cert-9f84deb0adf9afb1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Add microversion 1.5 to support rotation of a cluster's CA 4 | certificate. This gives admins a way to restrict/deny access to 5 | an existing cluster once a user has been granted access. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/server-groups-for-both-master-and-workder-bdd491e4323955d4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Magnum used to have only one server group for all master and worker nodes 5 | per cluster, which is not very flexible for small cloud scale. For a 6 | cluster with 3+ masters, the capacity is easily reached when using a hard 7 | anti-affinity policy. Now one server group is added for each 8 | master and worker node group to have better flexibility. 9 | 10 | -------------------------------------------------------------------------------- /releasenotes/notes/set-traefik-tag-7d4aca5685147970.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Added label traefik_ingress_controller_tag to enable specifying the traefik container version.
5 | fixes: 6 | - | 7 | Traefik container now defaults to a fixed tag (v1.7.10) instead of tag 8 | (latest) 9 | -------------------------------------------------------------------------------- /releasenotes/notes/stats-api-68bc66147ac027e6.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - This release introduces 'stats' endpoint that provide the 4 | total number of clusters and the total number of nodes 5 | for the given tenant and also overall stats across all 6 | the tenants. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/story-2008548-65a571ad15451937.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fixes an issue with cluster deletion if load balancers do not exist. See 5 | `story 2008548 ` for 6 | details. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/strip-ca-certificate-a09d0c31c45973df.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Strip signed certificate. Certificate (ca.crt) has to be striped 5 | for some application parsers as they might require pure base64 6 | representation of the certificate itself, without empty characters 7 | at the beginning nor the end of file. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/support-all-tenants-for-admin-a042f5c520d35837.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Now admin user can access all clusters across projects. 5 | 6 | -------------------------------------------------------------------------------- /releasenotes/notes/support-auto-healing-3e07c16c55209b0a.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Using Node Problem Detector, Draino and AutoScaler to support 5 | auto healing for K8s cluster, user can use a new label 6 | "auto_healing_enabled' to turn on/off it. 7 | 8 | Meanwhile, a new label "auto_scaling_enabled" is also introduced 9 | to enable the capability to let the k8s cluster auto scale based 10 | its workload. 11 | 12 | -------------------------------------------------------------------------------- /releasenotes/notes/support-auto-healing-controller-333d1266918111e9.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - A new tag ``auto_healing_controller`` is introduced to allow the user to 4 | choose the auto-healing service when ``auto_healing_enabled`` is specified 5 | in the labels, ``draino`` and ``magnum-auto-healer`` are supported for now. 6 | Another label ``magnum_auto_healer_tag`` is also added to specify the 7 | ``magnum-auto-healer`` image tag. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/support-docker-storage-driver-for-fedora-coreos-697ffcc47e7e8359.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | issues: 3 | - | 4 | Now Fedora CoreOS driver can support using docker storage driver, 5 | only overlay2 is supported. 
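As an illustration of the auto healing labels introduced in the support-auto-healing notes above (the template name, image name and tag value are illustrative)::

    openstack coe cluster template create k8s-autohealing-template \
      --coe kubernetes --image fedora-coreos-latest --external-network public \
      --labels auto_healing_enabled=true,auto_healing_controller=magnum-auto-healer,magnum_auto_healer_tag=v1.18.0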
6 | -------------------------------------------------------------------------------- /releasenotes/notes/support-dockershim-removal-cad104d069f1a50b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Support K8s 1.24 which removed support of dockershim. Needs containerd as 5 | container runtime. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/support-fedora-atomic-os-upgrade-9f47182b21c6c028.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Along with the kubernetes version upgrade support we just released, we're 5 | adding the support to upgrade the operating system of the k8s cluster 6 | (including master and worker nodes). It's an inplace upgrade leveraging the 7 | atomic/ostree upgrade capability. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/support-multi-dns-server-0528be20f0e6aa62.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Support multi DNS server when creating template. 4 | User can use a comma delimited ipv4 address list 5 | to specify multi dns server, for example 6 | "8.8.8.8,114.114.114.114" -------------------------------------------------------------------------------- /releasenotes/notes/support-octavia-for-k8s-service-d5d7fd041f9d76fa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | In the OpenStack deployment with Octavia service enabled, the Octavia 5 | service should be used not only for master nodes high availability, but 6 | also for k8s LoadBalancer type service implementation as well. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/support-post-install-file-1fe7afe7698dd7b2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | A new config option `post_install_manifest_url` is added to support installing 5 | cloud provider/vendor specific manifest after booted the k8s cluster. 6 | It's an URL pointing to the manifest file. For example, cloud admin 7 | can set their specific storageclass into this file, then it will be 8 | automatically setup after created the cluster. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/support-rotate-ca-certs-913a6ef1b571733c.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Kubernetes cluster owner can now do CA cert rotate to re-generate CA of 5 | the cluster, service account keys and the certs of all nodes will 6 | be regenerated as well. Cluster user needs to get a new kubeconfig 7 | to access kubernetes API. This function is only supported by 8 | Fedora CoreOS driver. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/support-selinux-mode-5bd2a3ece23a2caa.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add selinux_mode label. By default, selinux_mode=permissive with Fedora 5 | Atomic driver and selinux_mode=enforcing with Fedora CoreOS. 
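The multi DNS server support described in the support-multi-dns-server note above is used through the existing ``--dns-nameserver`` template argument, for example (a sketch using the addresses from the note; the template and image names are illustrative)::

    openstack coe cluster template create k8s-multidns-template \
      --coe kubernetes --image fedora-atomic-latest --external-network public \
      --dns-nameserver 8.8.8.8,114.114.114.114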
6 | -------------------------------------------------------------------------------- /releasenotes/notes/support-sha256-verification-for-hyperkube-fb2292c6a8bb00ba.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Now the Fedora CoreOS driver supports sha256 verification of the 5 | hyperkube image when bootstrapping the Kubernetes cluster. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/support-updating-k8s-cluster-health-via-api-b8a3cac3031c50a5.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | The original design of the k8s cluster health status allowed 5 | the health status to be updated by the Magnum control plane. However, 6 | this doesn't work when the cluster is private. Now Magnum supports 7 | updating the k8s cluster health status via the Magnum cluster 8 | update API so that a controller (e.g. magnum-auto-healer) running 9 | inside the k8s cluster can call the Magnum update API to update 10 | the cluster health status. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/support-upgrade-on-behalf-of-user-c04994831360f8c1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | A cloud admin user can now do a rolling upgrade on behalf of an end 5 | user in order to do urgent security patching when necessary. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/support_nodes_affinity_policy-22253fb9cf6739ec.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | issues: 3 | - | 4 | Enhancement to support an affinity policy for cluster nodes. Before this patch, 5 | there was no way to guarantee that all nodes of a cluster are created on different 6 | compute hosts to get high availability. 7 | 8 | -------------------------------------------------------------------------------- /releasenotes/notes/swarm-integration-with-cinder-e3068138a3f75dbe.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Integrate the Docker Swarm Fedora Atomic driver with the 4 | Block Storage Service (cinder). The rexray volume 5 | driver was added based on rexray v0.4. Users can 6 | create and attach volumes using docker's native 7 | client and they will authenticate using the per 8 | cluster trustee user. Rexray can either be added 9 | to the Fedora Atomic image or be run 10 | in a container. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/swarm-live-restore-b03ad192367abced.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Fixed a bug where --live-restore was passed to the Docker daemon, causing the 5 | swarm init to fail. Magnum now ensures that --live-restore is not passed 6 | to the Docker daemon if it is the default in an image.
7 | -------------------------------------------------------------------------------- /releasenotes/notes/sync-service-account-keys-for-multi-masters-71217c4cf4dd472c.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Multi-master deployments for the k8s driver used a different service account 5 | key for each api/controller manager server, which led to 401 errors 6 | for service accounts. This patch creates a signed cert and private key 7 | for the k8s service account keys explicitly, dedicated to the k8s 8 | cluster, to avoid the inconsistent keys issue. 9 | 10 | -------------------------------------------------------------------------------- /releasenotes/notes/traefik-compatible-with-k8s-1-16-9a9ef6d3ccc92fb4.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Bump up traefik to 1.7.19 for compatibility with Kubernetes 1.16.x. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/update-certificate-api-policy-rules-027c80f2c9ff4598.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fixes: 3 | - | 4 | Remove the cluster user check from the rules in the default policy for 5 | the Certificate APIs to reflect recent fixes 6 | (https://review.opendev.org/c/openstack/magnum/+/889144). 7 | -------------------------------------------------------------------------------- /releasenotes/notes/update-containerd-version-url-c095c0ee3c1a538b.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | The default containerd version is updated to 1.4.3. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/update-flannel-version.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Updates flannel to version 0.15.1. 5 | This addresses an issue where pods on multinode 6 | installations do not have network connectivity 7 | if they are spawned on different hosts. 8 | `More details `_ -------------------------------------------------------------------------------- /releasenotes/notes/update-kubernetes-dashboard-5196831c32d55aee.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Update the kubernetes dashboard to `v1.8.3`, which is compatible with kubectl 5 | proxy. Additionally, heapster is deployed as a standalone deployment and the 6 | user can enable a grafana-influx stack with the 7 | `influx_grafana_dashboard_enabled` label. See the kubernetes dashboard 8 | documentation for more details. https://github.com/kubernetes/dashboard/wiki 9 | -------------------------------------------------------------------------------- /releasenotes/notes/update-swarm-73d4340a881bff2f.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - Update the Swarm default version to 1.2.5. 4 | This should be the last standalone Swarm version, since the Docker team 5 | is now working on the new Swarm mode integrated into Docker. 
6 | -------------------------------------------------------------------------------- /releasenotes/notes/update-to-f27-cc8aa873cdf111bc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Update the k8s_fedora_atomic driver to the latest Fedora Atomic 27 release 5 | and run etcd and flanneld in system containers, as they have been removed from 6 | the base OS. 7 | upgrade: 8 | - | 9 | New clusters should be created with kube_tag=v1.9.3 or later. v1.9.3 is 10 | the default version in the Queens release. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/update-traefik-min-tls-protocol-de7e36de90c1a2f3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Upgrade the traefik version to v1.7.28. 5 | security: 6 | - | 7 | Force traefik https port connections to use TLSv1.2 or greater. 8 | -------------------------------------------------------------------------------- /releasenotes/notes/update_prometheus_monitoring-342a86f826be6579.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | Add the cluster_uuid label to the metrics exported via prometheus federation. 5 | upgrade: 6 | - | 7 | Bumped the prometheus-operator chart tag to 8.12.13. 8 | Added container_infra_prefix to the missing prometheusOperator images. 9 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade-api-975233ab93c0c092.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | A new API endpoint /actions/upgrade is added to support rolling 5 | upgrades of the base OS of nodes and of the Kubernetes version. For more 6 | details, please refer to the API Reference document. 7 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade-api-heat-removal-300f15d863515257.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | deprecations: 3 | - | 4 | Remove support for cluster upgrades with the Heat driver. 5 | The Heat driver can no longer support cluster upgrades, as these were 6 | unreliable and untested. The action now returns an HTTP 500 error. 7 | A Cluster API driver provides a way forward for Magnum to support this 8 | API action again for Kubernetes. 9 | In the meantime, blue/green deployments, where a replacement cluster is 10 | created, remain a viable alternative to cluster upgrades. 11 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade-calico-6912a6f4fb5c21de.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | The default Calico version has been upgraded from v3.3.6 to v3.13.1. 5 | Calico v3.3.6 is still a valid option. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade-coredns-25f3879c3a658309.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | The default CoreDNS version has been upgraded to 1.6.6 and it 5 | can now be scheduled to master nodes. 
6 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade-etcd-and-use-quay-io-coreos-etcd-1cb8e38e974f5975.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Upgrade etcd to v3.4.6 and use quay.io/coreos/etcd, since the tags there follow 5 | the same format as https://github.com/etcd-io/etcd/releases, unlike 6 | k8s.gcr.io, which modifies the canonical version tag. Users will need to pay 7 | attention to the format of etcd_tag, e.g. v3.4.5 is valid whereas 3.4.5 is 8 | not. Existing cluster templates and clusters which use the latter will 9 | fail to complete. 10 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade-flannel-db5ef049e23fc4a8.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | Upgrade the flannel version to v0.12.0-amd64 for the Fedora CoreOS driver. 5 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade-to-k8s-v1.11.1-8065fd768873295d.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | upgrade: 3 | - | 4 | New clusters will be created with kube_tag=v1.11.1 or later. v1.11.1 is 5 | the default version in the Rocky release. 6 | -------------------------------------------------------------------------------- /releasenotes/notes/upgrade_api-1fecc206e5b0ef99.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | features: 3 | - | 4 | The cluster upgrade API supports upgrading specific nodegroups 5 | in kubernetes clusters. If a user chooses a default nodegroup to 6 | be upgraded, then both of the default nodegroups will be upgraded 7 | since they are in one stack. For non-default nodegroups, users are 8 | allowed to use only the cluster template already set in the 9 | cluster. This means that the cluster (default nodegroups) has to 10 | be upgraded first. For now, the only label that is 11 | taken into consideration during upgrades is kube_tag. All 12 | other labels are ignored. 13 | -------------------------------------------------------------------------------- /releasenotes/notes/using-vxlan-for-flannel-backend-8d82a290ca97d6e2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | other: 3 | - | 4 | The default value of flannel_backend is changed from `udp` to `vxlan`, 5 | based on the recommendation at 6 | https://github.com/coreos/flannel/blob/master/Documentation/backends.md 7 | -------------------------------------------------------------------------------- /releasenotes/source/2023.1.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2023.1 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: unmaintained/2023.1 7 | -------------------------------------------------------------------------------- /releasenotes/source/2023.2.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2023.2 Series Release Notes 3 | =========================== 4 | 5 | .. 
release-notes:: 6 | :branch: stable/2023.2 7 | -------------------------------------------------------------------------------- /releasenotes/source/2024.1.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2024.1 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/2024.1 7 | -------------------------------------------------------------------------------- /releasenotes/source/2024.2.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2024.2 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/2024.2 7 | -------------------------------------------------------------------------------- /releasenotes/source/2025.1.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | 2025.1 Series Release Notes 3 | =========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/2025.1 7 | -------------------------------------------------------------------------------- /releasenotes/source/_static/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/releasenotes/source/_static/.placeholder -------------------------------------------------------------------------------- /releasenotes/source/_templates/.placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/magnum/ef1a554e91da1ec10f06eb00c861c3896ea8dada/releasenotes/source/_templates/.placeholder -------------------------------------------------------------------------------- /releasenotes/source/index.rst: -------------------------------------------------------------------------------- 1 | .. Magnum Release Notes documentation master file, created by 2 | sphinx-quickstart on Tue Mar 29 10:17:02 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Magnum Release Notes's documentation! 7 | ================================================ 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | unreleased 15 | 2025.1 16 | 2024.2 17 | 2024.1 18 | 2023.2 19 | 2023.1 20 | zed 21 | yoga 22 | xena 23 | wallaby 24 | victoria 25 | ussuri 26 | train 27 | stein 28 | rocky 29 | queens 30 | pike 31 | ocata 32 | newton 33 | mitaka 34 | liberty 35 | 36 | Indices and tables 37 | ================== 38 | 39 | * :ref:`genindex` 40 | * :ref:`search` 41 | -------------------------------------------------------------------------------- /releasenotes/source/liberty.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | Liberty Series Release Notes 3 | ============================= 4 | 5 | .. release-notes:: 6 | :branch: origin/stable/liberty 7 | -------------------------------------------------------------------------------- /releasenotes/source/mitaka.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Mitaka Series Release Notes 3 | ============================ 4 | 5 | .. 
release-notes:: 6 | :branch: origin/stable/mitaka 7 | -------------------------------------------------------------------------------- /releasenotes/source/newton.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Newton Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: origin/stable/newton 7 | -------------------------------------------------------------------------------- /releasenotes/source/ocata.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Ocata Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: origin/stable/ocata 7 | -------------------------------------------------------------------------------- /releasenotes/source/pike.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Pike Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/pike 7 | -------------------------------------------------------------------------------- /releasenotes/source/queens.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Queens Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/queens 7 | -------------------------------------------------------------------------------- /releasenotes/source/rocky.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Rocky Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/rocky 7 | -------------------------------------------------------------------------------- /releasenotes/source/stein.rst: -------------------------------------------------------------------------------- 1 | =================================== 2 | Stein Series Release Notes 3 | =================================== 4 | 5 | .. release-notes:: 6 | :branch: stable/stein 7 | -------------------------------------------------------------------------------- /releasenotes/source/train.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Train Series Release Notes 3 | ========================== 4 | 5 | .. release-notes:: 6 | :branch: stable/train 7 | -------------------------------------------------------------------------------- /releasenotes/source/unreleased.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Current Series Release Notes 3 | ============================ 4 | 5 | .. release-notes:: 6 | -------------------------------------------------------------------------------- /releasenotes/source/ussuri.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | Ussuri Series Release Notes 3 | =========================== 4 | 5 | .. 
release-notes:: 6 | :branch: stable/ussuri 7 | -------------------------------------------------------------------------------- /releasenotes/source/victoria.rst: -------------------------------------------------------------------------------- 1 | ============================= 2 | Victoria Series Release Notes 3 | ============================= 4 | 5 | .. release-notes:: 6 | :branch: stable/victoria 7 | -------------------------------------------------------------------------------- /releasenotes/source/wallaby.rst: -------------------------------------------------------------------------------- 1 | ============================ 2 | Wallaby Series Release Notes 3 | ============================ 4 | 5 | .. release-notes:: 6 | :branch: stable/wallaby 7 | -------------------------------------------------------------------------------- /releasenotes/source/xena.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Xena Series Release Notes 3 | ========================= 4 | 5 | .. release-notes:: 6 | :branch: stable/xena 7 | -------------------------------------------------------------------------------- /releasenotes/source/yoga.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Yoga Series Release Notes 3 | ========================= 4 | 5 | .. release-notes:: 6 | :branch: unmaintained/yoga 7 | -------------------------------------------------------------------------------- /releasenotes/source/zed.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Zed Series Release Notes 3 | ======================== 4 | 5 | .. release-notes:: 6 | :branch: unmaintained/zed 7 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | import setuptools 17 | 18 | setuptools.setup( 19 | setup_requires=['pbr>=2.0.0'], 20 | pbr=True) 21 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | bandit!=1.6.0,>=1.1.0 # Apache-2.0 2 | bashate>=2.0.0 # Apache-2.0 3 | coverage>=5.3 # Apache-2.0 4 | doc8>=0.8.1 # Apache-2.0 5 | fixtures>=3.0.0 # Apache-2.0/BSD 6 | hacking>=6.1.0,<6.2.0 # Apache-2.0 7 | oslotest>=4.4.1 # Apache-2.0 8 | osprofiler>=3.4.0 # Apache-2.0 9 | Pygments>=2.7.2 # BSD license 10 | python-subunit>=1.4.0 # Apache-2.0/BSD 11 | requests-mock>=1.2.0 # Apache-2.0 12 | testrepository>=0.0.20 # Apache-2.0/BSD 13 | stestr>=3.1.0 # Apache-2.0 14 | testscenarios>=0.4 # Apache-2.0/BSD 15 | testtools>=2.4.0 # MIT 16 | WebTest>=2.0.27 # MIT 17 | -------------------------------------------------------------------------------- /tools/flake8wrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # A simple wrapper around flake8 which makes it possible 4 | # to ask it to only verify files changed in the current 5 | # git HEAD patch. 6 | # 7 | # Intended to be invoked via tox: 8 | # 9 | # tox -epep8 -- -HEAD 10 | # 11 | 12 | if test "x$1" = "x-HEAD" ; then 13 | shift 14 | files=$(git diff --name-only HEAD~1 | tr '\n' ' ') 15 | echo "Running flake8 on ${files}" 16 | diff -u --from-file /dev/null ${files} | flake8 --max-complexity 10 --diff "$@" 17 | else 18 | echo "Running flake8 on all files" 19 | exec flake8 --max-complexity 10 "$@" 20 | fi 21 | --------------------------------------------------------------------------------
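A minimal usage sketch for the flake8 wrapper above, assuming the project's pep8 tox environment dispatches to tools/flake8wrap.sh as its header comment suggests; the direct invocation shown is likewise an illustration based on the script's own logic, not taken from the repository:

    # Check only the files changed in the most recent commit;
    # tox forwards "-HEAD" to the wrapper after the "--" separator.
    tox -epep8 -- -HEAD

    # Run the wrapper directly to lint the whole tree with flake8.
    ./tools/flake8wrap.sh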