├── .coveragerc ├── .gitignore ├── .mailmap ├── .stestr.conf ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── HACKING.rst ├── LICENSE ├── README.rst ├── babel.cfg ├── devstack ├── README.rst ├── plugin.sh ├── sample-local.conf ├── settings └── sr_agent ├── doc ├── requirements.txt └── source │ ├── admin │ └── index.rst │ ├── cli │ └── index.rst │ ├── conf.py │ ├── configuration │ └── index.rst │ ├── contributor │ ├── contributing.rst │ └── index.rst │ ├── index.rst │ ├── install │ ├── common_configure.rst │ ├── common_prerequisites.rst │ ├── get_started.rst │ ├── index.rst │ ├── install-obs.rst │ ├── install-rdo.rst │ ├── install-ubuntu.rst │ ├── install.rst │ ├── next-steps.rst │ └── verify.rst │ ├── library │ └── index.rst │ ├── readme.rst │ ├── reference │ └── index.rst │ └── user │ └── index.rst ├── etc └── neutron │ └── policy.d │ └── srv6.conf ├── networking_sr ├── __init__.py ├── agent │ ├── __init__.py │ ├── interface.py │ ├── iptables_vrf_firewall.py │ └── rpc.py ├── cmd │ ├── __init__.py │ └── eventlet │ │ ├── __init__.py │ │ ├── sr_agent.py │ │ └── srgw_agent.py ├── common │ ├── __init__.py │ ├── config.py │ └── vrf_utils.py ├── db │ ├── __init__.py │ ├── alembic.ini │ ├── migration │ │ ├── __init__.py │ │ └── alembic_migrations │ │ │ ├── README │ │ │ ├── __init__.py │ │ │ ├── env.py │ │ │ ├── script.py.mako │ │ │ └── versions │ │ │ ├── CONTRACT_HEAD │ │ │ ├── EXPAND_HEAD │ │ │ ├── __init__.py │ │ │ └── train │ │ │ ├── contract │ │ │ └── 927a16680421_initial.py │ │ │ └── expand │ │ │ └── 4db8684b17e9_initial.py │ └── srv6_encap_net_db.py ├── extensions │ ├── __init__.py │ └── srv6_encap_network.py ├── ml2 │ ├── __init__.py │ ├── agent │ │ ├── __init__.py │ │ ├── dnsmasq_manager.py │ │ ├── sr_agent.py │ │ └── sr_agent_loop.py │ ├── mech_driver │ │ ├── __init__.py │ │ └── mech_sr.py │ ├── type_srv6.py │ └── type_srv6vrf.py ├── objects │ ├── __init__.py │ └── srv6_encap_network.py ├── services │ ├── __init__.py │ └── plugin.py └── tests │ ├── __init__.py │ 
├── base.py │ └── test_networking_sr.py ├── releasenotes ├── notes │ └── .placeholder └── source │ ├── _static │ └── .placeholder │ ├── _templates │ └── .placeholder │ ├── conf.py │ ├── index.rst │ └── unreleased.rst ├── requirements.txt ├── setup.cfg ├── setup.py ├── test-requirements.txt └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = networking_sr 4 | 5 | [report] 6 | ignore_errors = True 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Add patterns in here to exclude files created by tools integrated with this 2 | # repository, such as test frameworks from the project's recommended workflow, 3 | # rendered documentation and package builds. 4 | # 5 | # Don't add patterns to exclude files created by preferred personal tools 6 | # (editors, IDEs, your operating system itself even). 
These should instead be 7 | # maintained outside the repository, for example in a ~/.gitignore file added 8 | # with: 9 | # 10 | # git config --global core.excludesfile '~/.gitignore' 11 | 12 | # Bytecompiled Python 13 | *.py[cod] 14 | 15 | # C extensions 16 | *.so 17 | 18 | # Packages 19 | *.egg* 20 | *.egg-info 21 | dist 22 | build 23 | eggs 24 | parts 25 | bin 26 | var 27 | sdist 28 | develop-eggs 29 | .installed.cfg 30 | lib 31 | lib64 32 | 33 | # Installer logs 34 | pip-log.txt 35 | 36 | # Unit test / coverage reports 37 | cover/ 38 | .coverage* 39 | !.coveragerc 40 | .tox 41 | nosetests.xml 42 | .testrepository 43 | .stestr 44 | .venv 45 | 46 | # Translations 47 | *.mo 48 | 49 | # Complexity 50 | output/*.html 51 | output/*/index.html 52 | 53 | # Sphinx 54 | doc/build 55 | 56 | # pbr generates these 57 | AUTHORS 58 | ChangeLog 59 | 60 | # Files created by releasenotes build 61 | releasenotes/build 62 | 63 | # Emacs 64 | *~ -------------------------------------------------------------------------------- /.mailmap: -------------------------------------------------------------------------------- 1 | # Format is: 2 | # 3 | # 4 | -------------------------------------------------------------------------------- /.stestr.conf: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | test_path=./networking_sr/tests 3 | top_dir=./ 4 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, 
race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic 23 | address, without explicit permission 24 | * Other conduct which could reasonably be considered inappropriate in a 25 | professional setting 26 | 27 | ## Our Responsibilities 28 | 29 | Project maintainers are responsible for clarifying the standards of acceptable 30 | behavior and are expected to take appropriate and fair corrective action in 31 | response to any instances of unacceptable behavior. 32 | 33 | Project maintainers have the right and responsibility to remove, edit, or 34 | reject comments, commits, code, wiki edits, issues, and other contributions 35 | that are not aligned to this Code of Conduct, or to ban temporarily or 36 | permanently any contributor for other behaviors that they deem inappropriate, 37 | threatening, offensive, or harmful. 38 | 39 | ## Scope 40 | 41 | This Code of Conduct applies both within project spaces and in public spaces 42 | when an individual is representing the project or its community. Examples of 43 | representing a project or community include using an official project e-mail 44 | address, posting via an official social media account, or acting as an appointed 45 | representative at an online or offline event. 
Representation of a project may be 46 | further defined and clarified by project maintainers. 47 | 48 | ## Enforcement 49 | 50 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 51 | reported by contacting the project team at dl_oss_dev@linecorp.com. All 52 | complaints will be reviewed and investigated and will result in a response that 53 | is deemed necessary and appropriate to the circumstances. The project team is 54 | obligated to maintain confidentiality with regard to the reporter of an incident. 55 | Further details of specific enforcement policies may be posted separately. 56 | 57 | Project maintainers who do not follow or enforce the Code of Conduct in good 58 | faith may face temporary or permanent repercussions as determined by other 59 | members of the project's leadership. 60 | 61 | ## Attribution 62 | 63 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 64 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 65 | 66 | [homepage]: https://www.contributor-covenant.org 67 | 68 | For answers to common questions about this code of conduct, see 69 | https://www.contributor-covenant.org/faq 70 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## How to contribute to networking-sr 2 | 3 | First of all, thank you so much for taking your time to contribute! We always welcome your ideas and feedback. Please feel free to make any pull requests. 4 | 5 | * File an issue in [the issue tracker](https://github.com/line/networking-sr/issues) to report bugs and propose new features and improvements. 6 | * Ask a question using [the issue tracker](https://github.com/line/networking-sr/issues). 7 | * Contribute your work by sending [a pull request](https://github.com/line/networking-sr/pulls). 
8 | 9 | ### Contributor license agreement 10 | 11 | When you are sending a pull request and it's a non-trivial change beyond fixing typos, please sign 12 | [the ICLA (individual contributor license agreement)](https://cla-assistant.io/line/networking-sr). 13 | Please [contact us](mailto:dl_oss_dev@linecorp.com) if you need the CCLA (corporate contributor license agreement). 14 | 15 | ### Code of conduct 16 | 17 | We expect contributors to follow [our code of conduct](CODE_OF_CONDUCT.md). 18 | -------------------------------------------------------------------------------- /HACKING.rst: -------------------------------------------------------------------------------- 1 | networking-sr Style Commandments 2 | =============================================== 3 | 4 | Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 
176 | 177 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | =============================== 2 | networking-sr 3 | =============================== 4 | 5 | Networking Segment Routing is neutron plugin to manage segment routing in openstack. 6 | 7 | The networking-sr provides SRv6(Segment Routing for IPv6) networks as neutron network resources. This project can integrate with OpenStack Neutron using ML2 mechanism of Neutron. Users can create VMs with own segment network(Tenant network) isolated from other segment networks. The VMs can access to other VMs with same segment network but they cannot access to other VMs with other segment network only. This achieves Multi-tenancy network for users and projects. 8 | 9 | ------------------------------- 10 | Features 11 | ------------------------------- 12 | The networking-sr project includes the following features. 13 | 14 | * ML2 mechanism driver, type driver, and ML2 agent 15 | * Service plugin for extension API to add SRv6 encap rule 16 | 17 | The networking-sr project currently doesn't have the following feature. 18 | 19 | * Gateway agent for network node 20 | 21 | Note: This project is PoC because we removed and changed many codes from our production code to support latest OpenStack version. And also we have some patches to Nova and Neutron for known issues of networking-sr but they are not included in this repository. 
22 | 23 | ------------------------------- 24 | Known issues 25 | ------------------------------- 26 | * Use Kernel 5.XX and not support Kernel 4.XX 27 | * DHCP address isn't released because DHCP release packet isn't sent 28 | 29 | ------------------------------- 30 | Performance points 31 | ------------------------------- 32 | * NIC offload issues 33 | * SRH overheads 34 | * VRF lookup 35 | * linuxbridge between tap and VRF 36 | * iptables for security group 37 | 38 | ------------------------------- 39 | TODO 40 | ------------------------------- 41 | * Add gateway agent 42 | 43 | ------------------------------- 44 | Documentation 45 | ------------------------------- 46 | * https://speakerdeck.com/line_developers/line-data-center-networking-with-srv6 47 | 48 | ------------------------------- 49 | License 50 | ------------------------------- 51 | 52 | :: 53 | 54 | Copyright 2020 LINE Corporation 55 | 56 | Licensed under the Apache License, Version 2.0 (the "License"); 57 | you may not use this file except in compliance with the License. 58 | You may obtain a copy of the License at 59 | 60 | http://www.apache.org/licenses/LICENSE-2.0 61 | 62 | Unless required by applicable law or agreed to in writing, software 63 | distributed under the License is distributed on an "AS IS" BASIS, 64 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 65 | See the License for the specific language governing permissions and 66 | limitations under the License. 67 | 68 | See `LICENSE `_ for more details. 
69 | -------------------------------------------------------------------------------- /babel.cfg: -------------------------------------------------------------------------------- 1 | [python: **.py] 2 | 3 | -------------------------------------------------------------------------------- /devstack/README.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | Enabling in Devstack 3 | ====================== 4 | 5 | 1. Download `DevStack `_ 6 | 7 | 2. Copy the sample local.conf over:: 8 | 9 | $ cp devstack/sample-local.conf local.conf 10 | 11 | 3. Run stack.sh:: 12 | 13 | $ ./stack.sh 14 | -------------------------------------------------------------------------------- /devstack/plugin.sh: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
12 | 13 | # Save trace setting 14 | _XTRACE_NEUTRON_SR=$(set +o | grep xtrace) 15 | set +o xtrace 16 | 17 | dir=${GITDIR['networking-sr']} 18 | 19 | source $dir/devstack/settings 20 | source $dir/devstack/sr_agent 21 | 22 | if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then 23 | # n-api-metadata uses port 80 24 | sudo sed -i 's/Listen 80/Listen 8000/' /etc/httpd/conf/httpd.conf 25 | # Give a capability to use port 80 26 | if [ "$NOVA_USE_MOD_WSGI" == "False" ]; then 27 | sudo setcap 'cap_net_bind_service=+ep' "$NOVA_BIN_DIR/nova-api-metadata" 28 | else 29 | sudo setcap 'cap_net_bind_service=+ep' "$NOVA_BIN_DIR/uwsgi" 30 | fi 31 | fi 32 | 33 | # Restore xtrace 34 | $_XTRACE_NEUTRON_SR 35 | -------------------------------------------------------------------------------- /devstack/sample-local.conf: -------------------------------------------------------------------------------- 1 | [[local|localrc]] 2 | HOST_IP=127.0.0.1 3 | MYSQL_PASSWORD=mysql 4 | RABBIT_PASSWORD=rabbitmq 5 | ADMIN_PASSWORD=secret 6 | SERVICE_PASSWORD=secret 7 | 8 | NEUTRON_CREATE_INITIAL_NETWORKS=False 9 | ENABLE_ISOLATED_METADATA=True 10 | ENABLE_METADATA_NETWORK=True 11 | 12 | NOVA_VNC_ENABLED=True 13 | VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP 14 | VNCSERVER_LISTEN=0.0.0.0 15 | 16 | enable_plugin networking-sr https://github.com/line/networking-sr.git master 17 | 18 | [[post-config|/$Q_PLUGIN_CONF_FILE]] 19 | [ml2] 20 | type_drivers=srv6,srv6lb 21 | tenant_network_types=srv6,srv6lb 22 | mechanism_drivers=sr 23 | -------------------------------------------------------------------------------- /devstack/settings: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. 
You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | SEGMENT_NODE_ID=${SEGMENT_NODE_ID:-} 16 | SEGMENT_GW_ID=${SEGMENT_GW_ID:-} 17 | SRV6_INTERFACES=${SRV6_INTERFACES:-eth1} 18 | -------------------------------------------------------------------------------- /devstack/sr_agent: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
14 | 15 | # Save trace setting 16 | _XTRACE_NEUTRON_SR=$(set +o | grep xtrace) 17 | set +o xtrace 18 | 19 | function neutron_plugin_create_nova_conf { 20 | : 21 | } 22 | 23 | function neutron_plugin_install_agent_packages { 24 | : 25 | } 26 | 27 | function is_neutron_ovs_base_plugin { 28 | return 1 29 | } 30 | 31 | function neutron_plugin_configure_debug_command { 32 | : 33 | } 34 | 35 | function neutron_plugin_configure_dhcp_agent { 36 | : 37 | } 38 | 39 | function neutron_plugin_configure_l3_agent { 40 | : 41 | } 42 | 43 | function neutron_plugin_configure_plugin_agent { 44 | 45 | AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-sr-agent" 46 | iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver iptables_vrf 47 | 48 | # Install networking-sr 49 | dir=${GITDIR['networking-sr']} 50 | setup_package $dir 51 | 52 | # Install policy config 53 | mkdir -p $NEUTRON_CONF_DIR/policy.d 54 | cp $dir/etc/neutron/policy.d/srv6.conf $NEUTRON_CONF_DIR/policy.d 55 | 56 | iniset /$Q_PLUGIN_CONF_FILE sr segment_node_id $SEGMENT_NODE_ID 57 | iniset /$Q_PLUGIN_CONF_FILE sr segment_gw_id $SEGMENT_GW_ID 58 | iniset /$Q_PLUGIN_CONF_FILE sr srv6_interfaces $SRV6_INTERFACES 59 | 60 | neutron_service_plugin_class_add sr 61 | } 62 | 63 | function neutron_plugin_setup_interface_driver { 64 | local conf_file=$1 65 | # TODO: Adds sr interface_driver 66 | iniset $conf_file DEFAULT interface_driver sr 67 | } 68 | 69 | 70 | #function has_neutron_plugin_security_group { 71 | # # 1 means False here 72 | # return 0 73 | #} 74 | 75 | function neutron_plugin_check_adv_test_requirements { 76 | is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 77 | } 78 | 79 | 80 | function has_neutron_plugin_security_group { 81 | return 1 82 | } 83 | 84 | # Restore xtrace 85 | $_XTRACE_NEUTRON_SR 86 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | 
sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD 2 | openstackdocstheme>=1.18.1 # Apache-2.0 3 | # releasenotes 4 | reno>=2.5.0 # Apache-2.0 5 | -------------------------------------------------------------------------------- /doc/source/admin/index.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | Administrators guide 3 | ==================== 4 | 5 | Administrators guide of networking-sr. 6 | -------------------------------------------------------------------------------- /doc/source/cli/index.rst: -------------------------------------------------------------------------------- 1 | ================================ 2 | Command line interface reference 3 | ================================ 4 | 5 | CLI reference of networking-sr. 6 | -------------------------------------------------------------------------------- /doc/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # Licensed under the Apache License, Version 2.0 (the "License"); 3 | # you may not use this file except in compliance with the License. 4 | # You may obtain a copy of the License at 5 | # 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # Unless required by applicable law or agreed to in writing, software 9 | # distributed under the License is distributed on an "AS IS" BASIS, 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 11 | # implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import os 16 | import sys 17 | 18 | sys.path.insert(0, os.path.abspath('../..')) 19 | # -- General configuration ---------------------------------------------------- 20 | 21 | # Add any Sphinx extension module names here, as strings. They can be 22 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
23 | extensions = [ 24 | 'sphinx.ext.autodoc', 25 | 'openstackdocstheme', 26 | #'sphinx.ext.intersphinx', 27 | ] 28 | 29 | # autodoc generation is a bit aggressive and a nuisance when doing heavy 30 | # text edit cycles. 31 | # execute "export SPHINX_DEBUG=1" in your terminal to disable 32 | 33 | # The suffix of source filenames. 34 | source_suffix = '.rst' 35 | 36 | # The master toctree document. 37 | master_doc = 'index' 38 | 39 | # General information about the project. 40 | project = u'networking-sr' 41 | copyright = u'2017, OpenStack Developers' 42 | 43 | # openstackdocstheme options 44 | repository_name = 'openstack/networking-sr' 45 | bug_project = 'replace with the name of the project on Launchpad or the ID from Storyboard' 46 | bug_tag = '' 47 | html_last_updated_fmt = '%Y-%m-%d %H:%M' 48 | 49 | # If true, '()' will be appended to :func: etc. cross-reference text. 50 | add_function_parentheses = True 51 | 52 | # If true, the current module name will be prepended to all description 53 | # unit titles (such as .. function::). 54 | add_module_names = True 55 | 56 | # The name of the Pygments (syntax highlighting) style to use. 57 | pygments_style = 'sphinx' 58 | 59 | # -- Options for HTML output -------------------------------------------------- 60 | 61 | # The theme to use for HTML and HTML Help pages. Major themes that come with 62 | # Sphinx are currently 'default' and 'sphinxdoc'. 63 | # html_theme_path = ["."] 64 | # html_theme = '_theme' 65 | # html_static_path = ['static'] 66 | html_theme = 'openstackdocs' 67 | 68 | # Output file base name for HTML help builder. 69 | htmlhelp_basename = '%sdoc' % project 70 | 71 | # Grouping the document tree into LaTeX files. List of tuples 72 | # (source start file, target name, title, author, documentclass 73 | # [howto/manual]). 
74 | latex_documents = [ 75 | ('index', 76 | '%s.tex' % project, 77 | u'%s Documentation' % project, 78 | u'OpenStack Developers', 'manual'), 79 | ] 80 | 81 | # Example configuration for intersphinx: refer to the Python standard library. 82 | #intersphinx_mapping = {'http://docs.python.org/': None} 83 | -------------------------------------------------------------------------------- /doc/source/configuration/index.rst: -------------------------------------------------------------------------------- 1 | ============= 2 | Configuration 3 | ============= 4 | 5 | Configuration of networking-sr. 6 | -------------------------------------------------------------------------------- /doc/source/contributor/contributing.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Contributing 3 | ============ 4 | .. include:: ../../../CONTRIBUTING.rst 5 | -------------------------------------------------------------------------------- /doc/source/contributor/index.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | Contributor Documentation 3 | =========================== 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | contributing 9 | 10 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. networking-sr documentation master file, created by 2 | sphinx-quickstart on Tue Jul 9 22:26:36 2013. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ============================================= 7 | Welcome to the documentation of networking_sr 8 | ============================================= 9 | 10 | Contents: 11 | 12 | .. 
toctree:: 13 | :maxdepth: 2 14 | 15 | readme 16 | install/index 17 | library/index 18 | contributor/index 19 | configuration/index 20 | cli/index 21 | user/index 22 | admin/index 23 | reference/index 24 | 25 | Indices and tables 26 | ================== 27 | 28 | * :ref:`genindex` 29 | * :ref:`modindex` 30 | * :ref:`search` 31 | -------------------------------------------------------------------------------- /doc/source/install/common_configure.rst: -------------------------------------------------------------------------------- 1 | 2. Edit the ``/etc/networking_sr/networking_sr.conf`` file and complete the following 2 | actions: 3 | 4 | * In the ``[database]`` section, configure database access: 5 | 6 | .. code-block:: ini 7 | 8 | [database] 9 | ... 10 | connection = mysql+pymysql://networking_sr:NETWORKING_SR_DBPASS@controller/networking_sr 11 | -------------------------------------------------------------------------------- /doc/source/install/common_prerequisites.rst: -------------------------------------------------------------------------------- 1 | Prerequisites 2 | ------------- 3 | 4 | Before you install and configure the Networking Segment Routing service, 5 | you must create a database, service credentials, and API endpoints. 6 | 7 | #. To create the database, complete these steps: 8 | 9 | * Use the database access client to connect to the database 10 | server as the ``root`` user: 11 | 12 | .. code-block:: console 13 | 14 | $ mysql -u root -p 15 | 16 | * Create the ``networking_sr`` database: 17 | 18 | .. code-block:: none 19 | 20 | CREATE DATABASE networking_sr; 21 | 22 | * Grant proper access to the ``networking_sr`` database: 23 | 24 | .. 
code-block:: none 25 | 26 | GRANT ALL PRIVILEGES ON networking_sr.* TO 'networking_sr'@'localhost' \ 27 | IDENTIFIED BY 'NETWORKING_SR_DBPASS'; 28 | GRANT ALL PRIVILEGES ON networking_sr.* TO 'networking_sr'@'%' \ 29 | IDENTIFIED BY 'NETWORKING_SR_DBPASS'; 30 | 31 | Replace ``NETWORKING_SR_DBPASS`` with a suitable password. 32 | 33 | * Exit the database access client. 34 | 35 | .. code-block:: none 36 | 37 | exit; 38 | 39 | #. Source the ``admin`` credentials to gain access to 40 | admin-only CLI commands: 41 | 42 | .. code-block:: console 43 | 44 | $ . admin-openrc 45 | 46 | #. To create the service credentials, complete these steps: 47 | 48 | * Create the ``networking_sr`` user: 49 | 50 | .. code-block:: console 51 | 52 | $ openstack user create --domain default --password-prompt networking_sr 53 | 54 | * Add the ``admin`` role to the ``networking_sr`` user: 55 | 56 | .. code-block:: console 57 | 58 | $ openstack role add --project service --user networking_sr admin 59 | 60 | * Create the networking_sr service entities: 61 | 62 | .. code-block:: console 63 | 64 | $ openstack service create --name networking_sr --description "Networking Segment Routing" networking segment routing 65 | 66 | #. Create the Networking Segment Routing service API endpoints: 67 | 68 | .. 
code-block:: console 69 | 70 | $ openstack endpoint create --region RegionOne \ 71 | networking segment routing public http://controller:XXXX/vY/%\(tenant_id\)s 72 | $ openstack endpoint create --region RegionOne \ 73 | networking segment routing internal http://controller:XXXX/vY/%\(tenant_id\)s 74 | $ openstack endpoint create --region RegionOne \ 75 | networking segment routing admin http://controller:XXXX/vY/%\(tenant_id\)s 76 | -------------------------------------------------------------------------------- /doc/source/install/get_started.rst: -------------------------------------------------------------------------------- 1 | =========================================== 2 | Networking Segment Routing service overview 3 | =========================================== 4 | The Networking Segment Routing service provides... 5 | 6 | The Networking Segment Routing service consists of the following components: 7 | 8 | ``networking_sr-api`` service 9 | Accepts and responds to end user compute API calls... 10 | -------------------------------------------------------------------------------- /doc/source/install/index.rst: -------------------------------------------------------------------------------- 1 | ===================================================== 2 | Networking Segment Routing service installation guide 3 | ===================================================== 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | get_started.rst 9 | install.rst 10 | verify.rst 11 | next-steps.rst 12 | 13 | The Networking Segment Routing service (networking_sr) provides... 14 | 15 | This chapter assumes a working setup of OpenStack following the 16 | `OpenStack Installation Tutorial 17 | `_. 18 | -------------------------------------------------------------------------------- /doc/source/install/install-obs.rst: -------------------------------------------------------------------------------- 1 | .. 
_install-obs: 2 | 3 | 4 | Install and configure for openSUSE and SUSE Linux Enterprise 5 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6 | 7 | This section describes how to install and configure the Networking Segment Routing service 8 | for openSUSE Leap 42.1 and SUSE Linux Enterprise Server 12 SP1. 9 | 10 | .. include:: common_prerequisites.rst 11 | 12 | Install and configure components 13 | -------------------------------- 14 | 15 | #. Install the packages: 16 | 17 | .. code-block:: console 18 | 19 | # zypper --quiet --non-interactive install 20 | 21 | .. include:: common_configure.rst 22 | 23 | 24 | Finalize installation 25 | --------------------- 26 | 27 | Start the Networking Segment Routing services and configure them to start when 28 | the system boots: 29 | 30 | .. code-block:: console 31 | 32 | # systemctl enable openstack-networking_sr-api.service 33 | 34 | # systemctl start openstack-networking_sr-api.service 35 | -------------------------------------------------------------------------------- /doc/source/install/install-rdo.rst: -------------------------------------------------------------------------------- 1 | .. _install-rdo: 2 | 3 | Install and configure for Red Hat Enterprise Linux and CentOS 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | 7 | This section describes how to install and configure the Networking Segment Routing service 8 | for Red Hat Enterprise Linux 7 and CentOS 7. 9 | 10 | .. include:: common_prerequisites.rst 11 | 12 | Install and configure components 13 | -------------------------------- 14 | 15 | #. Install the packages: 16 | 17 | .. code-block:: console 18 | 19 | # yum install 20 | 21 | .. include:: common_configure.rst 22 | 23 | Finalize installation 24 | --------------------- 25 | 26 | Start the Networking Segment Routing services and configure them to start when 27 | the system boots: 28 | 29 | .. 
code-block:: console 30 | 31 | # systemctl enable openstack-networking_sr-api.service 32 | 33 | # systemctl start openstack-networking_sr-api.service 34 | -------------------------------------------------------------------------------- /doc/source/install/install-ubuntu.rst: -------------------------------------------------------------------------------- 1 | .. _install-ubuntu: 2 | 3 | Install and configure for Ubuntu 4 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | This section describes how to install and configure the Networking Segment Routing 7 | service for Ubuntu 14.04 (LTS). 8 | 9 | .. include:: common_prerequisites.rst 10 | 11 | Install and configure components 12 | -------------------------------- 13 | 14 | #. Install the packages: 15 | 16 | .. code-block:: console 17 | 18 | # apt-get update 19 | 20 | # apt-get install 21 | 22 | .. include:: common_configure.rst 23 | 24 | Finalize installation 25 | --------------------- 26 | 27 | Restart the Networking Segment Routing services: 28 | 29 | .. code-block:: console 30 | 31 | # service openstack-networking_sr-api restart 32 | -------------------------------------------------------------------------------- /doc/source/install/install.rst: -------------------------------------------------------------------------------- 1 | .. _install: 2 | 3 | Install and configure 4 | ~~~~~~~~~~~~~~~~~~~~~ 5 | 6 | This section describes how to install and configure the 7 | Networking Segment Routing service, code-named networking_sr, on the controller node. 8 | 9 | This section assumes that you already have a working OpenStack 10 | environment with at least the following components installed: 11 | .. (add the appropriate services here and further notes) 12 | 13 | Note that installation and configuration vary by distribution. 14 | 15 | .. 
toctree:: 16 | :maxdepth: 2 17 | 18 | install-obs.rst 19 | install-rdo.rst 20 | install-ubuntu.rst 21 | -------------------------------------------------------------------------------- /doc/source/install/next-steps.rst: -------------------------------------------------------------------------------- 1 | .. _next-steps: 2 | 3 | Next steps 4 | ~~~~~~~~~~ 5 | 6 | Your OpenStack environment now includes the networking_sr service. 7 | 8 | To add additional services, see 9 | https://docs.openstack.org/project-install-guide/ocata/. 10 | -------------------------------------------------------------------------------- /doc/source/install/verify.rst: -------------------------------------------------------------------------------- 1 | .. _verify: 2 | 3 | Verify operation 4 | ~~~~~~~~~~~~~~~~ 5 | 6 | Verify operation of the Networking Segment Routing service. 7 | 8 | .. note:: 9 | 10 | Perform these commands on the controller node. 11 | 12 | #. Source the ``admin`` project credentials to gain access to 13 | admin-only CLI commands: 14 | 15 | .. code-block:: console 16 | 17 | $ . admin-openrc 18 | 19 | #. List service components to verify successful launch and registration 20 | of each process: 21 | 22 | .. code-block:: console 23 | 24 | $ openstack networking segment routing service list 25 | -------------------------------------------------------------------------------- /doc/source/library/index.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Usage 3 | ======== 4 | 5 | To use networking-sr in a project:: 6 | 7 | import networking_sr 8 | -------------------------------------------------------------------------------- /doc/source/readme.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ../../README.rst 2 | -------------------------------------------------------------------------------- /doc/source/reference/index.rst: -------------------------------------------------------------------------------- 1 | ========== 2 | References 3 | ========== 4 | 5 | References of networking-sr. 6 | -------------------------------------------------------------------------------- /doc/source/user/index.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Users guide 3 | =========== 4 | 5 | Users guide of networking-sr. 6 | -------------------------------------------------------------------------------- /etc/neutron/policy.d/srv6.conf: -------------------------------------------------------------------------------- 1 | { 2 | "admin_only": "rule:context_is_admin", 3 | 4 | "create_srv6_encap_network": "rule:admin_only", 5 | "get_srv6_encap_network": "rule:admin_or_owner", 6 | "update_srv6_encap_network": "rule:admin_or_owner", 7 | "delete_srv6_encap_network": "rule:admin_or_owner", 8 | } -------------------------------------------------------------------------------- /networking_sr/__init__.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
class SrInterfaceDriver(n_interface.LinuxInterfaceDriver):
    """Interface driver that plugs ports as veth pairs.

    One end of the pair stays on the host with the tap prefix; the peer
    end keeps the requested device name and is placed inside a network
    namespace when one is given.
    """

    DEV_NAME_PREFIX = 'ns-'

    def _host_side_name(self, device_name, prefix):
        # Derive the host-side (tap) device name; callers may override
        # the driver's default prefix.
        return device_name.replace(prefix or self.DEV_NAME_PREFIX,
                                   constants.TAP_DEVICE_PREFIX)

    def plug_new(self, network_id, port_id, device_name, mac_address,
                 bridge=None, namespace=None, prefix=None, mtu=None):
        """Plugin the interface."""
        wrapper = ip_lib.IPWrapper()

        host_name = self._host_side_name(device_name, prefix)
        # The second veth end is created inside the namespace when one
        # is configured.
        host_end, ns_end = wrapper.add_veth(host_name, device_name,
                                            namespace2=namespace)
        host_end.disable_ipv6()
        ns_end.link.set_address(mac_address)

        if mtu:
            self.set_mtu(device_name, mtu, namespace=namespace, prefix=prefix)
        else:
            LOG.warning("No MTU configured for port %s", port_id)

        host_end.link.set_up()
        ns_end.link.set_up()

    def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
        """Unplug the interface."""
        target = ip_lib.IPDevice(device_name, namespace=namespace)
        try:
            target.link.delete()
        except RuntimeError:
            LOG.error("Failed unplugging interface '%s'",
                      device_name)
        else:
            LOG.debug("Unplugged interface '%s'", device_name)

    def set_mtu(self, device_name, mtu, namespace=None, prefix=None):
        """Apply ``mtu`` to both ends of the port's veth pair."""
        host_name = self._host_side_name(device_name, prefix)
        host_end, ns_end = n_interface._get_veth(
            host_name, device_name, namespace2=namespace)
        host_end.link.set_mtu(mtu)
        ns_end.link.set_mtu(mtu)
class VrfBasedIptablesFirewallDriver(iptables_firewall.IptablesFirewallDriver):
    """Iptables firewall driver aware of per-port VRF devices."""

    def prepare_port_filter(self, port):
        """Install the base filter rules for ``port`` and apply them."""
        LOG.debug("Preparing device (%s) filter", port['device'])
        self._set_ports(port)
        ipv4_filter = self.iptables.ipv4['filter']
        # Accept communication to the DHCP server (VM -> DHCP server).
        ipv4_filter.add_rule('INPUT',
                             '-p udp -m udp --sport 68 --dport 67 -j ACCEPT',
                             wrap=True, top=True)
        # Accept packets from the metadata server back to the VM.
        ipv4_filter.add_rule(
            'FORWARD',
            '-s 169.254.169.254 -p tcp -m tcp --sport 80 -j ACCEPT',
            wrap=True, top=True)
        vrf = port["binding:profile"].get("vrf")
        if vrf:
            # Accept packets from the VM to the metadata server over
            # its VRF device.
            metadata_rule = ("-i %(vrf)s -d 169.254.169.254 -p tcp -m tcp "
                             "--dport 80 -j ACCEPT" % {'vrf': vrf})
            ipv4_filter.add_rule('INPUT', metadata_rule, wrap=True, top=True)
            # Everything else from the VRF towards the hypervisor is
            # dropped; only the ports allowed above (metadata
            # 169.254.169.254, dhcp, ...) get through.
            ipv4_filter.add_rule('INPUT',
                                 '-i %(vrf)s -j DROP' % {'vrf': vrf},
                                 wrap=True)
        self._setup_chains()
        return self.iptables.apply()

    def _get_br_device_name(self, port):
        # "qbr" + the device name with its 3-character prefix stripped.
        return "qbr%s" % port['device'][3:]
class SrAgentApi(object):
    '''SR agent RPC API

    API version history:
        1.0 - Initial version.
        1.1 - Add encap rule update method
    '''

    def __init__(self, topic):
        # Pre-compute the fanout topic name for every notification this
        # API can emit.
        self.topic_encap_delete = topics.get_topic_name(
            topic, TOPICS_ENCAP, topics.DELETE)
        self.topic_encap_update = topics.get_topic_name(
            topic, TOPICS_ENCAP, topics.UPDATE)
        self.topic_encap_rule_update = topics.get_topic_name(
            topic, TOPICS_ENCAP_RULE, topics.UPDATE)
        self.topic_vrf_delete = topics.get_topic_name(
            topic, TOPICS_VRF, topics.DELETE)
        target = oslo_messaging.Target(topic=topic, version='1.1')
        self.client = n_rpc.get_client(target)

    def _fanout_cast(self, topic, context, method, **kwargs):
        # Every notification is a fire-and-forget fanout cast.
        cctxt = self.client.prepare(topic=topic, fanout=True)
        cctxt.cast(context, method, **kwargs)

    def encap_delete(self, context, port):
        """Notify all agents that a port's encap state must be removed."""
        self._fanout_cast(self.topic_encap_delete, context,
                          'encap_delete', port=port)

    def encap_update(self, context, port):
        """Notify all agents that a port's encap state changed."""
        self._fanout_cast(self.topic_encap_update, context,
                          'encap_update', port=port)

    def encap_rule_update(self, context, encap_info):
        """Broadcast updated encap rule information to all agents."""
        self._fanout_cast(self.topic_encap_rule_update, context,
                          'encap_rule_update', encap_info=encap_info)

    def vrf_delete(self, context, vrf):
        """Notify all agents that a VRF was deleted."""
        self._fanout_cast(self.topic_vrf_delete, context,
                          'vrf_delete', vrf=vrf)
def main():
    """Console-script entry point: run the SR agent.

    Thin wrapper that delegates to
    ``networking_sr.ml2.agent.sr_agent.main()``.  This module lives in
    the ``cmd.eventlet`` package, whose ``__init__`` applies eventlet
    monkey patching on import, so the agent starts with patching in
    place.
    """
    sr_agent.main()
def main():
    """Console-script entry point: run the SR gateway agent.

    Thin wrapper that delegates to
    ``networking_sr.gw.srgw_agent.main()``.  This module lives in the
    ``cmd.eventlet`` package, whose ``__init__`` applies eventlet
    monkey patching on import.

    NOTE(review): the ``networking_sr.gw`` package is not visible in
    this repository view — confirm it exists before relying on this
    entry point.
    """
    srgw_agent.main()
def get_vrf_name(network_type, project_id, network_id):
    """Return the VRF device name for a network, or None.

    For ``srv6`` networks the name is "vrf" plus the first six
    characters of the project id and the first six of the network id.
    ``srv6vrf`` networks deliberately get no VRF name, and any other
    network type is logged as an error; None is returned in both cases.
    """
    if network_type == type_srv6.SRV6:
        vrf = "vrf" + project_id[:6] + network_id[:6]
    elif network_type == type_srv6vrf.SRV6VRF:
        # srv6vrf networks are not given a per-tenant VRF of their own.
        return
    else:
        LOG.error("Invalid network type: %s", network_type)
        return
    return vrf


class VrfIpAllocation(object):
    """Allocate and release IPs for VRF devices from the srv6vrf network.

    On construction, looks up the single network whose provider type is
    ``srv6vrf`` and remembers its id and project id; raises
    :class:`VrfNetworkNotFound` when no such network exists.
    """

    def __init__(self):
        networks = self._get_vrf_network()
        if not networks:
            raise VrfNetworkNotFound
        # Only the first matching network is used.
        self.vrf_network_id = networks[0]['id']
        self.vrf_project_id = networks[0]['project_id']

    def _get_vrf_network(self):
        # List networks of provider type srv6vrf via the core plugin,
        # using an elevated (admin) context.
        plugin = directory.get_plugin()
        context = lib_context.get_admin_context()
        networks = plugin.get_networks(
            context,
            filters={"provider:network_type": [type_srv6vrf.SRV6VRF]})
        return networks

    def create_vrf_ip(self, context, vrf_name):
        """Create a port named ``vrf_name`` on the srv6vrf network.

        The port exists purely to reserve an IP for the VRF device
        (device_owner 'vrf', generated device id, unspecified MAC and
        fixed IPs so IPAM picks them).  Raises
        :class:`VrfNetworkSubnetNotFound` when the network has no
        subnet to allocate from.  Returns the created port dict.
        """
        plugin = directory.get_plugin()
        port_db = {'port': {'name': vrf_name,
                            'tenant_id': self.vrf_project_id,
                            'device_owner': 'vrf',
                            'device_id': uuidutils.generate_uuid(),
                            'mac_address': constants.ATTR_NOT_SPECIFIED,
                            'admin_state_up': True,
                            'network_id': self.vrf_network_id,
                            'fixed_ips': constants.ATTR_NOT_SPECIFIED}}
        result = plugin.create_port_db(context.elevated(), port_db)
        if not result['fixed_ips']:
            raise VrfNetworkSubnetNotFound
        return result

    @db_api.retry_if_session_inactive()
    def _delete_port(self, context, plugin, port_id):
        # Release the port's IPAM allocation inside a writer
        # transaction; retried automatically if the session went
        # inactive.
        with db_api.CONTEXT_WRITER.using(context):
            plugin.ipam.delete_port(context, port_id)

    def delete_vrf_ip(self, context, vrf):
        """Delete the IP-reservation port named ``vrf``, if any.

        A missing port (never created, or already removed) is not an
        error; the method simply returns.
        """
        plugin = directory.get_plugin()
        vrf_ports = plugin.get_ports(context.elevated(),
                                     filters={'name': [vrf]})
        if not vrf_ports:
            return
        else:
            vrf_port = vrf_ports[0]
        try:
            self._delete_port(context.elevated(), plugin, vrf_port['id'])
        except exceptions.PortNotFound:
            # The port was already removed concurrently; nothing to do.
            pass
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/line/networking-sr/0c7302ef7e9733c4a07aed48accb6c190485f14d/networking_sr/db/__init__.py -------------------------------------------------------------------------------- /networking_sr/db/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 2 | 3 | [alembic] 4 | # path to migration scripts 5 | script_location = migrations 6 | 7 | # template used to generate migration files 8 | # file_template = %%(rev)s_%%(slug)s 9 | 10 | # timezone to use when rendering the date 11 | # within the migration file as well as the filename. 12 | # string value is passed to dateutil.tz.gettz() 13 | # leave blank for localtime 14 | # timezone = 15 | 16 | # max length of characters to apply to the 17 | # "slug" field 18 | #truncate_slug_length = 40 19 | 20 | # set to 'true' to run the environment during 21 | # the 'revision' command, regardless of autogenerate 22 | # revision_environment = false 23 | 24 | # set to 'true' to allow .pyc and .pyo files without 25 | # a source .py file to be detected as revisions in the 26 | # versions/ directory 27 | # sourceless = false 28 | 29 | # version location specification; this defaults 30 | # to migrations/versions. 
When using multiple version 31 | # directories, initial revisions must be specified with --version-path 32 | # version_locations = %(here)s/bar %(here)s/bat migrations/versions 33 | 34 | # the output encoding used when revision files 35 | # are written from script.py.mako 36 | # output_encoding = utf-8 37 | 38 | sqlalchemy.url = driver://user:pass@localhost/dbname 39 | 40 | 41 | # Logging configuration 42 | [loggers] 43 | keys = root,sqlalchemy,alembic 44 | 45 | [handlers] 46 | keys = console 47 | 48 | [formatters] 49 | keys = generic 50 | 51 | [logger_root] 52 | level = WARN 53 | handlers = console 54 | qualname = 55 | 56 | [logger_sqlalchemy] 57 | level = WARN 58 | handlers = 59 | qualname = sqlalchemy.engine 60 | 61 | [logger_alembic] 62 | level = INFO 63 | handlers = 64 | qualname = alembic 65 | 66 | [handler_console] 67 | class = StreamHandler 68 | args = (sys.stderr,) 69 | level = NOTSET 70 | formatter = generic 71 | 72 | [formatter_generic] 73 | format = %(levelname)-5.5s [%(name)s] %(message)s 74 | datefmt = %H:%M:%S 75 | -------------------------------------------------------------------------------- /networking_sr/db/migration/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/line/networking-sr/0c7302ef7e9733c4a07aed48accb6c190485f14d/networking_sr/db/migration/__init__.py -------------------------------------------------------------------------------- /networking_sr/db/migration/alembic_migrations/README: -------------------------------------------------------------------------------- 1 | Generic single-database configuration. 
-------------------------------------------------------------------------------- /networking_sr/db/migration/alembic_migrations/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/line/networking-sr/0c7302ef7e9733c4a07aed48accb6c190485f14d/networking_sr/db/migration/alembic_migrations/__init__.py -------------------------------------------------------------------------------- /networking_sr/db/migration/alembic_migrations/env.py: -------------------------------------------------------------------------------- 1 | # Copyright 2012 New Dream Network, LLC (DreamHost) 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
def include_object(object_, name, type_, reflected, compare_to):
    """Tell alembic autogenerate which schema objects to consider.

    Tables owned by other projects (listed in neutron's external.TABLES)
    and SQLAlchemy's autoincrement helper indexes are excluded; everything
    else is included.
    """
    if type_ == 'table':
        # Only tables not managed by an external project are ours.
        return name not in external.TABLES
    if type_ == 'index' and reflected and name.startswith("idx_autoinc_"):
        # skip indexes created by SQLAlchemy autoincrement=True
        # on composite PK integer columns
        return False
    return True
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.

    """
    set_mysql_engine()
    # An externally managed connection may be injected via config
    # attributes (e.g. by tests); otherwise DBConnection opens one from
    # the neutron database URL.
    connection = config.attributes.get('connection')
    with DBConnection(neutron_config.database.connection, connection) as conn:
        context.configure(
            connection=conn,
            target_metadata=target_metadata,
            include_object=include_object,
            process_revision_directives=autogen.process_revision_directives,
            # Project-specific version table ("alembic_version_sr") so
            # networking-sr's migration state never collides with
            # neutron's own alembic_version table.
            version_table=SR_VERSION_TABLE
        )
        with context.begin_transaction():
            context.run_migrations()
You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 12 | # 13 | 14 | from alembic import op 15 | import sqlalchemy as sa 16 | ${imports if imports else ""} 17 | 18 | """${message} 19 | 20 | Revision ID: ${up_revision} 21 | Revises: ${down_revision} 22 | Create Date: ${create_date} 23 | 24 | """ 25 | 26 | # revision identifiers, used by Alembic. 27 | revision = ${repr(up_revision)} 28 | down_revision = ${repr(down_revision)} 29 | % if branch_labels: 30 | branch_labels = ${repr(branch_labels)} 31 | % endif 32 | 33 | 34 | def upgrade(): 35 | ${upgrades if upgrades else "pass"} 36 | -------------------------------------------------------------------------------- /networking_sr/db/migration/alembic_migrations/versions/CONTRACT_HEAD: -------------------------------------------------------------------------------- 1 | 927a16680421 2 | -------------------------------------------------------------------------------- /networking_sr/db/migration/alembic_migrations/versions/EXPAND_HEAD: -------------------------------------------------------------------------------- 1 | 4db8684b17e9 2 | -------------------------------------------------------------------------------- /networking_sr/db/migration/alembic_migrations/versions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/line/networking-sr/0c7302ef7e9733c4a07aed48accb6c190485f14d/networking_sr/db/migration/alembic_migrations/versions/__init__.py -------------------------------------------------------------------------------- 
/networking_sr/db/migration/alembic_migrations/versions/train/contract/927a16680421_initial.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 OpenStack Foundation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | # 15 | 16 | from neutron.db.migration import cli 17 | 18 | """initial contract 19 | 20 | Revision ID: 927a16680421 21 | Revises: None 22 | Create Date: 2019-05-27 07:33:22.328335 23 | 24 | """ 25 | 26 | # revision identifiers, used by Alembic. 27 | revision = '927a16680421' 28 | down_revision = None 29 | branch_labels = (cli.CONTRACT_BRANCH,) 30 | 31 | 32 | def upgrade(): 33 | pass 34 | -------------------------------------------------------------------------------- /networking_sr/db/migration/alembic_migrations/versions/train/expand/4db8684b17e9_initial.py: -------------------------------------------------------------------------------- 1 | # Copyright 2019 OpenStack Foundation 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
def upgrade():
    # Create the two tables backing the srv6-encap-network extension;
    # they mirror the Srv6EncapNetwork / Srv6EncapRule models in
    # networking_sr/db/srv6_encap_net_db.py.
    op.create_table('srv6encapnetwork',
                    sa.Column('project_id',
                              sa.String(length=255),
                              nullable=True),
                    sa.Column('id',
                              sa.String(length=36),
                              nullable=False),
                    # Cascade: the encap network row is removed
                    # automatically when its neutron network is deleted.
                    sa.Column('network_id', sa.String(36),
                              sa.ForeignKey('networks.id',
                                            ondelete="CASCADE"),
                              nullable=False),
                    sa.PrimaryKeyConstraint('id'))

    op.create_table('srv6encaprule',
                    sa.Column('srv6_encap_network_id', sa.String(36),
                              sa.ForeignKey('srv6encapnetwork.id',
                                            ondelete="CASCADE"),
                              nullable=False),
                    sa.Column('destination', sa.String(length=255),
                              nullable=False),
                    sa.Column('nexthop', sa.String(length=255),
                              nullable=False),
                    # Composite PK: a rule is unique per
                    # (encap network, destination, nexthop).
                    sa.PrimaryKeyConstraint('srv6_encap_network_id',
                                            'destination',
                                            'nexthop'))
See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 12 | 13 | from neutron_lib.db import api as db_api 14 | from neutron_lib.db import model_base 15 | from neutron_lib.db import model_query 16 | from neutron_lib.db import utils as db_utils 17 | from neutron_lib import exceptions as lib_exc 18 | from oslo_db import exception as db_exc 19 | from oslo_log import log as logging 20 | import sqlalchemy as sa 21 | from sqlalchemy import orm 22 | from sqlalchemy.orm import exc 23 | 24 | from neutron.db import models_v2 25 | 26 | 27 | LOG = logging.getLogger(__name__) 28 | 29 | 30 | class Srv6EncapNetwork(model_base.BASEV2, model_base.HasId, 31 | model_base.HasProject): 32 | __tablename__ = 'srv6encapnetwork' 33 | network_id = sa.Column(sa.String(36), 34 | sa.ForeignKey('networks.id', ondelete="CASCADE"), 35 | primary_key=True, 36 | nullable=False) 37 | network = orm.relationship( 38 | models_v2.Network, load_on_pending=True, 39 | backref=orm.backref("srv6_encap_networks", lazy='subquery', 40 | cascade='delete')) 41 | 42 | 43 | class Srv6EncapRule(model_base.BASEV2): 44 | __tablename__ = 'srv6encaprule' 45 | srv6_encap_network_id = sa.Column( 46 | sa.String(36), 47 | sa.ForeignKey('srv6encapnetwork.id', ondelete="CASCADE"), 48 | primary_key=True, 49 | nullable=False) 50 | destination = sa.Column(sa.String(255), nullable=False, primary_key=True) 51 | nexthop = sa.Column(sa.String(255), nullable=False, primary_key=True) 52 | srv6_encap_network = orm.relationship( 53 | Srv6EncapNetwork, 54 | backref=orm.backref("srv6_encap_rules", lazy='subquery', 55 | cascade='delete')) 56 | 57 | 58 | class DuplicateDestinationEntry(lib_exc.InvalidInput): 59 | message = _("Duplicate destination entry in request.") 60 | 61 | 62 | class SRv6EncapNetworkNotFound(lib_exc.NotFound): 63 | message = _("SRv6 encap network %(id)s doesn't exist.") 64 | 65 | 66 | class SRv6EncapNetworkDbMixin(object): 67 | 68 | def 
_validate_srv6_encap_rules(self, encap_rules): 69 | dests = [rule['destination'] for rule in encap_rules] 70 | if len(dests) != len(set(dests)): 71 | raise DuplicateDestinationEntry 72 | 73 | def _make_srv6_encap_network_dict(self, encap_net, encap_rules, 74 | fields=None): 75 | rules = [] 76 | for rule in encap_rules: 77 | rules.append({'destination': rule['destination'], 78 | 'nexthop': rule['nexthop']}) 79 | res = { 80 | 'id': encap_net['id'], 81 | 'project_id': encap_net['project_id'], 82 | 'network_id': encap_net['network_id'], 83 | 'encap_rules': rules} 84 | return db_utils.resource_fields(res, fields) 85 | 86 | def _get_srv6_encap_network(self, context, encap_net_id): 87 | try: 88 | return model_query.get_by_id(context, Srv6EncapNetwork, 89 | encap_net_id) 90 | except exc.NoResultFound: 91 | raise SRv6EncapNetworkNotFound(id=encap_net_id) 92 | 93 | def _get_srv6_encap_rule(self, context, encap_net_id): 94 | try: 95 | query = model_query.query_with_hooks(context, Srv6EncapRule) 96 | return query.filter( 97 | Srv6EncapRule.srv6_encap_network_id == encap_net_id).all() 98 | except exc.NoResultFound: 99 | # TODO(hichihara) 100 | pass 101 | 102 | def get_srv6_encap_networks(self, context, filters=None, 103 | fields=None, sorts=None, limit=None, 104 | marker=None, page_reverse=False): 105 | marker_obj = self.db_utils.get_marker_obj(self, context, 106 | 'srv6_encap_networks', 107 | limit, marker) 108 | encap_networks = model_query.get_collection_query( 109 | context, Srv6EncapNetwork, 110 | filters=filters, sorts=sorts, 111 | limit=limit, marker_obj=marker_obj, 112 | page_reverse=page_reverse) 113 | results = [] 114 | for encap_network in encap_networks: 115 | encap_rule_db = self._get_srv6_encap_rule(context, 116 | encap_network['id']) 117 | result = self._make_srv6_encap_network_dict(encap_network, 118 | encap_rule_db, 119 | fields=fields) 120 | results.append(result) 121 | return results 122 | 123 | def get_srv6_encap_network(self, context, encap_net_id, 
fields=None): 124 | encap_network_db = self._get_srv6_encap_network(context, encap_net_id) 125 | encap_rule_db = self._get_srv6_encap_rule(context, encap_net_id) 126 | return self._make_srv6_encap_network_dict(encap_network_db, 127 | encap_rule_db, fields) 128 | 129 | def create_srv6_encap_network(self, context, srv6_encap_network): 130 | encap_net = srv6_encap_network['srv6_encap_network'] 131 | self._validate_srv6_encap_rules(encap_net['encap_rules']) 132 | try: 133 | with db_api.CONTEXT_WRITER.using(context): 134 | encap_network_db = Srv6EncapNetwork( 135 | network_id=encap_net['network_id'], 136 | project_id=encap_net['project_id'], 137 | ) 138 | context.session.add(encap_network_db) 139 | except db_exc.DBDuplicateEntry: 140 | # TODO(hichihara) 141 | pass 142 | 143 | try: 144 | with db_api.CONTEXT_WRITER.using(context): 145 | for rule in encap_net['encap_rules']: 146 | encap_rule_db = Srv6EncapRule( 147 | srv6_encap_network_id=encap_network_db.id, 148 | destination=rule['destination'], 149 | nexthop=rule['nexthop'] 150 | ) 151 | context.session.add(encap_rule_db) 152 | except db_exc.DBDuplicateEntry: 153 | # TODO(hichihara) 154 | pass 155 | 156 | return self._make_srv6_encap_network_dict(encap_network_db, 157 | encap_net['encap_rules']) 158 | 159 | def update_srv6_encap_network(self, context, encap_net_id, 160 | srv6_encap_network): 161 | encap_net = srv6_encap_network['srv6_encap_network'] 162 | self._validate_srv6_encap_rules(encap_net['encap_rules']) 163 | with db_api.CONTEXT_WRITER.using(context): 164 | encap_network_db = self._get_srv6_encap_network(context, 165 | encap_net_id) 166 | encap_rules = self._get_srv6_encap_rule(context, encap_net_id) 167 | for rule in encap_rules: 168 | context.session.delete(rule) 169 | for rule in encap_net['encap_rules']: 170 | encap_rule_db = Srv6EncapRule( 171 | srv6_encap_network_id=encap_net_id, 172 | destination=rule['destination'], 173 | nexthop=rule['nexthop'] 174 | ) 175 | context.session.add(encap_rule_db) 176 | 
return self._make_srv6_encap_network_dict(encap_network_db, 177 | encap_net['encap_rules']) 178 | 179 | def delete_srv6_encap_network(self, context, encap_net_id): 180 | with db_api.CONTEXT_WRITER.using(context): 181 | encap_network_db = self._get_srv6_encap_network(context, 182 | encap_net_id) 183 | context.session.delete(encap_network_db) 184 | -------------------------------------------------------------------------------- /networking_sr/extensions/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/line/networking-sr/0c7302ef7e9733c4a07aed48accb6c190485f14d/networking_sr/extensions/__init__.py -------------------------------------------------------------------------------- /networking_sr/extensions/srv6_encap_network.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. You may obtain 3 | # a copy of the License at 4 | # 5 | # http://www.apache.org/licenses/LICENSE-2.0 6 | # 7 | # Unless required by applicable law or agreed to in writing, software 8 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 9 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 10 | # License for the specific language governing permissions and limitations 11 | # under the License. 
class Srv6_encap_network(api_extensions.ExtensionDescriptor):
    """API extension descriptor for the srv6-encap-network resource."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Srv6 encap network"

    @classmethod
    def get_alias(cls):
        """Short alias used in API requests and policy checks."""
        return ALIAS

    @classmethod
    def get_description(cls):
        """One-line description shown in the extensions listing."""
        return "Adds srv6 encap rules attribute to network resource."

    @classmethod
    def get_updated(cls):
        """Timestamp of the last change to this extension's definition."""
        return "2019-05-27T10:00:00-00:00"

    def get_required_extensions(self):
        """This extension does not depend on any other extension."""
        return []

    @classmethod
    def get_resources(cls):
        """Build the API resource controllers for this extension."""
        mappings = resource_helper.build_plural_mappings(
            {}, EXTENDED_ATTRIBUTES_2_0)
        resources = resource_helper.build_resource_info(
            mappings,
            EXTENDED_ATTRIBUTES_2_0,
            ALIAS)
        return resources

    def get_extended_resources(self, version):
        """Return the attribute map for API v2.0, empty otherwise."""
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | 16 | import os 17 | import socket 18 | import struct 19 | 20 | from oslo_config import cfg 21 | from oslo_log import log as logging 22 | 23 | from neutron.agent.linux import external_process 24 | from neutron.conf.agent import dhcp as dhcp_config 25 | 26 | LOG = logging.getLogger(__name__) 27 | 28 | DNSMASQ_PROCESS_UUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa" 29 | DNSMASQ_HOST_DIR = "dhcp-hosts.d" 30 | DNSMASQ_OPTS_DIR = "dhcp-opts.d" 31 | DNSMASQ_LEASE_DIR = "dhcp-lease" 32 | DNSMASQ_PID_DIR = "dnsmasq-pids" 33 | DNSMASQ_TAG_LEN = 8 34 | DHCP_DEFAULT_ROOT = "0.0.0.0/0,%(gateway_ip)s" 35 | 36 | SUBNET_DEDICATED_DHCP = 'subnet-dedicated-dhcp' 37 | 38 | # NOTE: Use same configuration as neutron-dhcp-agent 39 | # But in this agent, we only use dnsmasq_dns_servers 40 | # which will be used as a default upstream dns server 41 | # If we specify dns_servers in subnet, these dns servers 42 | # will be used intead of cfg.CONF.dnsmasq_dns_servers 43 | cfg.CONF.register_opts(dhcp_config.DNSMASQ_OPTS) 44 | 45 | 46 | class DnsmasqManager(object): 47 | 48 | @classmethod 49 | def _get_config_dir(cls, directory_name): 50 | """Get a path of dnsmasq config directory 51 | 52 | Args: 53 | directory_name(String): directory name 54 | Return: 55 | path(String): absolute path to directory_name 56 | """ 57 | return os.path.join(cfg.CONF.state_path, directory_name) 58 | 59 | @classmethod 60 | def get_host_entries(cls, re_raise=True): 61 | """Get all dhcp host entry names(device name) 62 | 63 | Args: 64 | re_raise(Bool): whether if Exception should be propagated or not 65 | Return: 66 | host_entries(list): existing host entry names(device name) 67 | """ 68 | host_entries = [] 69 | try: 70 | host_entries = os.listdir(cls._get_config_dir(DNSMASQ_HOST_DIR)) 71 | except Exception as e: 72 | LOG.warning("Failed to listdir %s: %s", 73 | cls._get_config_dir(DNSMASQ_HOST_DIR), e) 74 
| if re_raise: 75 | raise 76 | return host_entries 77 | 78 | @classmethod 79 | def sync_host_entries(cls, get_interface_func, add_missing_entry_func): 80 | """This method try to ensure following about dhcp host entry 81 | 82 | * We have all dhcp host entry for existing tap device 83 | -> Add tap device into list that contains device being 84 | re-configured in next iteration 85 | * We don't have orphan dhcp host entry which is no longer used 86 | 87 | Args: 88 | get_interface_func(function): function to get existing device 89 | add_missing_entry_func(function): function to add device into 90 | missing_entry list 91 | Return: 92 | None 93 | Raise: 94 | OSError: if we failed to load existing host entries to prevent 95 | mis-behaviour (could be possible to recognise all 96 | host entries as missing) 97 | """ 98 | LOG.debug("Start sync_host_entries") 99 | host_entries = cls.get_host_entries() 100 | host_entries = set(host_entries) 101 | devices = get_interface_func() 102 | devices = set(devices) 103 | 104 | orphan_entries = host_entries - devices 105 | missing_entries = devices - host_entries 106 | 107 | for oe in orphan_entries: 108 | LOG.info("Found orhpan entry and Delete %s", oe) 109 | cls.delete_fixedip_entry(oe) 110 | 111 | for me in missing_entries: 112 | LOG.warning("Found missing dhcp host entry for %s", me) 113 | add_missing_entry_func(me) 114 | LOG.debug("Finish sync_host_entries") 115 | 116 | @classmethod 117 | def initialize(cls, monitor): 118 | """This method should be called before any other method being called. 
119 | 120 | Ensure to have required directory and spawn dnsmasq for all subnets 121 | Args: 122 | None 123 | Return: 124 | None 125 | """ 126 | for config_dir_name in (DNSMASQ_OPTS_DIR, 127 | DNSMASQ_HOST_DIR, 128 | DNSMASQ_LEASE_DIR): 129 | config_dir = cls._get_config_dir(config_dir_name) 130 | if not os.path.exists(config_dir): 131 | LOG.info("Create %s directory" % config_dir) 132 | os.makedirs(config_dir) 133 | 134 | # FIXME(Yuki Nishiwaki) 135 | # This is the temporally workaround for older dnsmasq than 2.7.9 136 | # See more detail in def get_dnsmasq_cmd methods 137 | for dummy_file in ("dummy_hosts", "dummy_opts"): 138 | dummy_file_path = os.path.join(cfg.CONF.state_path, dummy_file) 139 | if not os.path.exists(dummy_file_path): 140 | f = open(dummy_file_path, 'w') 141 | f.close() 142 | 143 | callback = cls.get_dnsmasq_cmd(DNSMASQ_PROCESS_UUID) 144 | pm = cls.get_process_manager(DNSMASQ_PROCESS_UUID, cfg.CONF, callback) 145 | pm.enable() 146 | monitor.register(DNSMASQ_PROCESS_UUID, SUBNET_DEDICATED_DHCP, pm) 147 | 148 | @classmethod 149 | def get_tag_name(cls, subnet_id): 150 | """Return tag name 151 | 152 | Args: 153 | subnet_id(String): subnet uuid 154 | Return: 155 | subnet_tag(String): first DNSMASQ_TAG_LEN character of subnet_id 156 | """ 157 | return subnet_id[:DNSMASQ_TAG_LEN] 158 | 159 | @classmethod 160 | def _transform_cidr_notation_to_netmask(cls, cidr_notation): 161 | """Return netmask transformed by cidr_notation 162 | 163 | Args: 164 | cidr_notation(String): network cidr notation 165 | Return: 166 | netmask(String): netmask 167 | """ 168 | host_bits = 32 - int(cidr_notation) 169 | netmask = socket.inet_ntoa( 170 | struct.pack('!I', (1 << 32) - (1 << host_bits))) 171 | return netmask 172 | 173 | @classmethod 174 | def ensure_dhcp_opts(cls, subnet_id, defaultgw, static_routes, 175 | cidr_notation, nameservers): 176 | """Ensure we have dhcp_opts file with passed configuration 177 | 178 | Args: 179 | subnet_id(String): subnet uuid 180 | 
defaultgw(String): ip address without cider notation 181 | like 192.168.0.1 182 | static_routes(list): [",",] 183 | cidr_notation(String): network cidr notation i.e. 24 or 16... 184 | nameservers(list): ["", ] 185 | Return: 186 | succeed_flg(Bool): True if succeed in ensuring dhcp option for 187 | subnet 188 | """ 189 | # NOTE: If subnet doesn't specify nameservers, 190 | # we use dnsmasq_dns_servers instead 191 | if len(nameservers) == 0: 192 | nameservers = cfg.CONF.dnsmasq_dns_servers 193 | target_opts_path = os.path.join( 194 | cls._get_config_dir(DNSMASQ_OPTS_DIR), subnet_id) 195 | 196 | netmask = cls._transform_cidr_notation_to_netmask(cidr_notation) 197 | routes = [DHCP_DEFAULT_ROOT % {"gateway_ip": defaultgw}, ] 198 | routes += static_routes 199 | opt_pre = "tag:%s," % cls.get_tag_name(subnet_id) 200 | try: 201 | LOG.info("Try to create %s", target_opts_path) 202 | with open(target_opts_path, 'w') as f: 203 | f.write(opt_pre + 204 | "option:router," + defaultgw + "\n") 205 | f.write(opt_pre + 206 | "249," + ",".join(routes) + "\n") 207 | f.write( 208 | opt_pre + 209 | "option:classless-static-route," + ",".join(routes) + "\n") 210 | f.write(opt_pre + 211 | "option:netmask," + netmask + "\n") 212 | if nameservers: 213 | f.write(opt_pre + 214 | "option:dns-server," + 215 | ",".join(nameservers) + "\n") 216 | except Exception as e: 217 | LOG.error("Can not create dhcp opts %s: %s", 218 | target_opts_path, e) 219 | return False 220 | return True 221 | 222 | @classmethod 223 | def delete_fixedip_entry(cls, device_name): 224 | """Delete host entry named as device_name 225 | 226 | And also this sends SIGHUP to dnsmasq to reload config 227 | 228 | Args: 229 | device_name(String): dhcp host entry name 230 | Return: 231 | None 232 | """ 233 | try: 234 | entry = os.path.join(cls._get_config_dir(DNSMASQ_HOST_DIR), 235 | device_name) 236 | os.remove(entry) 237 | pm = cls.get_process_manager(DNSMASQ_PROCESS_UUID, cfg.CONF) 238 | pm.reload_cfg() 239 | except Exception as 
e: 240 | # Even If we failed to delete dhcp entry, 241 | # that old dhcp entry are not harmful immediately 242 | # So we ignore that after let operator know 243 | LOG.warning("Can not delete %s: %s", entry, e) 244 | 245 | @classmethod 246 | def add_fixedip_entry(cls, subnet_id, device_name, macaddr, ipaddr): 247 | """Added host entry with passed configuration 248 | 249 | Args: 250 | subnet_id(String): subnet uuid 251 | device_name(String): device name being used for dhcp host entry 252 | name 253 | macaddr(String): target mac address 254 | ipaddr(String): the ip address being issued against macaddr 255 | Return: 256 | succeed_flg(Bool): True if it succeed in creating host entry 257 | """ 258 | target_host_path = os.path.join( 259 | cls._get_config_dir(DNSMASQ_HOST_DIR), device_name) 260 | try: 261 | LOG.info("Try to create %s", target_host_path) 262 | with open(target_host_path, 'w') as f: 263 | f.write("%s,%s,set:%s\n" % 264 | (macaddr, ipaddr, 265 | subnet_id[:DNSMASQ_TAG_LEN])) 266 | except Exception as e: 267 | LOG.error("Can not create host entry %s: %s", 268 | target_host_path, e) 269 | return False 270 | return True 271 | 272 | @classmethod 273 | def get_dnsmasq_cmd(cls, uuid): 274 | """Get dnsmasq command being executed 275 | 276 | Args: 277 | uuid(String): uuid to identify dnsmasq process 278 | Return: 279 | callback(function): function to return dnsmasq command with 280 | passed argument 281 | """ 282 | def callback(pid_file): 283 | # TODO(Yuki Nishiwaki) Add --except-interface physical interface 284 | lease_file = os.path.join( 285 | cls._get_config_dir(DNSMASQ_LEASE_DIR), uuid) 286 | 287 | if cfg.CONF.dhcp_lease_duration == -1: 288 | lease = 'infinite' 289 | else: 290 | lease = '%ss' % cfg.CONF.dhcp_lease_duration 291 | 292 | dnsmasq_cmd = [ 293 | 'dnsmasq', '--pid-file=%s' % pid_file, 294 | '--dhcp-optsdir=%s' % cls._get_config_dir( 295 | DNSMASQ_OPTS_DIR), 296 | '--dhcp-hostsdir=%s' % cls._get_config_dir( 297 | DNSMASQ_HOST_DIR), 298 | 
'--dhcp-leasefile=%s' % lease_file, 299 | '--bind-dynamic', '--port=0', 300 | '--domain=%s' % cfg.CONF.dns_domain, 301 | '--dhcp-range=0.0.0.0,static,128.0.0.0,%s' % lease, 302 | '--dhcp-range=128.0.0.0,static,128.0.0.0,%s' % lease] 303 | # FIXME(Yuki Nishiwaki) 304 | # The older dnsmasq than 2.7.9 have the bug not clearing existing 305 | # dhcp-hosts, dhcp-opts config and just re-load config when 306 | # it got SIGHUP, but if we passed --dhcp-hostsfile, --dhcp-optsfile 307 | # option, dnmsasq correclty clear exisiting config and re-load. 308 | dnsmasq_cmd += [ 309 | '--dhcp-hostsfile=%s' % os.path.join( 310 | cfg.CONF.state_path, 'dummy_hosts'), 311 | '--dhcp-optsfile=%s' % os.path.join( 312 | cfg.CONF.state_path, 'dummy_opts')] 313 | return dnsmasq_cmd 314 | return callback 315 | 316 | @classmethod 317 | def get_process_manager(cls, uuid, conf, callback=None): 318 | """Get process manager for specific command 319 | 320 | Args: 321 | uuid(String): subnet uuid 322 | conf: cfg.CONF 323 | callback: function to return command list 324 | Return: 325 | pm(neutron.agent.linux.external_process.ProcessManager): 326 | """ 327 | return external_process.ProcessManager( 328 | conf=conf, uuid=uuid, 329 | pid_file=cls._get_config_dir(DNSMASQ_PID_DIR) + "/" + uuid, 330 | default_cmd_callback=callback, 331 | run_as_root=True) 332 | -------------------------------------------------------------------------------- /networking_sr/ml2/agent/sr_agent.py: -------------------------------------------------------------------------------- 1 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 2 | # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
import os
import sys

from neutron_lib.agent import topics
from neutron_lib import constants
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import service
from oslo_utils import excutils
import pyroute2
from pyroute2.config.eventlet import eventlet_config

from neutron.agent.common import utils
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import external_process
from neutron.agent.linux import ip_lib
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import profiler as setup_profiler
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.privileged.agent.linux import ip_lib as privileged

from networking_sr.agent import rpc as sr_rpc
from networking_sr.common import config  # noqa
from networking_sr.ml2.agent import sr_agent_loop


# Patch pyroute2 for eventlet green threads before any netlink socket is
# created by this module.
eventlet_config()

LOG = logging.getLogger(__name__)

# Binary name this agent reports to the neutron server.
SR_AGENT_BINARY = 'neutron-sr-agent'
# Agent type string registered with neutron.
AGENT_TYPE_SR = 'SR agent'
EXTENSION_DRIVER_TYPE = 'sr'
# sysfs directory used to enumerate network devices on the host.
INTERFACE_FS = "/sys/class/net/"
# Number of port-uuid characters used to build tap/veth/bridge names.
RESOURCE_ID_LENGTH = 11
# First routing table id handed out to VRF devices.
# NOTE(review): "NAMBER" is a historical typo ("NUMBER"); the name is kept
# because other code in this file references it.
VRF_TABLE_NAMBER_BASE = 1000


class SysctlCommandError(exceptions.NeutronException):
    # Raised when applying a sysctl option returns a non-zero status.
    message = "Sysctl command %(cmd)s failed."
class SrManager(amb.CommonAgentManagerBase):
    """Agent-side manager that plugs VM tap devices into SRv6 VRFs."""

    def __init__(self):
        super(SrManager, self).__init__()
        self.process_monitor = external_process.ProcessMonitor(
            cfg.CONF, resource_type="sr-agent")

        # SRv6 segment node id of this host; mandatory for the agent to run.
        self.node_id = cfg.CONF.sr.segment_node_id
        if not self.node_id:
            LOG.error("Segment Node ID is not set in config.")
            sys.exit(1)

        # Optional segment node id of the gateway (network) node; used to
        # install a default encap route when set.
        self.gw_id = cfg.CONF.sr.segment_gw_id

        self._setup_system()
        self._setup_ipv6()

        # vrf_tables = {"vrf name": vrf_table_id}
        self.vrf_tables = {}

        # Discover VRF devices that already exist on this host so their
        # table ids are reloaded instead of re-allocated.
        # TODO(hichihara): Refactor the following codes
        # Exteded privileged ip_lib should be created
        with pyroute2.IPDB() as ipdb:
            interfaces = ipdb.by_name.keys()

        vrfs = []
        for i in interfaces:
            if i[:3] == "vrf":
                vrfs.append(i)

        with pyroute2.IPRoute() as ip:
            for vrf in vrfs:
                try:
                    vrf_id = ip.link_lookup(ifname=vrf)[0]
                except IndexError:
                    # BUG FIX: this exception used to be instantiated but
                    # never raised, which left vrf_id unbound and caused a
                    # NameError on the next statement.
                    raise privileged.NetworkInterfaceNotFound(device=vrf,
                                                              namespace=None)

                link = ip.link("get", index=vrf_id)[0]
                linkinfo = self._nlattr_get(link['attrs'], 'IFLA_LINKINFO')
                if not linkinfo:
                    LOG.error("Failed to find attr IFLA_LINKINFO "
                              "from vrf interface")
                    sys.exit(1)
                info_data = self._nlattr_get(linkinfo["attrs"],
                                             "IFLA_INFO_DATA")
                if not info_data:
                    LOG.error("Failed to find attr IFLA_INFO_DATA "
                              "from vrf interface")
                    sys.exit(1)
                vrf_table = self._nlattr_get(info_data["attrs"],
                                             "IFLA_VRF_TABLE")
                if not vrf_table:
                    LOG.error("Failed to find attr IFLA_VRF_TABLE "
                              "from vrf interface")
                    sys.exit(1)

                self.vrf_tables[vrf] = vrf_table
                LOG.debug("Found existing vrf %(vrf)s with table id "
                          "%(table_id)d", {"vrf": vrf, "table_id": vrf_table})

        # TODO(hichihara): Replace this to a way which actually gets
        # current rules
        self.encap_info = []

    def _nlattr_get(self, attrs, key):
        """Return the value for key in a netlink attr list, or None.

        Args:
            attrs(list): list of (key, value) netlink attribute pairs
            key(String): attribute name to look up
        Return:
            The attribute value, or None when key is absent.
        """
        for attr in attrs:
            if attr[0] == key:
                return attr[1]
        return None

    def _apply_sysctl(self, opt):
        """Apply one sysctl option or terminate the agent.

        Args:
            opt(String): "knob=value" formatted sysctl option
        """
        result = ip_lib.sysctl([opt])
        if result == 1:
            LOG.error("Failed to enable %s.", opt)
            sys.exit(1)

    def _setup_system(self):
        """Apply all kernel settings the SR agent depends on."""
        # Make sure to allow ip forward
        self._apply_sysctl('net.ipv4.ip_forward=1')
        # Make sure to allow tcp packet to pass through default vrf
        self._apply_sysctl('net.ipv4.tcp_l3mdev_accept=1')
        # Make sure to allow udp packet to pass through default vrf
        self._apply_sysctl('net.ipv4.udp_l3mdev_accept=1')
        # Enable SRv6 segment processing and IPv6 forwarding.
        self._apply_sysctl('net.ipv6.conf.all.seg6_enabled=1')
        self._apply_sysctl('net.ipv6.conf.all.forwarding=1')
        # Disable reverse path filtering globally and on each SRv6 interface.
        self._apply_sysctl('net.ipv4.conf.all.rp_filter=0')
        for interface in cfg.CONF.sr.srv6_interfaces:
            self._apply_sysctl('net.ipv4.conf.%s.rp_filter=0' % interface)
        # Make sure to allow bridge to call iptables
        self._apply_sysctl('net.bridge.bridge-nf-call-iptables=1')

    def _setup_ipv6(self):
        """Adjust IPv6 policy routing so the local table is consulted last."""
        # TODO(hichihara): Refactor to use ip_lib instead of command execute
        cmd = ["ip", "-6", "rule", "add", "pref", "32765", "table", "local"]
        utils.execute(cmd, run_as_root=True,
                      check_exit_code=False)
        cmd = ["ip", "-6", "rule", "del", "pref", "0"]
        utils.execute(cmd, run_as_root=True,
                      check_exit_code=False)

    def _setup_interface_ip(self, ip, interface='lo'):
        """Sets up an IP address on the target interface

        Args:
            ip(String): ip address with cidr
            interface(String): network interface, 'lo' by default
        Return:
            None
        """
        dev = ip_lib.IPDevice(interface)
        dev.addr = ip_lib.IpAddrCommand(dev)
        existing_addresses = ip_lib.get_devices_with_ip(None, name=dev.name)
        existing_ips = [addr['cidr'] for addr in existing_addresses]
        if ip not in existing_ips:
            LOG.info("Adding %s to %s interface" % (ip, dev.name))
            dev.addr.add(cidr=ip)
        else:
            LOG.debug("%s interface already have %s ip" % (dev.name, ip))

    def get_agent_configurations(self):
        """Return the configurations dict reported to the neutron server."""
        configurations = {}
        configurations['segment_node_id'] = self.node_id
        return configurations

    def get_agent_id(self):
        """Build a unique agent id from the first device's MAC address."""
        devices = ip_lib.IPWrapper().get_devices(True)
        if devices:
            mac = ip_lib.get_device_mac(devices[0].name)
            return 'sr%s' % mac.replace(":", "")
        else:
            LOG.error("Unable to obtain MAC address for unique ID. "
                      "Agent terminated!")
            sys.exit(1)

    def get_all_devices(self, with_ifindex=False):
        """Return all existing tap devices

        They are technically devices having name starting with
        constants.TAP_DEVICE_PREFIX

        Args:
            with_ifindex(bool): if True, return dict include device index,
                                if False, return set include just device name
        Return:
            if with_ifindex is True:
                devices_with_ifindex(dict): {"<device_name>": "<ifindex>"}
            if with_ifindex is False:
                devices(set): set contains device name
        """
        devices = {} if with_ifindex else set()
        for device in os.listdir(INTERFACE_FS):
            if not device.startswith(constants.TAP_DEVICE_PREFIX):
                continue
            # Try to lookup interface index as well
            if with_ifindex:
                try:
                    with open(os.path.join(
                            INTERFACE_FS, device, 'ifindex'), 'r') as f:
                        devices[device] = int(f.read().strip())
                except (IOError, ValueError):
                    # If we failed to look it up, the device was deleted
                    # after the listdir call, so skip it rather than report
                    # a stale device.
                    continue
            else:
                devices.add(device)

        return devices

    def get_all_encap_rules(self):
        """Return the encap rules currently tracked by this manager."""
        return self.encap_info

    def get_devices_modified_timestamps(self, devices):
        # Timestamp tracking is not implemented for the SR agent.
        return {}

    def get_extension_driver_type(self):
        return EXTENSION_DRIVER_TYPE

    def get_rpc_callbacks(self, context, agent, sg_agent):
        return SrRpcCallbacks(context, agent, sg_agent)

    def get_agent_api(self, **kwargs):
        pass

    def get_rpc_consumers(self):
        """Return the (topic, action) pairs this agent subscribes to."""
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.UPDATE],
                     [topics.SECURITY_GROUP, topics.UPDATE],
                     [sr_rpc.TOPICS_ENCAP, topics.DELETE],
                     [sr_rpc.TOPICS_ENCAP_RULE, topics.UPDATE],
                     [sr_rpc.TOPICS_ENCAP, topics.UPDATE],
                     [sr_rpc.TOPICS_VRF, topics.DELETE]]
        return consumers
def plug_interface(self, vrf, device, device_details, ports, vrf_ip, 285 | vrf_cidr): 286 | tap_device_name = device 287 | try: 288 | if not ip_lib.device_exists(tap_device_name): 289 | LOG.debug("Tap device: %s does not exist on " 290 | "this host, skipped", tap_device_name) 291 | return False 292 | 293 | self.configure_tap(tap_device_name, device_details['mac_address'], 294 | device_details['related_ips'], ports, 295 | vrf, vrf_ip, vrf_cidr) 296 | 297 | LOG.debug("Finished to configure tap %s device", tap_device_name) 298 | return True 299 | except Exception: 300 | with excutils.save_and_reraise_exception() as ctx: 301 | if not ip_lib.device_exists(tap_device_name): 302 | # the exception was likely a side effect of the tap device 303 | # being removed during handling so we just return false 304 | # like we would if it didn't exist to begin with. 305 | ctx.reraise = False 306 | return False 307 | 308 | def configure_tap(self, tap_device_name, vm_mac, related_ips, 309 | ports, vrf, vrf_ip, vrf_cidr): 310 | """Configure tap device 311 | 312 | The traffic for vm's ip goes to tap device vm connected to. 313 | NB: 1 port could have multiple ip address. 
that's why 314 | related_ips is list including ip informations 315 | 316 | Args: 317 | tap_device_name(String): tap device name 318 | vm_mac(String): mac address VM use 319 | related_ips(list): [{'gw_ip': , 320 | 'cidr': , 321 | 'vm_ip': }] 322 | Return: 323 | None 324 | """ 325 | tap_dev = ip_lib.IPDevice(tap_device_name) 326 | tap_dev.addr = IpAddrCommandAcceptArgs(tap_dev) 327 | for related_ip in related_ips: 328 | # Ensure veth 329 | qvb, qvr = self._get_veth_pair_names(tap_device_name[3:]) 330 | qvr_dev = self._add_veth(qvb, qvr) 331 | # Create brdige 332 | br_name = "qbr%s" % tap_device_name[3:] 333 | self._ensure_bridge(br_name, [qvb, tap_dev.name]) 334 | cidr = '/' + related_ip['cidr'] 335 | # assign virtual gateway ip to qvr 336 | qvr_address = related_ip['gw_ip'] + cidr 337 | LOG.debug("Ensure %s having %s" % (qvr_dev.name, qvr_address)) 338 | self._ensure_dev_having_ip(qvr_dev, qvr_address) 339 | # Ensure vrf exist 340 | vrf_table = self._ensure_vrf(vrf, vrf_ip, vrf_cidr) 341 | # assign qvr to vrf 342 | self._add_avr_to_vrf(vrf, qvr) 343 | # Configure SRv6 344 | self._set_srv6_rules(vrf, vrf_ip, ports) 345 | # add static route /32 to tap 346 | vm_ip_for_route = related_ip['vm_ip'] + '/' + '32' 347 | LOG.debug("Ensure root namespace having route %s via %s" % ( 348 | vm_ip_for_route, qvr_dev.name)) 349 | self._ensure_vm_route(qvr_dev, vm_ip_for_route, vrf_table) 350 | 351 | for kernel_opts in ("net.ipv4.conf.%s.proxy_arp=1", 352 | "net.ipv4.neigh.%s.proxy_delay=0"): 353 | cmd = [kernel_opts % qvr] 354 | result = ip_lib.sysctl(cmd) 355 | if result == 1: 356 | raise SysctlCommandError(cmd=cmd) 357 | 358 | def _get_veth_pair_names(self, iface_id): 359 | return (("qvb%s" % iface_id), ("qvr%s" % iface_id)) 360 | 361 | def _add_veth(self, qvb, qvr): 362 | ip = ip_lib.IPWrapper() 363 | try: 364 | qvb_dev, qvr_dev = ip.add_veth(qvb, qvr) 365 | qvb_dev.link.set_up() 366 | qvr_dev.link.set_up() 367 | except RuntimeError: 368 | qvr_dev = ip_lib.IPDevice(qvr) 369 | 
qvr_dev.addr = IpAddrCommandAcceptArgs(qvr_dev) 370 | return qvr_dev 371 | 372 | def _bridge_exists_and_ensure_up(self, bridge_name): 373 | """Check if the bridge exists and make sure it is up.""" 374 | br = ip_lib.IPDevice(bridge_name) 375 | br.set_log_fail_as_error(False) 376 | try: 377 | # If the device doesn't exist this will throw a RuntimeError 378 | br.link.set_up() 379 | except RuntimeError: 380 | return False 381 | return True 382 | 383 | def _ensure_bridge(self, bridge_name, interfaces): 384 | """Create a bridge unless it already exists.""" 385 | # _bridge_exists_and_ensure_up instead of device_exists is used here 386 | # because there are cases where the bridge exists but it's not UP, 387 | # for example: 388 | # 1) A greenthread was executing this function and had not yet executed 389 | # "ip link set bridge_name up" before eventlet switched to this 390 | # thread running the same function 391 | # 2) The Nova VIF driver was running concurrently and had just created 392 | # the bridge, but had not yet put it UP 393 | if not self._bridge_exists_and_ensure_up(bridge_name): 394 | LOG.debug("Starting bridge %(bridge_name)s for subinterface " 395 | "%(interfaces)s", 396 | {'bridge_name': bridge_name, 'interfaces': interfaces}) 397 | bridge_device = bridge_lib.BridgeDevice.addbr(bridge_name) 398 | if bridge_device.setfd(0): 399 | return 400 | if bridge_device.disable_stp(): 401 | return 402 | if bridge_device.disable_ipv6(): 403 | return 404 | if bridge_device.link.set_up(): 405 | return 406 | LOG.debug("Done starting bridge %(bridge_name)s for " 407 | "subinterface %(interfaces)s", 408 | {'bridge_name': bridge_name, 'interfaces': interfaces}) 409 | else: 410 | bridge_device = bridge_lib.BridgeDevice(bridge_name) 411 | 412 | # Check if the interface is part of the bridge 413 | for interface in interfaces: 414 | if not bridge_device.owns_interface(interface): 415 | try: 416 | bridge_device.addif(interface) 417 | except Exception as e: 418 | LOG.error(("Unable 
to add %(interface)s to %(bridge_name)s" 419 | "! Exception: %(e)s"), 420 | {'interface': interface, 421 | 'bridge_name': bridge_name, 422 | 'e': e}) 423 | # Try ip link set 424 | cmd = ["ip", "link", "set", "dev", interface, "master", 425 | bridge_name] 426 | utils.execute(cmd, run_as_root=True, 427 | check_exit_code=False) 428 | return 429 | return bridge_name 430 | 431 | def _ensure_dev_having_ip(self, target_dev, ip): 432 | """Ensure target device have ip 433 | 434 | Args: 435 | target_dev(ip_lib.IPDevice): 436 | ip(String): ip address with cidr 437 | Return: 438 | None 439 | """ 440 | existing_addreses = ip_lib.get_devices_with_ip(None, 441 | name=target_dev.name) 442 | existing_ips = [addr['cidr'] for addr in existing_addreses] 443 | LOG.debug("The existing address of dev %s are %s" % (target_dev.name, 444 | existing_ips)) 445 | if ip not in existing_ips: 446 | target_dev.addr.add( 447 | cidr=ip, additional_args=['noprefixroute', ]) 448 | else: 449 | LOG.debug("%s already have ip %s" % (target_dev.name, ip)) 450 | 451 | def _ensure_vrf(self, vrf, vrf_ip, cidr): 452 | """Ensure vrf interface 453 | 454 | return: vrf_table 455 | """ 456 | if self.vrf_tables: 457 | vrf_table = max(list(self.vrf_tables.values())) + 1 458 | else: 459 | vrf_table = VRF_TABLE_NAMBER_BASE 460 | if vrf not in list(self.vrf_tables): 461 | privileged.create_interface(vrf, None, "vrf", vrf_table=vrf_table) 462 | privileged.set_link_attribute(vrf, None, state="up") 463 | 464 | LOG.debug("VRF %s is created" % vrf) 465 | self.vrf_tables[vrf] = vrf_table 466 | 467 | # TODO(hichihara): Refactor to use ip_lib instead of command 468 | ip = vrf_ip + '/' + cidr 469 | self._setup_interface_ip(ip, vrf) 470 | cmd = ["ip", "route", "replace", vrf_ip, "dev", vrf] 471 | utils.execute(cmd, run_as_root=True, 472 | check_exit_code=False) 473 | vrf_sid = ("%(node_id)s:%(vrf_ip)s/128" % {"node_id": self.node_id, 474 | "vrf_ip": vrf_ip}) 475 | self._setup_interface_ip(vrf_sid, vrf) 476 | 
self._setup_interface_ip("169.254.169.254/32", vrf) 477 | # Create encap rules 478 | for encap_info in self.encap_info: 479 | if vrf == encap_info['vrf']: 480 | self.add_encap_rules([encap_info], add_flag=False) 481 | break 482 | else: 483 | vrf_table = self.vrf_tables[vrf] 484 | return vrf_table 485 | 486 | def _add_avr_to_vrf(self, vrf, qvr): 487 | vrf_idx = privileged.get_link_id(vrf, None) 488 | privileged.set_link_attribute(qvr, None, master=vrf_idx) 489 | 490 | def _set_srv6_rules(self, vrf, vrf_ip, ports): 491 | # Encap rules 492 | for port in ports: 493 | # TODO(hichihara): Configure multiple fixed_ips 494 | target_ip = port["ip"] + "/32" 495 | target_node_id = port["segment_node_id"] 496 | if target_node_id is None: 497 | continue 498 | # Ensure connection between VMs have same network(vrf) 499 | target_vrf = port["vrf"] 500 | if target_vrf != vrf: 501 | continue 502 | if target_node_id != self.node_id: 503 | # Create target_sid 504 | target_sid = ("%(node_id)s:%(vrf_ip)s" % { 505 | "node_id": target_node_id, 506 | "vrf_ip": vrf_ip}) 507 | cmd = ["ip", "route", "replace", target_ip, "encap", "seg6", 508 | "mode", "encap", "segs", target_sid, "dev", vrf, 509 | "vrf", vrf] 510 | utils.execute(cmd, run_as_root=True, 511 | check_exit_code=False) 512 | 513 | # Default route to network nodes 514 | if self.gw_id: 515 | target_sid = ("%(node_id)s:%(vrf_ip)s" % { 516 | "node_id": self.gw_id, 517 | "vrf_ip": vrf_ip}) 518 | cmd = ["ip", "route", "replace", "0.0.0.0/0", "encap", "seg6", 519 | "mode", "encap", "segs", target_sid, "dev", vrf, "vrf", vrf] 520 | utils.execute(cmd, run_as_root=True, 521 | check_exit_code=False) 522 | 523 | # Decap rules 524 | # TODO(hichihara): Refactor to use ip_lib instead of command execute 525 | decap_sid = ("%(node_id)s:%(vrf_ip)s" % {"node_id": self.node_id, 526 | "vrf_ip": vrf_ip}) 527 | cmd = ["ip", "-6", "route", "replace", "local", decap_sid, "encap", 528 | "seg6local", "action", "End.DX4", "nh4", vrf_ip, "dev", vrf] 529 | 
utils.execute(cmd, run_as_root=True, 530 | check_exit_code=False) 531 | 532 | def _ensure_vm_route(self, target_dev, vm_route, vrf_table): 533 | """Ensure root namespace on host have vm_route 534 | 535 | Args: 536 | target_dev(ip_lib.IPDevice): 537 | vm_route(String): ip address for this vm with /32 538 | e.g. If vm's ip is 192.168.0.2/16, 539 | vm_route should be 192.168.0.2/32 540 | Return: 541 | None 542 | """ 543 | target_dev.route.add_route(cidr=vm_route, table=vrf_table) 544 | 545 | def _get_ip_version(self, cidr): 546 | """Check if cidr is ip version 4 or not by existence of : 547 | 548 | Args: 549 | cidr(String): ip address with cidr 550 | Return: 551 | version(Int): 4 or 6 depending on cidr 552 | """ 553 | if ":" in cidr: 554 | return 6 555 | else: 556 | return 4 557 | 558 | def add_encap_rules(self, encap_rules, add_flag=True): 559 | for target in encap_rules: 560 | # Set srv6 rule on the vrf 561 | vrf = target['vrf'] 562 | encap_info = None 563 | for encap in self.encap_info: 564 | if encap['id'] == target['id']: 565 | encap_info = encap 566 | break 567 | for rule in target['rules']: 568 | ip = rule['destination'] 569 | target_sid = rule['nexthop'] 570 | cmd = ["ip", "route", "replace", ip, "encap", "seg6", "mode", 571 | "encap", "segs", target_sid, "dev", vrf, "vrf", vrf] 572 | utils.execute(cmd, run_as_root=True, 573 | check_exit_code=False) 574 | if add_flag: 575 | if encap_info is not None: 576 | encap_info['rules'] += target['rules'] 577 | else: 578 | self.encap_info.append(target) 579 | 580 | def remove_encap_rules(self, encap_rules): 581 | for target in encap_rules: 582 | # Remove srv6 rule on the vrf 583 | vrf = target['vrf'] 584 | encap_info = None 585 | for encap in self.encap_info: 586 | if encap['id'] == target['id']: 587 | encap_info = encap 588 | break 589 | else: 590 | break 591 | for rule in target['rules']: 592 | ip = rule['destination'] 593 | target_sid = rule['nexthop'] 594 | cmd = ["ip", "route", "del", ip, "encap", "seg6", "mode", 
595 | "encap", "segs", target_sid, "dev", vrf, "vrf", vrf] 596 | utils.execute(cmd, run_as_root=True, 597 | check_exit_code=False) 598 | encap_info['rules'].remove(rule) 599 | 600 | def setup_target_sr(self, updated_targets): 601 | for target in updated_targets: 602 | # if target node is same as local node_id, 603 | # we should not configure encap rule 604 | if target["segment_node_id"] == self.node_id: 605 | continue 606 | # Set srv6 rule on the vrf 607 | vrf = target["vrf"] 608 | vrf_ip = target["vrf_ip"] 609 | # Ensure vrf exist 610 | self._ensure_vrf(vrf, vrf_ip, target["cidr"]) 611 | ip = target["ip"] + "/32" 612 | node_id = target["segment_node_id"] 613 | target_sid = ("%(node_id)s:%(vrf_ip)s" % { 614 | "node_id": node_id, 615 | "vrf_ip": vrf_ip}) 616 | cmd = ["ip", "route", "replace", ip, "encap", "seg6", "mode", 617 | "encap", "segs", target_sid, "dev", vrf, "vrf", vrf] 618 | utils.execute(cmd, run_as_root=True, 619 | check_exit_code=False) 620 | 621 | def clear_target_sr(self, removed_targets): 622 | for target in removed_targets: 623 | # Remove srv6 rule on the vrf 624 | vrf = target["vrf"] 625 | vrf_ip = target["vrf_ip"] 626 | ip = target["ip"] + "/32" 627 | node_id = target["segment_node_id"] 628 | target_sid = ("%(node_id)s:%(vrf_ip)s" % { 629 | "node_id": node_id, 630 | "vrf_ip": vrf_ip}) 631 | cmd = ["ip", "route", "del", ip, "encap", "seg6", "mode", 632 | "encap", "segs", target_sid, "dev", vrf, "vrf", vrf] 633 | utils.execute(cmd, run_as_root=True, 634 | check_exit_code=False) 635 | 636 | def remove_vrf(self, vrf): 637 | if self.vrf_tables.get(vrf): 638 | privileged.set_link_attribute(vrf, None, state="down") 639 | privileged.delete_interface(vrf, None) 640 | self.vrf_tables.pop(vrf) 641 | LOG.debug("Removed vrf %s", vrf) 642 | 643 | def get_tap_device_name(self, interface_id): 644 | """Get tap device name by interface_id. 
645 | 646 | Normally tap device name is the "tap" + first RESOURCE_ID_LENGTH 647 | characters of port id 648 | 649 | Args: 650 | interface_id(String): port uuid 651 | Return: 652 | tap_device_name(String): tap device name on the based of port id 653 | """ 654 | if not interface_id: 655 | LOG.warning("Invalid Interface ID, will lead to incorrect " 656 | "tap device name") 657 | tap_device_name = constants.TAP_DEVICE_PREFIX + \ 658 | interface_id[:RESOURCE_ID_LENGTH] 659 | return tap_device_name 660 | 661 | def ensure_port_admin_state(self, tap_name, admin_state_up): 662 | """Ensure the tap device is same status as admin_state_up 663 | 664 | Args: 665 | tap_name(String): tap device name 666 | admin_state_up(Bool): port admin status neutron maintain 667 | Return: 668 | None 669 | """ 670 | LOG.debug("Setting admin_state_up to %s for device %s", 671 | admin_state_up, tap_name) 672 | if admin_state_up: 673 | ip_lib.IPDevice(tap_name).link.set_up() 674 | else: 675 | ip_lib.IPDevice(tap_name).link.set_down() 676 | 677 | def _delete_bridge(self, bridge_name): 678 | bridge_device = bridge_lib.BridgeDevice(bridge_name) 679 | if bridge_device.exists(): 680 | try: 681 | LOG.debug("Deleting bridge %s", bridge_name) 682 | if bridge_device.link.set_down(): 683 | return 684 | if bridge_device.delbr(): 685 | return 686 | LOG.debug("Done deleting bridge %s", bridge_name) 687 | return 688 | except RuntimeError: 689 | pass 690 | LOG.debug("Cannot delete bridge %s; it does not exist", 691 | bridge_name) 692 | 693 | def delete_port(self, device): 694 | # Delete veth 695 | qvb, qvr = self._get_veth_pair_names(device[3:]) 696 | ip = ip_lib.IPWrapper() 697 | try: 698 | ip.del_veth(qvb) 699 | LOG.debug("Delete veth pair %s %s", qvb, qvr) 700 | except RuntimeError: 701 | pass 702 | # Delete bridge 703 | br_name = "qbr%s" % device[3:] 704 | self._delete_bridge(br_name) 705 | 706 | def setup_arp_spoofing_protection(self, device, device_details): 707 | pass 708 | 709 | def 
def delete_arp_spoofing_protection(self, devices):
    # ARP spoofing protection is not implemented for the SR agent.
    pass

def delete_unreferenced_arp_protection(self, current_devices):
    # ARP spoofing protection is not implemented for the SR agent.
    pass


class SrRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                     amb.CommonAgentManagerRpcCallBackBase):
    """RPC callbacks that buffer server-side events for the agent loop."""

    # Set RPC API version to 1.0 by default.
    # history
    #   1.1 Support Security Group RPC
    #   1.3 Added param devices_to_update to security_groups_provider_updated
    #   1.4 Added support for network_update
    target = oslo_messaging.Target(version='1.4')

    def __init__(self, context, agent, sg_agent):
        super(SrRpcCallbacks, self).__init__(context, agent, sg_agent)
        # Devices whose encap rules were removed, and the matching ports.
        self.removed_devices_encap = set()
        self.removed_ports = {}
        # Encap rule sets received via encap_rule_update.
        self.encap_info = []
        # Devices whose encap rules changed, and the matching ports.
        self.updated_devices_encap = set()
        self.updated_ports = {}
        # VRF names removed on the server side.
        self.removed_vrfs = set()

    def port_update(self, context, **kwargs):
        """RPC for port_update event

        This method will be called when port is updated in neutron server
        this method just add device_name associating updated port into
        updated_devices list and this device recognized as updated device in
        next iteration and if tap is in own host, plug_interaface method
        will be executed with that tap
        """
        port_id = kwargs['port']['id']
        device_name = self.agent.mgr.get_tap_device_name(port_id)
        # Put the device name in the updated_devices set.
        # Do not store port details, as if they're used for processing
        # notifications there is no guarantee the notifications are
        # processed in the same order as the relevant API requests.
        self.updated_devices.add(device_name)
        LOG.debug("port_update RPC received for port: %s", port_id)

    def encap_rule_update(self, context, **kwargs):
        """Replace the stored rule set for the encap id carried in kwargs."""
        encap_info = kwargs['encap_info']
        for encap in self.encap_info:
            if encap['id'] == encap_info['id']:
                self.encap_info.remove(encap)
                break
        self.encap_info.append(encap_info)
        # BUG FIX: this message used to say "encap_update", copy-pasted
        # from the port-level handler below.
        LOG.debug("encap_rule_update RPC received for encap rules: %s",
                  encap_info)

    def get_and_clear_updated_encaps(self):
        """Return buffered encap rule sets and reset the buffer."""
        encap_info = self.encap_info
        self.encap_info = []
        return encap_info

    def encap_delete(self, context, **kwargs):
        """Record a port whose encap rules must be removed."""
        port = kwargs['port']
        port_id = port['id']
        device_name = self.agent.mgr.get_tap_device_name(port_id)
        self.removed_devices_encap.add(device_name)
        self.removed_ports[device_name] = port
        LOG.debug("encap_delete RPC received for port: %s", port_id)

    def encap_update(self, context, **kwargs):
        """Record a port whose encap rules must be refreshed."""
        port = kwargs['port']
        port_id = port['id']
        device_name = self.agent.mgr.get_tap_device_name(port_id)

        self.updated_devices_encap.add(device_name)
        self.updated_ports[device_name] = port
        LOG.debug("encap_update RPC received for port: %s", port_id)

    def network_update(self, context, **kwargs):
        """RPC for network_update event

        This method will be called when network is updated in neutron server
        this method add all ports under this network into updated_devices list
        """
        network_id = kwargs['network']['id']
        LOG.debug("network_update message processed for network "
                  "%(network_id)s, with ports: %(ports)s",
                  {'network_id': network_id,
                   'ports': self.agent.network_ports[network_id]})
        for port_data in self.agent.network_ports[network_id]:
            self.updated_devices.add(port_data['device'])

    def get_and_clear_removed_devices_encap(self):
        """Get and clear the list of devices for which a removed was received.

        :return: set - A set with removed devices. Format is ['tap1', 'tap2']
        """

        # Save and reinitialize the set variable that the port_delete RPC
        # uses. This should be thread-safe as the greenthread should not
        # yield between these two statements.
        removed_devices_encap = self.removed_devices_encap
        self.removed_devices_encap = set()
        return removed_devices_encap

    def get_removed_ports(self, devices):
        """Yield the stored port dict for each device that still has one."""
        for device in devices:
            try:
                yield self.removed_ports[device]
            except KeyError:
                # Already removed
                pass

    def clear_removed_ports(self, devices):
        """Forget the stored port dicts for the given devices."""
        for device in devices:
            self.removed_ports.pop(device, None)

    def network_delete(self, context, **kwargs):
        pass

    def get_and_clear_updated_devices_encap(self):
        """Get and clear the list of devices for which a updated was received.

        :return: set - A set with updated devices. Format is ['tap1', 'tap2']
        """

        # Save and reinitialize the set variable that the port_delete RPC
        # uses. This should be thread-safe as the greenthread should not
        # yield between these two statements.
        updated_devices_encap = self.updated_devices_encap
        self.updated_devices_encap = set()
        return updated_devices_encap

    def get_updated_ports(self, devices):
        """Yield the stored port dict for each device that still has one."""
        for device in devices:
            try:
                yield self.updated_ports[device]
            except KeyError:
                # Already removed
                pass

    def clear_updated_ports(self, devices):
        """Forget the stored port dicts for the given devices."""
        for device in devices:
            self.updated_ports.pop(device, None)

    def vrf_delete(self, context, **kwargs):
        """Record a vrf whose device must be removed from this host."""
        vrf = kwargs['vrf']
        LOG.debug("vrf_delete message processed for vrf "
                  "%(vrf)s", {'vrf': vrf})
        self.removed_vrfs.add(vrf)

    def get_and_clear_removed_vrfs(self):
        """Get and clear the list of vrfs for which a removed was received.

        :return: set - A set with removed vrfs.
        """
        removed_vrfs = self.removed_vrfs
        self.removed_vrfs = set()
        return removed_vrfs


class IpAddrCommandAcceptArgs(ip_lib.IpAddrCommand):
    """IpAddrCommand variant whose add() accepts extra ip-addr arguments."""

    def add(self, cidr, scope='global', add_broadcast=True,
            additional_args=None):
        """This is method for executing "ip addr add" as root

        The reason why it override is we want to specify option
        but super class doesn't allow us to pass additional option

        Args:
            cidr(String): ip address with subnet
            scope(String): scope of this address
            add_broadcast(Bool): if True, it add "brd" option
            additional_args(list): additional arguments
        Return:
            None
        """
        net = netaddr.IPNetwork(cidr)
        args = ['add', cidr,
                'scope', scope,
                'dev', self.name]
        if add_broadcast and net.version == 4:
            # Broadcast address is the last address of the network.
            args += ['brd', str(net[-1])]
        if additional_args:
            args += additional_args
        self._as_root([net.version], tuple(args))


def main():
    """Entry point of the SR agent binary."""
    common_config.init(sys.argv[1:])
    common_config.setup_logging()

    manager = SrManager()

    polling_interval = cfg.CONF.AGENT.polling_interval
    quitting_rpc_timeout = cfg.CONF.AGENT.quitting_rpc_timeout
    agent = sr_agent_loop.SrAgentLoop(manager, polling_interval,
                                      quitting_rpc_timeout,
                                      AGENT_TYPE_SR,
                                      SR_AGENT_BINARY)
    setup_profiler.setup(SR_AGENT_BINARY, cfg.CONF.host)
    LOG.info("Agent initialized successfully, now running... ")
    launcher = service.launch(cfg.CONF, agent, restart_method='mutate')
    launcher.wait()
# --------------------------------------------------------------------------
# networking_sr/ml2/agent/sr_agent_loop.py:
# --------------------------------------------------------------------------
# Copyright (c) 2018 Line Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# Copyright (c) 2018 Line Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from functools import partial
import time

from neutron_lib.agent import topics
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall

from neutron.agent.dhcp.agent import DhcpPluginApi
from neutron.agent.metadata.agent import MetadataPluginAPI
from neutron.api.rpc.handlers import resources_rpc
from neutron.plugins.ml2.drivers.agent import _common_agent as ca

from networking_sr.common import vrf_utils
from networking_sr.ml2.agent.dnsmasq_manager import DnsmasqManager
from networking_sr.objects import srv6_encap_network as objects

LOG = logging.getLogger(__name__)


class SrAgentLoop(ca.CommonAgentLoop):

    def setup_rpc(self):
        """Set up the RPC clients this agent loop needs.

        In addition to the RPC clients the common agent loop already uses,
        a DHCP RPC client is set up in order to fetch subnet information,
        together with a metadata RPC client and a resources-pull client.
        """
        self.dhcp_rpc = DhcpPluginApi(topics.PLUGIN, cfg.CONF.host)
        self.meta_rpc = MetadataPluginAPI(topics.PLUGIN)
        self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
        super(SrAgentLoop, self).setup_rpc()

    def start(self):
        """Entry point of the SR agent loop, invoked by oslo_service.

        Initializes the per-run caches, the dnsmasq manager and a periodic
        task that re-synchronizes dnsmasq host entries.
        """
        self.subnet_info_map = {}
        self.force_updated_device = []
        # network_type_map = {'network_id': 'provider:network_type'}
        self.network_type_map = {}
        # NOTE: Unnecessary for nova-meta-api model
        # MetadataProxyManager.initialize(self.mgr.process_monitor)
        DnsmasqManager.initialize(self.mgr.process_monitor)

        sync_host_func = partial(DnsmasqManager.sync_host_entries,
                                 self.mgr.get_all_devices,
                                 self.add_force_updated_device)

        sync_host_service = loopingcall.FixedIntervalLoopingCall(
            sync_host_func)
        # NOTE: Use static value instead of config
        sync_host_service.start(interval=60)

        super(SrAgentLoop, self).start()

    def add_force_updated_device(self, device):
        """Queue a device (interface name) for forced reconfiguration.

        Args:
            device(String): device name, usually tapXXXXX
        Return:
            None
        """
        self.force_updated_device.append(device)
        LOG.warning("Added %s into force_updated_device list", device)
90 | Args: 91 | device(String): device name usually tapXXXXX 92 | Return: 93 | None 94 | """ 95 | self.force_updated_device.append(device) 96 | LOG.warning("Added %s into force_updated_device list", device) 97 | 98 | def get_force_updated_device_and_clear(self): 99 | """Get force_updated_device list and clear that list 100 | 101 | Args: 102 | None 103 | Return: 104 | devices(list): list contains device name have to be 105 | configured 106 | """ 107 | devices = self.force_updated_device 108 | self.force_updated_device = [] 109 | return devices 110 | 111 | def _get_ifindex_changed(self, previous, current): 112 | """Get interface index changed 113 | 114 | Compare interface index map between previous and current and 115 | return set including device name only having different interface 116 | index than previous one. 117 | Args: 118 | previous(dict): {"": ""} 119 | current(dict): {"": ""} 120 | Return: 121 | changed_device_names(set) : 122 | set inlcuding device name having different 123 | index than previous interface index map 124 | """ 125 | changed_device_names = set() 126 | for c_dev_name, c_dev_i in current.items(): 127 | if previous.get(c_dev_name) and\ 128 | c_dev_i != previous.get(c_dev_name): 129 | changed_device_names.add(c_dev_name) 130 | return changed_device_names 131 | 132 | def scan_devices(self, previous, sync): 133 | """Scan devices and check which tap device is new/changed/removed 134 | 135 | Args: 136 | previous(dict): previous device information 137 | {"added": set, 138 | "current": set, 139 | "updated": set, 140 | "removed": set, 141 | "ifmap": {"": ""} 142 | } 143 | sync(Bool): If this is true, we believe all current devices as new 144 | Return: 145 | device_info(dict): same format as previous(dict) 146 | """ 147 | device_info = {} 148 | updated_devices = self.rpc_callbacks.get_and_clear_updated_devices() 149 | 150 | current_devices_ifindex = self.mgr.get_all_devices(with_ifindex=True) 151 | current_devices = set(current_devices_ifindex.keys()) 
152 | device_info['current'] = current_devices 153 | 154 | if previous is None: 155 | # NOTE: In rocky, ifmap is changed to timestamps but not affect 156 | previous = {'added': set(), 'current': set(), 157 | 'updated': set(), 'removed': set(), 'ifmap': {}} 158 | 159 | device_info['ifmap'] = current_devices_ifindex 160 | locally_updated = self._get_ifindex_changed(previous['ifmap'], 161 | device_info['ifmap']) 162 | locally_updated |= set(self.get_force_updated_device_and_clear()) 163 | 164 | if locally_updated: 165 | LOG.debug("Adding locally changed devices to updated set: %s", 166 | locally_updated) 167 | updated_devices |= locally_updated 168 | 169 | if sync: 170 | LOG.info("Sync all devices") 171 | # This is the first iteration, or the previous one had a problem. 172 | # Re-add all existing devices. 173 | device_info['added'] = current_devices 174 | 175 | # Retry cleaning devices that may not have been cleaned properly. 176 | # And clean any that disappeared since the previous iteration. 177 | device_info['removed'] = (previous['removed'] | 178 | previous['current'] - 179 | current_devices) 180 | 181 | # Retry updating devices that may not have been updated properly. 182 | # And any that were updated since the previous iteration. 183 | # Only update devices that currently exist. 
184 | device_info['updated'] = (previous['updated'] | 185 | updated_devices & 186 | current_devices) 187 | 188 | else: 189 | device_info['added'] = current_devices - previous['current'] 190 | device_info['removed'] = previous['current'] - current_devices 191 | device_info['updated'] = updated_devices & current_devices 192 | 193 | return device_info 194 | 195 | def scan_encaps(self, previous, sync): 196 | """Scan encap rules and check which rules is new/changed/removed 197 | 198 | Args: 199 | previous(dict): previous encap information 200 | {"targets": list 201 | "targets_updated": list 202 | "targets_removed": list 203 | } 204 | sync(Bool): If this is true, all current encap rules as new 205 | Return: 206 | encap_info(dict): same format as previous(dict) 207 | """ 208 | encap_info = {'targets': [], 'targets_updated': [], 209 | 'targets_removed': []} 210 | 211 | updated_encaps = self.rpc_callbacks.get_and_clear_updated_encaps() 212 | all_encaps = self.mgr.get_all_encap_rules() 213 | 214 | if previous is None: 215 | previous = {'targets': [], 'targets_updated': [], 216 | 'targets_removed': []} 217 | 218 | if sync: 219 | LOG.info("Sync all encap rules") 220 | encap_nets = self.resource_rpc.bulk_pull( 221 | self.context, 222 | objects.SRv6EncapNetwork.obj_name()) 223 | # NOTE: Fails to get encap_nets.encap_rules so gets encap_rules 224 | bulk_encap_rules = self.resource_rpc.bulk_pull( 225 | self.context, 226 | objects.SRv6EncapRule.obj_name()) 227 | current_encaps = [] 228 | for encap_net in encap_nets: 229 | net_id = encap_net['network_id'] 230 | project_id = encap_net['project_id'] 231 | network_type = self.network_type_map.get(net_id) 232 | if network_type is None: 233 | network_info = self.dhcp_rpc.get_network_info(net_id) 234 | network_type = network_info["provider:network_type"] 235 | self.network_type_map[net_id] = network_type 236 | vrf = vrf_utils.get_vrf_name(network_type, project_id, net_id) 237 | encap_rules = [] 238 | for rule in bulk_encap_rules: 239 | if 
rule.srv6_encap_network_id != encap_net['id']: 240 | continue 241 | encap_rules.append({"destination": rule['destination'], 242 | "nexthop": rule['nexthop']}) 243 | current_encaps.append({'id': encap_net['id'], 244 | 'rules': encap_rules, 245 | 'vrf': vrf}) 246 | for encap in current_encaps: 247 | for pre_encap in all_encaps: 248 | if encap['id'] == pre_encap['id']: 249 | added, removed = helpers.diff_list_of_dict( 250 | pre_encap['rules'], 251 | encap['rules']) 252 | encap_info['targets_updated'].append( 253 | {'id': encap['id'], 254 | 'rules': added, 255 | 'vrf': encap['vrf']}) 256 | encap_info['targets_removed'].append( 257 | {'id': encap['id'], 258 | 'rules': removed, 259 | 'vrf': encap['vrf']}) 260 | break 261 | else: 262 | encap_info['targets_updated'].append( 263 | {'id': encap['id'], 264 | 'rules': encap['rules'], 265 | 'vrf': encap['vrf']}) 266 | encap_info['targets'] = current_encaps 267 | else: 268 | for encap in updated_encaps: 269 | for pre_encap in previous['targets']: 270 | if encap['id'] == pre_encap['id']: 271 | added, removed = helpers.diff_list_of_dict( 272 | pre_encap['rules'], 273 | encap['rules']) 274 | encap_info['targets_updated'].append( 275 | {'id': encap['id'], 276 | 'rules': added, 277 | 'vrf': encap['vrf']}) 278 | encap_info['targets_removed'].append( 279 | {'id': encap['id'], 280 | 'rules': removed, 281 | 'vrf': encap['vrf']}) 282 | if (len(removed) != len(pre_encap['rules'])) or added: 283 | encap_info['targets'].append(encap) 284 | previous['targets'].remove(pre_encap) 285 | break 286 | else: 287 | encap_info['targets_updated'].append( 288 | {'id': encap['id'], 289 | 'rules': encap['rules'], 290 | 'vrf': encap['vrf']}) 291 | encap_info['targets'].append(encap) 292 | encap_info['targets'] += previous['targets'] 293 | return encap_info 294 | 295 | def scan_devices_encap(self, previous, sync): 296 | """Scan encap of devices and check which port is new/changed/removed 297 | 298 | Args: 299 | previous(dict): previous device encap 
information 300 | {"targets": set 301 | "targets_updated": set 302 | "targets_removed": set 303 | } 304 | sync(Bool): If this is true, we believe all current devices as new 305 | Return: 306 | device_encap_info(dict): same format as previous(dict) 307 | """ 308 | device_encap_info = {} 309 | updated_devices_encap = \ 310 | self.rpc_callbacks.get_and_clear_updated_devices_encap() 311 | removed_devices_encap = \ 312 | self.rpc_callbacks.get_and_clear_removed_devices_encap() 313 | current_devices_ifindex = self.mgr.get_all_devices(with_ifindex=True) 314 | current_devices = set(current_devices_ifindex.keys()) 315 | # Removes device info isn't included in targets_updated 316 | self.rpc_callbacks.clear_updated_ports(current_devices) 317 | 318 | if previous is None: 319 | # NOTE: In rocky, ifmap is changed to timestamps but not affect 320 | previous = {'targets': set(), 'targets_updated': set(), 321 | 'targets_removed': set()} 322 | if sync: 323 | LOG.info("Sync all devices encap rules") 324 | device_encap_info['targets_updated'] = ( 325 | updated_devices_encap - current_devices 326 | ) | previous["targets"] 327 | # Take care of a case device stored in both updated and removed 328 | device_encap_info['targets_updated'] -= removed_devices_encap 329 | device_encap_info['targets_removed'] = ( 330 | removed_devices_encap | previous["targets_removed"]) 331 | device_encap_info['targets'] = ( 332 | device_encap_info['targets_updated'] - 333 | device_encap_info['targets_removed']) 334 | else: 335 | # Doesn't detect existing port update for SR 336 | device_encap_info['targets_updated'] = ( 337 | updated_devices_encap - current_devices) 338 | # Take care of a case device stored in both updated and removed 339 | device_encap_info['targets_updated'] -= removed_devices_encap 340 | device_encap_info['targets_removed'] = removed_devices_encap 341 | device_encap_info['targets'] = ( 342 | previous['targets'] | device_encap_info['targets_updated'] 343 | ) - 
device_encap_info['targets_removed'] 344 | return device_encap_info 345 | 346 | def scan_removed_vrfs(self, previous, sync): 347 | """Scan removed vrfs 348 | 349 | Args: 350 | previous(set): List of previous removed vrfs 351 | sync(Bool): If this is true, all removed vrfs as new 352 | Return: 353 | removed_vrf_info(set): List of removed vrfs 354 | """ 355 | removed_vrf_info = set() 356 | removed_vrfs = self.rpc_callbacks.get_and_clear_removed_vrfs() 357 | 358 | if previous is None: 359 | previous = set() 360 | 361 | if sync: 362 | LOG.info("Sync all removed vrfs") 363 | removed_vrf_info = removed_vrfs | previous 364 | else: 365 | removed_vrf_info = removed_vrfs 366 | return removed_vrf_info 367 | 368 | def process_sr_devices(self, device_info): 369 | resync_a = False 370 | resync_b = False 371 | 372 | if device_info.get('targets_updated'): 373 | resync_a = self.treat_sr_devices_updated( 374 | device_info['targets_updated']) 375 | 376 | if device_info.get('targets_removed'): 377 | resync_b = self.treat_sr_devices_removed( 378 | device_info['targets_removed']) 379 | # If one of the above operations fails => resync with plugin 380 | return (resync_a | resync_b) 381 | 382 | def process_encap_rules(self, encap_info): 383 | resync_a = False 384 | resync_b = False 385 | 386 | if encap_info.get('targets_removed'): 387 | resync_a = self.treat_encap_rules_removed( 388 | encap_info['targets_removed']) 389 | 390 | if encap_info.get('targets_updated'): 391 | resync_b = self.treat_encap_rules_updated( 392 | encap_info['targets_updated']) 393 | 394 | # If one of the above operations fails => resync with plugin 395 | return (resync_a | resync_b) 396 | 397 | def treat_sr_devices_updated(self, devices): 398 | updated_targets = [] 399 | for port in self.rpc_callbacks.get_updated_ports(devices): 400 | target_node_id = port["binding:profile"].get("segment_node_id") 401 | target_vrf = port["binding:profile"].get("vrf") 402 | target_vrf_ip = port["binding:profile"].get("vrf_ip") 403 | 
target_vrf_cidr = port["binding:profile"].get("vrf_cidr") 404 | if not target_node_id or not target_vrf: 405 | LOG.error("Detected a port without SRv6 info") 406 | return True 407 | for fixed_ip in port["fixed_ips"]: 408 | ip = fixed_ip['ip_address'] 409 | cidr = target_vrf_cidr.split('/')[-1] 410 | updated_targets.append({ 411 | "ip": ip, 412 | "vrf": target_vrf, 413 | "cidr": cidr, 414 | "segment_node_id": target_node_id, 415 | "vrf_ip": target_vrf_ip 416 | }) 417 | if updated_targets: 418 | self.mgr.setup_target_sr(updated_targets) 419 | self.rpc_callbacks.clear_updated_ports(devices) 420 | return False 421 | 422 | def treat_sr_devices_removed(self, devices): 423 | removed_targets = [] 424 | for port in self.rpc_callbacks.get_removed_ports(devices): 425 | target_node_id = port["binding:profile"].get("segment_node_id") 426 | target_vrf = port["binding:profile"].get("vrf") 427 | target_vrf_ip = port["binding:profile"].get("vrf_ip") 428 | if not target_node_id or not target_vrf: 429 | continue 430 | for fixed_ip in port["fixed_ips"]: 431 | ip = fixed_ip['ip_address'] 432 | removed_targets.append({ 433 | "ip": ip, 434 | "vrf": target_vrf, 435 | "cidr": "", 436 | "segment_node_id": target_node_id, 437 | "vrf_ip": target_vrf_ip 438 | }) 439 | if removed_targets: 440 | self.mgr.clear_target_sr(removed_targets) 441 | self.rpc_callbacks.clear_removed_ports(devices) 442 | return False 443 | 444 | def treat_encap_rules_updated(self, encap_rules): 445 | self.mgr.add_encap_rules(encap_rules) 446 | return False 447 | 448 | def treat_encap_rules_removed(self, encap_rules): 449 | self.mgr.remove_encap_rules(encap_rules) 450 | return False 451 | 452 | def treat_vrf_remove(self, removed_vrf_info): 453 | for vrf in removed_vrf_info: 454 | self.mgr.remove_vrf(vrf) 455 | return False 456 | 457 | def _device_info_has_changes(self, device_info): 458 | return (device_info.get('added') or 459 | device_info.get('updated') or 460 | device_info.get('removed')) 461 | 462 | def 
_device_sr_info_has_changes(self, device_info): 463 | return (device_info.get('targets_updated') or 464 | device_info.get('targets_removed')) 465 | 466 | def _encap_rule_info_has_changes(self, encap_info): 467 | return (encap_info.get('targets_updated') or 468 | encap_info.get('targets_removed')) 469 | 470 | def daemon_loop(self): 471 | LOG.info("%s Agent RPC Daemon Started!", self.agent_type) 472 | device_info = None 473 | encap_info = None 474 | device_encap_info = None 475 | removed_vrf_info = None 476 | sync = True 477 | 478 | while True: 479 | start = time.time() 480 | 481 | if self.fullsync: 482 | sync = True 483 | self.fullsync = False 484 | 485 | if sync: 486 | LOG.info("%s Agent out of sync with plugin!", 487 | self.agent_type) 488 | 489 | device_info = self.scan_devices(previous=device_info, sync=sync) 490 | encap_info = self.scan_encaps(previous=encap_info, sync=sync) 491 | device_encap_info = self.scan_devices_encap( 492 | previous=device_encap_info, 493 | sync=sync) 494 | removed_vrf_info = self.scan_removed_vrfs( 495 | previous=removed_vrf_info, 496 | sync=sync) 497 | sync = False 498 | 499 | if (self._device_info_has_changes(device_info) or 500 | self.sg_agent.firewall_refresh_needed()): 501 | LOG.debug("Agent loop found changes! %s", device_info) 502 | try: 503 | sync = self.process_network_devices(device_info) 504 | except Exception: 505 | LOG.exception("Error in agent loop. Devices info: %s", 506 | device_info) 507 | sync = True 508 | 509 | if self._device_sr_info_has_changes(device_encap_info): 510 | LOG.debug("Agent loop found SR changes! %s", device_encap_info) 511 | try: 512 | sync = self.process_sr_devices(device_encap_info) 513 | except Exception: 514 | LOG.exception("Error in agent loop. SR Devices info: %s", 515 | device_encap_info) 516 | sync = True 517 | 518 | if self._encap_rule_info_has_changes(encap_info): 519 | LOG.debug("Agent loop found Encap Rule changes! 
%s", 520 | encap_info) 521 | try: 522 | sync = self.process_encap_rules(encap_info) 523 | except Exception: 524 | LOG.exception("Error in agent loop. Encap Rules info: %s", 525 | encap_info) 526 | sync = True 527 | 528 | if removed_vrf_info: 529 | LOG.debug("Agent loop found vrfs should be removed! %s", 530 | removed_vrf_info) 531 | try: 532 | sync = self.treat_vrf_remove(removed_vrf_info) 533 | except Exception: 534 | LOG.exception("Error in agent loop. Removed vrf info: " 535 | "%s", removed_vrf_info) 536 | sync = True 537 | 538 | # sleep till end of polling interval 539 | elapsed = (time.time() - start) 540 | if (elapsed < self.polling_interval): 541 | time.sleep(self.polling_interval - elapsed) 542 | else: 543 | LOG.debug("Loop iteration exceeded interval " 544 | "(%(polling_interval)s vs. %(elapsed)s)!", 545 | {'polling_interval': self.polling_interval, 546 | 'elapsed': elapsed}) 547 | 548 | def _lookup_subnet_info(self, subnet_id, network_id): 549 | """Get subnet information 550 | 551 | At first this try to search local cache(self.subnet_info_map) but if 552 | there is no cache, it try to call rpc api to get subnet information 553 | matching subnet_id from neutron server and update subnet cache 554 | 555 | Args: 556 | subnet_id(String): subnet uuid 557 | network_id(String): network uuid 558 | Return: 559 | subnet_info(dict): {"gateway_ip": , 560 | "cidr": , 561 | "allocation_pools": [{"start": , 562 | "end": },], 563 | "host_routes": [{"destination": , 564 | "next_hop": }] 565 | } 566 | """ 567 | if subnet_id not in self.subnet_info_map: 568 | LOG.debug("Subnet %s is not in subnet_info_map," 569 | " retrieving its details via RPC", subnet_id) 570 | try: 571 | network_info = self.dhcp_rpc.get_network_info(network_id) 572 | LOG.debug("get_network_info rpc returned %s" % network_info) 573 | for subnet in network_info["subnets"]: 574 | self.subnet_info_map[subnet["id"]] = { 575 | "gateway_ip": subnet["gateway_ip"], 576 | "cidr": subnet["cidr"], 577 | 
"allocation_pools": subnet["allocation_pools"], 578 | "host_routes": subnet["host_routes"], 579 | "dns_nameservers": subnet.get("dns_nameservers", []) 580 | } 581 | except Exception as e: 582 | LOG.exception( 583 | "Unable to get subnet information for %s from %s" 584 | " network: %s", subnet_id, network_id, e) 585 | return {} 586 | return self.subnet_info_map[subnet_id] 587 | 588 | def _translate_routes_format(self, host_routes): 589 | """Translate the format from neutron defined to dnsmasq friendly format 590 | 591 | Args: 592 | host_routes(list): [{"destination": , 593 | "next_hop": }] 594 | Return: 595 | static_routes(list): [",",] 596 | """ 597 | static_routes = [] 598 | for host_route in host_routes: 599 | static_routes.append( 600 | host_route["destination"] + "," + host_route["nexthop"]) 601 | return static_routes 602 | 603 | # This will be called when "new tap device being appeared" or 604 | # "port updated on server side and updated port related device is on host" 605 | # regardless of binding status 606 | def treat_devices_added_updated(self, devices): 607 | """Treat devices addedd or updated 608 | 609 | This method will be called when new/updated tap deivces detected. 610 | The reason why we override this method is we want to pass more 611 | information about port onto manager rather than just segment id, 612 | network type, device owner. 
613 | Args: 614 | devices(list): list contains device name such as tapXXX 615 | Return: 616 | resync_flg(Bool): If it's True, try to full sync in next interation 617 | """ 618 | try: 619 | # This doesn't return the ports matching following conditions 620 | # * port not existing in neutron database 621 | # * port exisiting but being not bound to anywhere 622 | # In other words, this rpc call return following ports 623 | # * port being bound to other host 624 | # * port beind bound to own host 625 | 626 | # NOTE: I cannot understand the reason doesn't specify host 627 | """ 628 | devices_details_list = self.plugin_rpc.get_devices_details_list( 629 | self.context, devices, self.agent_id) 630 | """ 631 | devices_details_list = self.plugin_rpc.get_devices_details_list( 632 | self.context, devices, self.agent_id, host=cfg.CONF.host) 633 | # If we don't specify host here and call above rpc api with the 634 | # port bound to somewhere else than myself, Neutron server change 635 | # port status to BUILD inside rpc api. 
636 | # One more thing we have to notice here this rpc call still return 637 | # port even if that is bound to other host without host 638 | # information, which measn there is no way for agent to know 639 | # if we have to bind that tap into own or not by just this rpc 640 | except Exception: 641 | LOG.exception("Unable to get port details for %s", devices) 642 | # resync is needed 643 | return True 644 | 645 | for device_details in devices_details_list: 646 | device = device_details['device'] 647 | LOG.debug("Port %s adding", device) 648 | if 'port_id' in device_details: 649 | LOG.info("Port %(device)s details: %(details)s", 650 | {'device': device, 'details': device_details}) 651 | network_id = device_details['network_id'] 652 | vrf = device_details["profile"].get("vrf") 653 | vrf_ip = device_details["profile"].get("vrf_ip") 654 | vrf_cidr = device_details["profile"].get("vrf_cidr") 655 | if vrf_cidr: 656 | cidr = vrf_cidr.split('/')[-1] 657 | if vrf is None: 658 | return True 659 | related_ips = [] 660 | for fixed_ip in device_details['fixed_ips']: 661 | subnet_id = fixed_ip['subnet_id'] 662 | vm_ip = fixed_ip['ip_address'] 663 | subnet_info = self._lookup_subnet_info(subnet_id, 664 | network_id) 665 | if subnet_info: 666 | cidr_notation = subnet_info['cidr'].split('/')[-1] 667 | related_ips.append({ 668 | "vm_ip": vm_ip, 669 | "gw_ip": subnet_info['gateway_ip'], 670 | "cidr": cidr_notation 671 | }) 672 | created = DnsmasqManager.ensure_dhcp_opts( 673 | subnet_id, 674 | self.subnet_info_map[subnet_id]["gateway_ip"], 675 | self._translate_routes_format( 676 | self.subnet_info_map[ 677 | subnet_id]["host_routes"]), 678 | cidr_notation, 679 | self.subnet_info_map[subnet_id]["dns_nameservers"]) 680 | if not created: 681 | return True 682 | 683 | created = DnsmasqManager.add_fixedip_entry( 684 | subnet_id, device, 685 | device_details["mac_address"], vm_ip) 686 | if not created: 687 | return True 688 | else: 689 | # resync is needed 690 | return True 691 | 692 | 
device_details['related_ips'] = related_ips 693 | 694 | # Get ports info of VMs which belogn on the same project 695 | # and network 696 | port_id = device_details['port_id'] 697 | ports = self.meta_rpc.get_ports( 698 | self.context, filters={'id': [port_id]}) 699 | project_id = ports[0]['project_id'] 700 | ports = self.meta_rpc.get_ports( 701 | self.context, filters={'network_id': [network_id], 702 | 'project_id': [project_id]}) 703 | target_ports = [] 704 | for port in ports: 705 | target_node_id = port["binding:profile"].get( 706 | "segment_node_id") 707 | if (cfg.CONF.sr.segment_node_id == target_node_id or 708 | port['id'] == port_id): 709 | continue 710 | target_vrf = port["binding:profile"].get("vrf") 711 | target_vrf_ip = port["binding:profile"].get("vrf_ip") 712 | target_vrf_cidr = port["binding:profile"].get("vrf_cidr") 713 | if not target_node_id or not target_vrf: 714 | continue 715 | for fixed_ip in port["fixed_ips"]: 716 | target_ip = fixed_ip['ip_address'] 717 | target_cidr = target_vrf_cidr.split('/')[-1] 718 | target_ports.append({ 719 | "ip": target_ip, 720 | "vrf": target_vrf, 721 | "cidr": target_cidr, 722 | "segment_node_id": target_node_id, 723 | "vrf_ip": target_vrf_ip 724 | }) 725 | interface_plugged = self.mgr.plug_interface( 726 | vrf, device, device_details, target_ports, vrf_ip, 727 | cidr) 728 | 729 | if interface_plugged: 730 | self.mgr.ensure_port_admin_state( 731 | device, device_details['admin_state_up']) 732 | # update plugin about port status if admin_state is up 733 | if device_details['admin_state_up']: 734 | if interface_plugged: 735 | # NB: We have to call this rpc to let neutron server 736 | # know we could plug new tap device into our network 737 | # even if new tap is not bound to own host, because 738 | # Nova intentionally try to plug tap into other host 739 | # than bounded host neutron know when just before 740 | # start live migration for Nova to know if destination 741 | # host can plug tap device or not But some 
packet loss 742 | # could happen at that time, cause we will configure 743 | # static route for that tap 744 | self.plugin_rpc.update_device_up(self.context, 745 | device, 746 | self.agent_id, 747 | cfg.CONF.host) 748 | else: 749 | self.plugin_rpc.update_device_down(self.context, 750 | device, 751 | self.agent_id, 752 | cfg.CONF.host) 753 | self._update_network_ports(device_details['network_id'], 754 | device_details['port_id'], 755 | device_details['device']) 756 | self.ext_manager.handle_port(self.context, device_details) 757 | else: 758 | LOG.info("Device %s not defined on plugin", device) 759 | # no resync is needed 760 | return False 761 | 762 | def treat_devices_removed(self, devices): 763 | resync = False 764 | self.sg_agent.remove_devices_filter(devices) 765 | for device in devices: 766 | LOG.info("Attachment %s removed", device) 767 | details = None 768 | try: 769 | details = self.plugin_rpc.update_device_down(self.context, 770 | device, 771 | self.agent_id, 772 | cfg.CONF.host) 773 | except Exception: 774 | LOG.exception("Error occurred while removing port %s", 775 | device) 776 | resync = True 777 | if details and details['exists']: 778 | LOG.info("Port %s updated.", device) 779 | else: 780 | LOG.debug("Device %s not defined on plugin", device) 781 | port_id = self._clean_network_ports(device) 782 | self.ext_manager.delete_port(self.context, 783 | {'device': device, 784 | 'port_id': port_id}) 785 | self.mgr.delete_port(device) 786 | 787 | self.mgr.delete_arp_spoofing_protection(devices) 788 | return resync 789 | -------------------------------------------------------------------------------- /networking_sr/ml2/mech_driver/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/line/networking-sr/0c7302ef7e9733c4a07aed48accb6c190485f14d/networking_sr/ml2/mech_driver/__init__.py -------------------------------------------------------------------------------- 
class SrMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
    """ML2 mechanism driver for SRv6 segment-routing networks.

    Binds ports as TAP devices on hosts running the SR agent
    (AGENT_TYPE_SR) and notifies agents about encap updates over RPC.
    For 'srv6vrf' networks it additionally manages a lazily created
    VRF IP allocation whose data is published through the port's
    binding profile.
    """

    def __init__(self):
        sg_enabled = securitygroups_rpc.is_firewall_enabled()
        super(SrMechanismDriver, self).__init__(
            AGENT_TYPE_SR,
            portbindings.VIF_TYPE_TAP,
            {portbindings.CAP_PORT_FILTER: sg_enabled})
        self.sr_rpc_api = sr_rpc.SrAgentApi(topics.AGENT)
        # Created lazily: the vrf network may not exist yet when the
        # driver is initialized (see _create_vrf_ip).
        self.vrf_allocation = None

    def get_allowed_network_types(self, agent=None):
        # Both plain srv6 and srv6vrf segments can be bound.
        return [type_srv6.SRV6, type_srv6vrf.SRV6VRF]

    def get_mappings(self, agent):
        # No physical network mappings for SRv6.
        pass

    def check_segment_for_agent(self, segment, agent):
        """Check if segment can be bound for agent.

        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind
        :returns: True iff segment can be bound for agent

        Called outside any transaction during bind_port so that derived
        MechanismDrivers can use agent_db data along with built-in
        knowledge of the corresponding agent's capabilities to
        determine whether or not the specified network segment can be
        bound for the agent.
        """

        allowed_network_types = self.get_allowed_network_types(agent)

        LOG.debug("Checking segment: %(segment)s "
                  "with network types: %(network_types)s",
                  {'segment': segment,
                   'network_types': allowed_network_types})

        network_type = segment[api.NETWORK_TYPE]
        if network_type not in allowed_network_types:
            LOG.debug(
                'Network %(network_id)s with segment %(id)s is type '
                'of %(network_type)s but agent %(agent)s or mechanism driver '
                'only support %(allowed_network_types)s.',
                {'network_id': segment['network_id'],
                 'id': segment['id'],
                 'network_type': network_type,
                 'agent': agent['host'],
                 'allowed_network_types': allowed_network_types})
            return False

        return True

    def _create_vrf_ip(self, context, vrf):
        """Allocate a VRF IP port for ``vrf``, recovering from a stale
        allocation whose backing network was deleted.

        :param context: plugin request context
        :param vrf: VRF name used as the port name
        :returns: the allocated vrf port
        """
        if self.vrf_allocation is None:
            self.vrf_allocation = vrf_utils.VrfIpAllocation()
        try:
            vrf_port = self.vrf_allocation.create_vrf_ip(context, vrf)
        except n_exc.NetworkNotFound:
            # An old vrf network might be deleted. Try to get a new one
            self.vrf_allocation = vrf_utils.VrfIpAllocation()
            # Bug fix: the retry previously dropped the ``context``
            # argument (create_vrf_ip(vrf)), which made the recovery
            # path fail with a TypeError instead of reallocating.
            vrf_port = self.vrf_allocation.create_vrf_ip(context, vrf)
        return vrf_port

    def _delete_vrf_ip(self, context, vrf):
        """Release the VRF IP port for ``vrf``."""
        # The mech_sr might be restarted
        if self.vrf_allocation is None:
            self.vrf_allocation = vrf_utils.VrfIpAllocation()
        self.vrf_allocation.delete_vrf_ip(context, vrf)

    def create_network_precommit(self, context):
        """Allow at most one srv6vrf network to exist."""
        network = context.current
        if network['provider:network_type'] != type_srv6vrf.SRV6VRF:
            return
        if self.vrf_allocation is None:
            try:
                self.vrf_allocation = vrf_utils.VrfIpAllocation()
            except vrf_utils.VrfNetworkNotFound:
                # No vrf network exists yet: this creation may proceed.
                return
        # An allocation already exists, so a vrf network is present.
        raise vrf_utils.VrfNetworkAlreadyExists

    def create_network_postcommit(self, context):
        # Adds a logic to pass vrf create event to gateway agent if needed
        pass

    def delete_network_postcommit(self, context):
        """Tell agents to drop the VRF and release its IP allocation."""
        plugin_context = context._plugin_context
        network = context.current
        vrf = vrf_utils.get_vrf_name(network["provider:network_type"],
                                     network["project_id"],
                                     network["id"])
        self.sr_rpc_api.vrf_delete(plugin_context, vrf)
        self._delete_vrf_ip(plugin_context, vrf)

    def update_port_precommit(self, context):
        """Populate the binding profile with VRF data when required."""
        # This mech doesn't check whether port is binded to VM so
        # the mech treats all ports as VM's port
        plugin = directory.get_plugin()
        port_id = context.current["id"]
        port_db = plugin._get_port(context._plugin_context, port_id)
        cur_binding = utils.get_port_binding_by_status_and_host(
            port_db.port_bindings, constants.ACTIVE)
        agents = context.host_agents(self.agent_type)
        network_id = port_db.network_id
        node_id = None
        if agents:
            # SR plugin expects to return just one sr_agent
            agent = agents[0]
            if agent["alive"]:
                node_id = agent["configurations"].get("segment_node_id")
        if self._is_required_to_update_binding_profile(context, node_id):
            # Specify vrf name
            network = plugin.get_network(context._plugin_context,
                                         network_id)
            network_type = network["provider:network_type"]
            project_id = port_db.project_id
            vrf = vrf_utils.get_vrf_name(network_type, project_id, network_id)
            if not vrf:
                return
            # Prepare vrf port info
            vrf_ports = plugin.get_ports(context._plugin_context,
                                         filters={'name': [vrf]})
            if not vrf_ports:
                vrf_port = self._create_vrf_ip(context._plugin_context, vrf)
            else:
                vrf_port = vrf_ports[0]
            vrf_ip = vrf_port['fixed_ips'][0]['ip_address']
            subnet = plugin.get_subnet(context._plugin_context,
                                       vrf_port['fixed_ips'][0]['subnet_id'])
            cidr = subnet['cidr']

            # update DB
            cur_binding.profile = jsonutils.dumps({"segment_node_id": node_id,
                                                   "vrf": vrf,
                                                   "vrf_ip": vrf_ip,
                                                   "vrf_cidr": cidr})
        if context.host == context.original_host:
            return
        # NOTE(review): _insert_provisioning_block is not defined in this
        # class as shown here; presumably provided by a base class or a
        # missing helper -- TODO confirm.
        self._insert_provisioning_block(context)

    def update_port_postcommit(self, context):
        """Broadcast encap updates for ports that became ACTIVE."""
        port = context.current
        if port["status"] != constants.PORT_STATUS_ACTIVE:
            # TODO(hichihara): Treat with port status DOWN,
            # for example, VM shutdown case.
            return
        # Notify encap_update to all agents
        self.sr_rpc_api.encap_update(context._plugin_context, port)

    def delete_port_postcommit(self, context):
        """Broadcast encap removal for a deleted port."""
        plugin_context = context._plugin_context
        port = context.current
        self.sr_rpc_api.encap_delete(plugin_context, port)

    def _is_required_to_update_binding_profile(self, context, node_id):
        # If node_id is None, the agent running on that Host is not
        # srv6 node
        if node_id is None:
            return False

        if context._binding.profile:
            # If binding profile is already configured
            # and current host is same as new host,
            # we don't need to update binding profile
            if context.host == context.original_host:
                return False
        else:
            # If binding profile is not configure but
            # status is already ACTIVE, this port somehow
            # got active without binding profile so leave it as is

            # NOTE(Yuki Nishiwaki): Honestly I'm not sure which case
            # this condition try to cover and I think we don't need
            # this condition and safe to try to update binding profile
            # always when binding profile is missing,
            # But this condition itself is not so harm, that's why leave it
            # here
            if context.current["status"] != constants.PORT_STATUS_DOWN:
                return False

        return True
class Srv6TypeDriver(api.ML2TypeDriver):
    """Manage state for srv6 networks with ML2.

    Implements the 'srv6' network_type.  The type is provider-only and
    carries no allocatable segment data, so most of the segment
    management hooks are deliberate no-ops.
    """

    def __init__(self):
        super(Srv6TypeDriver, self).__init__()

    def get_type(self):
        # network_type string exposed through the provider API.
        return SRV6

    def initialize(self):
        LOG.info("ML2 Srv6TypeDriver initialization complete")

    def is_partial_segment(self, segment):
        # srv6 segments never need dynamic completion.
        return False

    def validate_provider_segment(self, segment):
        """Reject every provider attribute except the network type."""
        for key in segment:
            if key == api.NETWORK_TYPE or not segment[key]:
                continue
            msg = _("%s prohibited for srv6 provider network") % key
            raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, context, segment, filters=None):
        # Nothing to reserve; hand the segment back unchanged.
        return segment

    def allocate_tenant_segment(self, context, filters=None):
        # Tenant networks cannot be allocated for this type.
        return None

    def release_segment(self, context, segment):
        # Nothing was reserved, so nothing to release.
        pass

    def get_mtu(self, physical_network=None):
        # No type-specific MTU information.
        pass

    def initialize_network_segment_range_support(self):
        pass

    def update_network_segment_range_allocations(self):
        pass
class Srv6VrfTypeDriver(api.ML2TypeDriver):
    """Manage state for srv6vrf networks with ML2.

    Implements the 'srv6vrf' network_type.  Like the plain srv6 type it
    is provider-only with no allocatable segment data, so the segment
    management hooks are deliberate no-ops.
    """

    def __init__(self):
        super(Srv6VrfTypeDriver, self).__init__()

    def get_type(self):
        # network_type string exposed through the provider API.
        return SRV6VRF

    def initialize(self):
        LOG.info("ML2 Srv6VrfTypeDriver initialization complete")

    def is_partial_segment(self, segment):
        # srv6vrf segments never need dynamic completion.
        return False

    def validate_provider_segment(self, segment):
        """Reject every provider attribute except the network type."""
        for key in segment:
            if key == api.NETWORK_TYPE or not segment[key]:
                continue
            msg = _("%s prohibited for srv6vrf provider network") % key
            raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, context, segment, filters=None):
        # Nothing to reserve; hand the segment back unchanged.
        return segment

    def allocate_tenant_segment(self, context, filters=None):
        # Tenant networks cannot be allocated for this type.
        return None

    def release_segment(self, context, segment):
        # Nothing was reserved, so nothing to release.
        pass

    def get_mtu(self, physical_network=None):
        # No type-specific MTU information.
        pass

    def initialize_network_segment_range_support(self):
        pass

    def update_network_segment_range_allocations(self):
        pass
from oslo_versionedobjects import fields as obj_fields

from neutron.api.rpc.callbacks import resources
from neutron.objects import base
from neutron.objects import common_types

from networking_sr.db import srv6_encap_net_db


@base.NeutronObjectRegistry.register
class SRv6EncapNetwork(base.NeutronDbObject):
    # Oslo versioned object wrapping the Srv6EncapNetwork DB model:
    # the per-network container for SRv6 encap rules.
    # Version 1.0: Initial version
    VERSION = '1.0'

    # Use the new enginefacade for DB access.
    new_facade = True
    db_model = srv6_encap_net_db.Srv6EncapNetwork

    fields = {
        'id': common_types.UUIDField(),
        'project_id': obj_fields.StringField(),
        'network_id': obj_fields.StringField(),
        # Child SRv6EncapRule objects; loaded separately, hence also
        # listed in synthetic_fields below.
        'encap_rules': obj_fields.ListOfObjectsField(
            'SRv6EncapRule', nullable=True),
    }

    # 'encap_rules' is not a column of the db_model; it is populated
    # from the SRv6EncapRule table.
    synthetic_fields = ['encap_rules']


@base.NeutronObjectRegistry.register
class SRv6EncapRule(base.NeutronDbObject):
    # Oslo versioned object wrapping the Srv6EncapRule DB model:
    # a single destination/nexthop encap rule belonging to an
    # SRv6EncapNetwork.
    # Version 1.0: Initial version
    VERSION = '1.0'

    # Use the new enginefacade for DB access.
    new_facade = True
    db_model = srv6_encap_net_db.Srv6EncapRule

    fields = {
        'srv6_encap_network_id': common_types.UUIDField(),
        'destination': obj_fields.StringField(nullable=False),
        'nexthop': obj_fields.StringField(nullable=False),
    }

    # The rule table has no surrogate id; the full tuple is the key.
    primary_keys = ['srv6_encap_network_id', 'destination', 'nexthop']
    foreign_keys = {'SRv6EncapNetwork': {'srv6_encap_network_id': 'id'}}

# Register both objects with the RPC callbacks framework so they can be
# pushed to agents as versioned resources.
resources.register_resource_class(SRv6EncapNetwork)
resources.register_resource_class(SRv6EncapRule)
class SRv6EncapNetworkPlugin(base.ServicePluginBase,
                             encap_db.SRv6EncapNetworkDbMixin):
    """Service plugin implementing the srv6-encap-network extension.

    Persists encap networks/rules through the DB mixin and notifies SR
    agents over RPC whenever the rule set of a network changes.
    """

    supported_extension_aliases = [srv6_encap_network.ALIAS]

    def __init__(self):
        super(SRv6EncapNetworkPlugin, self).__init__()
        self.sr_rpc_api = sr_rpc.SrAgentApi(topics.AGENT)
        self.vrf_allocation = None

    def get_plugin_type(self):
        return "srv6-encap-network"

    def get_plugin_description(self):
        return "SRv6 Encap Network service plugin"

    def get_srv6_encap_networks(self, context, filters=None,
                                fields=None, sorts=None, limit=None,
                                marker=None, page_reverse=False):
        # Pure delegation to the DB mixin.
        parent = super(SRv6EncapNetworkPlugin, self)
        return parent.get_srv6_encap_networks(
            context, filters=filters, fields=fields, sorts=sorts,
            limit=limit, marker=marker, page_reverse=page_reverse)

    def get_srv6_encap_network(self, context, encap_net_id, fields=None):
        # Pure delegation to the DB mixin.
        parent = super(SRv6EncapNetworkPlugin, self)
        return parent.get_srv6_encap_network(context, encap_net_id,
                                             fields=fields)

    def _make_encap_rule_rpc_content(self, context, encap_net):
        """Build the payload sent to agents for an encap rule update."""
        core_plugin = directory.get_plugin()
        net = core_plugin.get_network(context, encap_net['network_id'])
        vrf = vrf_utils.get_vrf_name(net["provider:network_type"],
                                     encap_net['project_id'],
                                     encap_net['network_id'])
        return {'id': encap_net['id'],
                'vrf': vrf,
                'rules': encap_net['encap_rules']}

    def create_srv6_encap_network(self, context, srv6_encap_network):
        """Create the record, then push the new rules to the agents."""
        parent = super(SRv6EncapNetworkPlugin, self)
        encap_net = parent.create_srv6_encap_network(context,
                                                     srv6_encap_network)
        payload = self._make_encap_rule_rpc_content(context, encap_net)
        self.sr_rpc_api.encap_rule_update(context, payload)
        return encap_net

    def update_srv6_encap_network(self, context, encap_net_id,
                                  srv6_encap_network):
        """Update the record, then push the changed rules to the agents."""
        parent = super(SRv6EncapNetworkPlugin, self)
        encap_net = parent.update_srv6_encap_network(context, encap_net_id,
                                                     srv6_encap_network)
        payload = self._make_encap_rule_rpc_content(context, encap_net)
        self.sr_rpc_api.encap_rule_update(context, payload)
        return encap_net

    def delete_srv6_encap_network(self, context, encap_net_id):
        """Delete the record and tell agents its rule set is now empty."""
        # Snapshot the record with an empty rule list before it is gone
        # so the RPC payload tells agents to drop all rules.
        encap_net_db = self._get_srv6_encap_network(context, encap_net_id)
        encap_net = self._make_srv6_encap_network_dict(encap_net_db, [])
        super(SRv6EncapNetworkPlugin, self).delete_srv6_encap_network(
            context, encap_net_id)
        payload = self._make_encap_rule_rpc_content(context, encap_net)
        self.sr_rpc_api.encap_rule_update(context, payload)
from oslotest import base


class TestCase(base.BaseTestCase):

    """Test case base class for all unit tests.

    Extends oslotest's BaseTestCase; shared fixtures for the
    networking_sr test suite belong here.
    """
from networking_sr.tests import base


class TestNetworking_sr(base.TestCase):
    """Placeholder test class for the networking_sr package."""

    def test_something(self):
        # Cookiecutter scaffold test; intentionally empty until real
        # unit tests are added.
        pass
14 | 15 | # This file is execfile()d with the current directory set to its 16 | # containing dir. 17 | # 18 | # Note that not all possible configuration values are present in this 19 | # autogenerated file. 20 | # 21 | # All configuration values have a default; values that are commented out 22 | # serve to show the default. 23 | 24 | # If extensions (or modules to document with autodoc) are in another directory, 25 | # add these directories to sys.path here. If the directory is relative to the 26 | # documentation root, use os.path.abspath to make it absolute, like shown here. 27 | # sys.path.insert(0, os.path.abspath('.')) 28 | 29 | # -- General configuration ------------------------------------------------ 30 | 31 | # If your documentation needs a minimal Sphinx version, state it here. 32 | # needs_sphinx = '1.0' 33 | 34 | # Add any Sphinx extension module names here, as strings. They can be 35 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 36 | # ones. 37 | extensions = [ 38 | 'openstackdocstheme', 39 | 'reno.sphinxext', 40 | ] 41 | 42 | # Add any paths that contain templates here, relative to this directory. 43 | templates_path = ['_templates'] 44 | 45 | # The suffix of source filenames. 46 | source_suffix = '.rst' 47 | 48 | # The encoding of source files. 49 | # source_encoding = 'utf-8-sig' 50 | 51 | # The master toctree document. 52 | master_doc = 'index' 53 | 54 | # General information about the project. 
55 | project = u'networking_sr Release Notes' 56 | copyright = u'2017, OpenStack Developers' 57 | 58 | # openstackdocstheme options 59 | repository_name = 'openstack/networking-sr' 60 | bug_project = ('replace with the name of the project on Launchpad or the ID ' 61 | 'from Storyboard') 62 | bug_tag = '' 63 | html_last_updated_fmt = '%Y-%m-%d %H:%M' 64 | 65 | # The version info for the project you're documenting, acts as replacement for 66 | # |version| and |release|, also used in various other places throughout the 67 | # built documents. 68 | # 69 | # The short X.Y version. 70 | # The full version, including alpha/beta/rc tags. 71 | release = '' 72 | # The short X.Y version. 73 | version = '' 74 | 75 | # The language for content autogenerated by Sphinx. Refer to documentation 76 | # for a list of supported languages. 77 | # language = None 78 | 79 | # There are two options for replacing |today|: either, you set today to some 80 | # non-false value, then it is used: 81 | # today = '' 82 | # Else, today_fmt is used as the format for a strftime call. 83 | # today_fmt = '%B %d, %Y' 84 | 85 | # List of patterns, relative to source directory, that match files and 86 | # directories to ignore when looking for source files. 87 | exclude_patterns = [] 88 | 89 | # The reST default role (used for this markup: `text`) to use for all 90 | # documents. 91 | # default_role = None 92 | 93 | # If true, '()' will be appended to :func: etc. cross-reference text. 94 | # add_function_parentheses = True 95 | 96 | # If true, the current module name will be prepended to all description 97 | # unit titles (such as .. function::). 98 | # add_module_names = True 99 | 100 | # If true, sectionauthor and moduleauthor directives will be shown in the 101 | # output. They are ignored by default. 102 | # show_authors = False 103 | 104 | # The name of the Pygments (syntax highlighting) style to use. 105 | pygments_style = 'sphinx' 106 | 107 | # A list of ignored prefixes for module index sorting. 
108 | # modindex_common_prefix = [] 109 | 110 | # If true, keep warnings as "system message" paragraphs in the built documents. 111 | # keep_warnings = False 112 | 113 | 114 | # -- Options for HTML output ---------------------------------------------- 115 | 116 | # The theme to use for HTML and HTML Help pages. See the documentation for 117 | # a list of builtin themes. 118 | html_theme = 'openstackdocs' 119 | 120 | # Theme options are theme-specific and customize the look and feel of a theme 121 | # further. For a list of options available for each theme, see the 122 | # documentation. 123 | # html_theme_options = {} 124 | 125 | # Add any paths that contain custom themes here, relative to this directory. 126 | # html_theme_path = [] 127 | 128 | # The name for this set of Sphinx documents. If None, it defaults to 129 | # " v documentation". 130 | # html_title = None 131 | 132 | # A shorter title for the navigation bar. Default is the same as html_title. 133 | # html_short_title = None 134 | 135 | # The name of an image file (relative to this directory) to place at the top 136 | # of the sidebar. 137 | # html_logo = None 138 | 139 | # The name of an image file (within the static path) to use as favicon of the 140 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 141 | # pixels large. 142 | # html_favicon = None 143 | 144 | # Add any paths that contain custom static files (such as style sheets) here, 145 | # relative to this directory. They are copied after the builtin static files, 146 | # so a file named "default.css" will overwrite the builtin "default.css". 147 | html_static_path = ['_static'] 148 | 149 | # Add any extra paths that contain custom files (such as robots.txt or 150 | # .htaccess) here, relative to this directory. These files are copied 151 | # directly to the root of the documentation. 
152 | # html_extra_path = [] 153 | 154 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 155 | # using the given strftime format. 156 | # html_last_updated_fmt = '%b %d, %Y' 157 | 158 | # If true, SmartyPants will be used to convert quotes and dashes to 159 | # typographically correct entities. 160 | # html_use_smartypants = True 161 | 162 | # Custom sidebar templates, maps document names to template names. 163 | # html_sidebars = {} 164 | 165 | # Additional templates that should be rendered to pages, maps page names to 166 | # template names. 167 | # html_additional_pages = {} 168 | 169 | # If false, no module index is generated. 170 | # html_domain_indices = True 171 | 172 | # If false, no index is generated. 173 | # html_use_index = True 174 | 175 | # If true, the index is split into individual pages for each letter. 176 | # html_split_index = False 177 | 178 | # If true, links to the reST sources are added to the pages. 179 | # html_show_sourcelink = True 180 | 181 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 182 | # html_show_sphinx = True 183 | 184 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 185 | # html_show_copyright = True 186 | 187 | # If true, an OpenSearch description file will be output, and all pages will 188 | # contain a tag referring to it. The value of this option must be the 189 | # base URL from which the finished HTML is served. 190 | # html_use_opensearch = '' 191 | 192 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 193 | # html_file_suffix = None 194 | 195 | # Output file base name for HTML help builder. 196 | htmlhelp_basename = 'networking_srReleaseNotesdoc' 197 | 198 | 199 | # -- Options for LaTeX output --------------------------------------------- 200 | 201 | latex_elements = { 202 | # The paper size ('letterpaper' or 'a4paper'). 203 | # 'papersize': 'letterpaper', 204 | 205 | # The font size ('10pt', '11pt' or '12pt'). 
206 | # 'pointsize': '10pt', 207 | 208 | # Additional stuff for the LaTeX preamble. 209 | # 'preamble': '', 210 | } 211 | 212 | # Grouping the document tree into LaTeX files. List of tuples 213 | # (source start file, target name, title, 214 | # author, documentclass [howto, manual, or own class]). 215 | latex_documents = [ 216 | ('index', 'networking_srReleaseNotes.tex', 217 | u'networking_sr Release Notes Documentation', 218 | u'OpenStack Foundation', 'manual'), 219 | ] 220 | 221 | # The name of an image file (relative to this directory) to place at the top of 222 | # the title page. 223 | # latex_logo = None 224 | 225 | # For "manual" documents, if this is true, then toplevel headings are parts, 226 | # not chapters. 227 | # latex_use_parts = False 228 | 229 | # If true, show page references after internal links. 230 | # latex_show_pagerefs = False 231 | 232 | # If true, show URL addresses after external links. 233 | # latex_show_urls = False 234 | 235 | # Documents to append as an appendix to all manuals. 236 | # latex_appendices = [] 237 | 238 | # If false, no module index is generated. 239 | # latex_domain_indices = True 240 | 241 | 242 | # -- Options for manual page output --------------------------------------- 243 | 244 | # One entry per manual page. List of tuples 245 | # (source start file, name, description, authors, manual section). 246 | man_pages = [ 247 | ('index', 'networking_srrereleasenotes', 248 | u'networking_sr Release Notes Documentation', 249 | [u'OpenStack Foundation'], 1) 250 | ] 251 | 252 | # If true, show URL addresses after external links. 253 | # man_show_urls = False 254 | 255 | 256 | # -- Options for Texinfo output ------------------------------------------- 257 | 258 | # Grouping the document tree into Texinfo files. 
List of tuples 259 | # (source start file, target name, title, author, 260 | # dir menu entry, description, category) 261 | texinfo_documents = [ 262 | ('index', 'networking_sr ReleaseNotes', 263 | u'networking_sr Release Notes Documentation', 264 | u'OpenStack Foundation', 'networking_srReleaseNotes', 265 | 'One line description of project.', 266 | 'Miscellaneous'), 267 | ] 268 | 269 | # Documents to append as an appendix to all manuals. 270 | # texinfo_appendices = [] 271 | 272 | # If false, no module index is generated. 273 | # texinfo_domain_indices = True 274 | 275 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 276 | # texinfo_show_urls = 'footnote' 277 | 278 | # If true, do not generate a @detailmenu in the "Top" node's menu. 279 | # texinfo_no_detailmenu = False 280 | 281 | # -- Options for Internationalization output ------------------------------ 282 | locale_dirs = ['locale/'] 283 | -------------------------------------------------------------------------------- /releasenotes/source/index.rst: -------------------------------------------------------------------------------- 1 | ============================================ 2 | networking_sr Release Notes 3 | ============================================ 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | unreleased 9 | -------------------------------------------------------------------------------- /releasenotes/source/unreleased.rst: -------------------------------------------------------------------------------- 1 | ============================== 2 | Current Series Release Notes 3 | ============================== 4 | 5 | .. release-notes:: 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # The order of packages is significant, because pip processes them in the order 2 | # of appearance. 
Changing the order has an impact on the overall integration 3 | # process, which may cause wedges in the gate later. 4 | 5 | pbr>=2.0 # Apache-2.0 6 | pyroute2>=0.5.3;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) 7 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = networking-sr 3 | summary = Networking Segment Routing is neutron plugin to manage segment routing in openstack. 4 | description-file = 5 | README.rst 6 | author = Hirofumi Ichihara 7 | author-email = hirofumi.ichihara@linecorp.com 8 | home-page = https://github.com/line/networking-sr 9 | classifier = 10 | Environment :: OpenStack 11 | Intended Audience :: Information Technology 12 | Intended Audience :: System Administrators 13 | License :: OSI Approved :: Apache Software License 14 | Operating System :: POSIX :: Linux 15 | Programming Language :: Python 16 | Programming Language :: Python :: 2 17 | Programming Language :: Python :: 2.7 18 | Programming Language :: Python :: 3 19 | Programming Language :: Python :: 3.5 20 | 21 | [files] 22 | packages = 23 | networking_sr 24 | data_files = 25 | etc/neutron/policy.d = 26 | etc/neutron/policy.d/srv6.conf 27 | 28 | [entry_points] 29 | console_scripts = 30 | neutron-sr-agent = networking_sr.cmd.eventlet.sr_agent:main 31 | neutron-srgw-agent = networking_sr.cmd.eventlet.srgw_agent:main 32 | neutron.ml2.mechanism_drivers = 33 | sr = networking_sr.ml2.mech_driver.mech_sr:SrMechanismDriver 34 | neutron.interface_drivers = 35 | sr = networking_sr.agent.interface:SrInterfaceDriver 36 | neutron.agent.firewall_drivers = 37 | iptables_vrf = networking_sr.agent.iptables_vrf_firewall:VrfBasedIptablesFirewallDriver 38 | neutron.ml2.type_drivers = 39 | srv6 = networking_sr.ml2.type_srv6:Srv6TypeDriver 40 | srv6vrf = networking_sr.ml2.type_srv6vrf:Srv6VrfTypeDriver 41 | neutron.db.alembic_migrations = 42 | 
networking-sr = networking_sr.db.migration:alembic_migrations 43 | neutron.service_plugins = 44 | sr = networking_sr.services.plugin:SRv6EncapNetworkPlugin 45 | 46 | [compile_catalog] 47 | directory = networking_sr/locale 48 | domain = networking_sr 49 | 50 | [update_catalog] 51 | domain = networking_sr 52 | output_dir = networking_sr/locale 53 | input_file = networking_sr/locale/networking_sr.pot 54 | 55 | [extract_messages] 56 | keywords = _ gettext ngettext l_ lazy_gettext 57 | mapping_file = babel.cfg 58 | output_file = networking_sr/locale/networking_sr.pot 59 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT 17 | import setuptools 18 | 19 | # In python < 2.7.4, a lazy loading of package `pbr` will break 20 | # setuptools if some other modules registered functions in `atexit`. 
21 | # solution from: http://bugs.python.org/issue15881#msg170215 22 | try: 23 | import multiprocessing # noqa 24 | except ImportError: 25 | pass 26 | 27 | setuptools.setup( 28 | setup_requires=['pbr'], 29 | pbr=True) 30 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | # The order of packages is significant, because pip processes them in the order 2 | # of appearance. Changing the order has an impact on the overall integration 3 | # process, which may cause wedges in the gate later. 4 | 5 | hacking>=0.12.0,<0.13 # Apache-2.0 6 | 7 | coverage>=4.0,!=4.4 # Apache-2.0 8 | python-subunit>=0.0.18 # Apache-2.0/BSD 9 | oslotest>=1.10.0 # Apache-2.0 10 | stestr>=1.0.0 # Apache-2.0 11 | testtools>=1.4.0 # MIT 12 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 2.0 3 | envlist = py35,py27,pep8 4 | skipsdist = True 5 | 6 | [testenv] 7 | usedevelop = True 8 | install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages} 9 | setenv = 10 | VIRTUAL_ENV={envdir} 11 | PYTHONWARNINGS=default::DeprecationWarning 12 | OS_STDOUT_CAPTURE=1 13 | OS_STDERR_CAPTURE=1 14 | OS_TEST_TIMEOUT=60 15 | deps = -r{toxinidir}/test-requirements.txt 16 | commands = stestr run {posargs} 17 | 18 | [testenv:pep8] 19 | commands = flake8 {posargs} 20 | 21 | [testenv:venv] 22 | commands = {posargs} 23 | 24 | [testenv:cover] 25 | setenv = 26 | VIRTUAL_ENV={envdir} 27 | PYTHON=coverage run --source networking_sr --parallel-mode 28 | commands = 29 | stestr run {posargs} 30 | coverage combine 31 | coverage html -d cover 32 | coverage xml -o cover/coverage.xml 33 | 34 | [testenv:docs] 35 | deps = 
-r{toxinidir}/doc/requirements.txt 36 | commands = sphinx-build -W -b html doc/source doc/build/html 37 | 38 | [testenv:releasenotes] 39 | deps = {[testenv:docs]deps} 40 | commands = 41 | sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html 42 | 43 | [testenv:debug] 44 | commands = oslo_debug_helper {posargs} 45 | 46 | [flake8] 47 | # E123, E125 skipped as they are invalid PEP-8. 48 | 49 | show-source = True 50 | ignore = E123,E125 51 | builtins = _ 52 | exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build 53 | --------------------------------------------------------------------------------