├── dist ├── amazon-efs-utils.conffiles ├── scriptlets │ ├── after-remove │ ├── after-install-upgrade │ └── before-remove ├── amazon-efs-utils.control ├── amazon-efs-mount-watchdog.service ├── amazon-efs-mount-watchdog.conf ├── amazon-efs-mount-watchdog.plist ├── efs-utils.conf └── efs-utils.crt ├── NOTICE ├── src └── proxy │ ├── build.rs │ ├── src │ ├── lib.rs │ ├── efs_prot.x │ ├── error.rs │ ├── proxy_identifier.rs │ ├── logger.rs │ ├── shutdown.rs │ ├── proxy.rs │ ├── status_reporter.rs │ ├── main.rs │ ├── connection_task.rs │ ├── test_utils.rs │ ├── rpc.rs │ └── config_parser.rs │ ├── rust-xdr │ ├── xdr-codec │ │ ├── Cargo.toml │ │ ├── src │ │ │ └── error.rs │ │ ├── tests │ │ │ ├── qc-record.rs │ │ │ └── test-record.rs │ │ └── README.md │ └── xdrgen │ │ ├── Cargo.toml │ │ ├── src │ │ ├── xdrgen.rs │ │ ├── lib.rs │ │ └── spec │ │ │ └── test.rs │ │ └── README.md │ └── Cargo.toml ├── .gitignore ├── .github └── PULL_REQUEST_TEMPLATE.md ├── requirements.txt ├── test ├── __init__.py ├── global_test │ ├── __init__.py │ └── test_global_version_match.py ├── mount_efs_test │ ├── __init__.py │ ├── test_check_unsupported_options.py │ ├── test_get_init_system.py │ ├── test_get_fips_config.py │ ├── test_check_network_status.py │ ├── test_is_stunnel_option_supported.py │ ├── test_get_linux_kernel_version.py │ ├── test_start_watchdog.py │ ├── test_is_ocsp_enabled.py │ ├── test_create_state_file_dir.py │ ├── test_add_stunnel_ca_options.py │ ├── test_parse_arguments.py │ ├── test_environment_variables.py │ ├── test_write_tls_tunnel_state_file.py │ ├── test_get_nfs_mount_options.py │ ├── test_choose_tls_port.py │ ├── test_get_instance_id.py │ ├── test_mount_with_proxy.py │ └── test_describe_availability_zone.py ├── watchdog_test │ ├── __init__.py │ ├── test_mark_as_unmounted.py │ ├── test_check_child_procs.py │ ├── test_parse_arguments.py │ ├── test_get_state_files.py │ ├── test_restart_tls_tunnel.py │ ├── test_get_nfs_mount_options_on_macos.py │ ├── 
test_clean_up_previous_tunnel_pids.py │ ├── test_get_current_local_nfs_mounts.py │ ├── test_send_signal_to_stunnel_processes.py │ └── test_clean_up_mount_state.py ├── utils.py └── common.py ├── setup.cfg ├── config.ini ├── config.toml ├── LICENSE ├── Makefile └── CONTRIBUTING.md /dist/amazon-efs-utils.conffiles: -------------------------------------------------------------------------------- 1 | /etc/amazon/efs/efs-utils.conf 2 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | efs-utils 2 | Copyright 2017-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | -------------------------------------------------------------------------------- /src/proxy/build.rs: -------------------------------------------------------------------------------- 1 | use xdrgen; 2 | 3 | fn main() { 4 | xdrgen::compile("src/efs_prot.x").expect("xdrgen efs_prot.x failed"); 5 | } 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /amazon-efs-utils* 2 | !amazon-efs-utils.spec 3 | 4 | .coverage 5 | .pytest_cache 6 | 7 | *.pyc 8 | 9 | __pycache__/ 10 | build/ 11 | 12 | .DS_Store 13 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available:* 2 | 3 | *Description of changes:* 4 | 5 | 6 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 
7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | botocore == 1.34.140 2 | configparser == 7.0.0 3 | coverage == 7.6.0 4 | flake8 == 7.1.0 5 | pytest == 8.2.2 6 | pytest-cov == 5.0.0 7 | pytest-html == 4.1.1 8 | pytest-metadata == 3.1.1 9 | pytest-mock == 3.14.0 10 | mock == 5.1.0 11 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | -------------------------------------------------------------------------------- /test/global_test/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | -------------------------------------------------------------------------------- /test/mount_efs_test/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
7 | # 8 | -------------------------------------------------------------------------------- /test/watchdog_test/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 130 3 | exclude = test 4 | extend-ignore = E203, E501 5 | 6 | [tool:pytest] 7 | addopts = 8 | --verbose 9 | --html build/pytest/index.html 10 | --cov mount_efs 11 | --cov watchdog 12 | --cov-report html:build/coverage 13 | --cov-fail-under 80 14 | -------------------------------------------------------------------------------- /dist/scriptlets/after-remove: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | reload() { 5 | if [ "$(cat /proc/1/comm)" = "systemd" ]; then 6 | systemctl daemon-reload 7 | fi 8 | } 9 | 10 | if [ "$1" = "remove" ]; then 11 | reload 12 | elif [ "$1" = "purge" ]; then 13 | reload 14 | rm -f /var/log/amazon/efs/* 15 | fi -------------------------------------------------------------------------------- /config.ini: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
7 | # 8 | 9 | [global] 10 | version=2.4.1 11 | release=1 12 | -------------------------------------------------------------------------------- /dist/amazon-efs-utils.control: -------------------------------------------------------------------------------- 1 | Package: amazon-efs-utils 2 | Architecture: $ARCH 3 | Version: $VERSION-$RELEASE 4 | Section: utils 5 | Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux 6 | Priority: optional 7 | Copyright: MIT License 8 | Maintainer: Amazon.com, Inc. 9 | Description: This package provides utilities for simplifying the use of EFS file systems 10 | -------------------------------------------------------------------------------- /config.toml: -------------------------------------------------------------------------------- 1 | [source] 2 | 3 | # Under the `source` table are a number of other tables whose keys are a 4 | # name for the relevant source. For example this section defines a new 5 | # source, called `my-vendor-source`, which comes from a directory 6 | # located at `vendor` relative to the directory containing this `.cargo/config.toml` 7 | # file 8 | [source.my-vendor-source] 9 | directory = "vendor" 10 | 11 | [source.crates-io] 12 | replace-with = "my-vendor-source" -------------------------------------------------------------------------------- /dist/scriptlets/after-install-upgrade: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | if [ -n $2 ]; then 5 | if [ "$(cat /proc/1/comm)" = "init" ]; then 6 | /sbin/restart amazon-efs-mount-watchdog &> /dev/null || true 7 | elif [ "$(cat /proc/1/comm)" = "systemd" ]; then 8 | if systemctl is-active --quiet amazon-efs-mount-watchdog; then 9 | systemctl try-restart amazon-efs-mount-watchdog.service &> /dev/null || true 10 | fi 11 | fi 12 | fi -------------------------------------------------------------------------------- /dist/scriptlets/before-remove: 
-------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | if [ "$1" = "remove" ] || [ "$1" = "purge" ]; then 5 | if [ "$(cat /proc/1/comm)" = "init" ]; then 6 | /sbin/stop amazon-efs-mount-watchdog &> /dev/null || true 7 | elif [ "$(cat /proc/1/comm)" = "systemd" ]; then 8 | if systemctl is-active --quiet amazon-efs-mount-watchdog; then 9 | systemctl --no-reload disable amazon-efs-mount-watchdog.service &> /dev/null || true 10 | systemctl stop amazon-efs-mount-watchdog.service &> /dev/null || true 11 | fi 12 | fi 13 | fi -------------------------------------------------------------------------------- /dist/amazon-efs-mount-watchdog.service: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | [Unit] 10 | Description=amazon-efs-mount-watchdog 11 | Before=remote-fs-pre.target 12 | 13 | [Service] 14 | Type=simple 15 | ExecStart=/usr/bin/env amazon-efs-mount-watchdog 16 | KillMode=process 17 | Restart=on-failure 18 | RestartSec=15s 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /dist/amazon-efs-mount-watchdog.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
7 | # 8 | 9 | description "Amazon EFS Mount Watchdog" 10 | author "Amazon.com" 11 | 12 | # Uncomment these lines to start amazon-efs-mount-watchdog automatically on boot 13 | # start on (runlevel [345] and started network) 14 | # stop on (runlevel [!345] or stopping network) 15 | 16 | respawn 17 | respawn limit 0 15 18 | 19 | exec /usr/bin/env amazon-efs-mount-watchdog 20 | -------------------------------------------------------------------------------- /test/watchdog_test/test_mark_as_unmounted.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import json 10 | 11 | import watchdog 12 | 13 | 14 | def test_mark_as_unmounted(tmpdir): 15 | state_file = "fs-deadbeef.mount.12345" 16 | state = {} 17 | 18 | tmpdir.join(state_file).write(json.dumps(state)) 19 | 20 | updated_state = watchdog.mark_as_unmounted(state, str(tmpdir), state_file, 0) 21 | 22 | assert 0 == updated_state.get("unmount_time") 23 | 24 | with open(str(tmpdir.join(state_file))) as f: 25 | file_state = json.load(f) 26 | 27 | assert 0 == file_state.get("unmount_time") 28 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_check_unsupported_options.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
7 | # 8 | 9 | import mount_efs 10 | 11 | 12 | def test_no_unsupported_options(capsys): 13 | options = {} 14 | 15 | mount_efs.check_unsupported_options(options) 16 | 17 | out, err = capsys.readouterr() 18 | assert not out 19 | 20 | 21 | def test_capath_unsupported(capsys): 22 | options = {"capath": "/capath"} 23 | 24 | mount_efs.check_unsupported_options(options) 25 | 26 | out, err = capsys.readouterr() 27 | assert "not supported" in err 28 | assert "capath" in err 29 | assert "capath" not in options 30 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_get_init_system.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import tempfile 10 | 11 | import mount_efs 12 | 13 | 14 | def test_get_init_system_from_file(tmpdir): 15 | temp_file = tmpdir.join(tempfile.mkstemp()[1]) 16 | temp_file.write("systemd\n", ensure=True) 17 | 18 | init_system = mount_efs.get_init_system(str(temp_file)) 19 | 20 | assert "systemd" == init_system 21 | 22 | 23 | def test_get_init_system_nonexistent_file(tmpdir): 24 | temp_file = tmpdir.join(tempfile.mkstemp()[1]) 25 | 26 | init_system = mount_efs.get_init_system(str(temp_file)) 27 | 28 | assert "unknown" == init_system 29 | -------------------------------------------------------------------------------- /src/proxy/src/lib.rs: -------------------------------------------------------------------------------- 1 | // EFS Proxy modules are made visible such that they can be reused in the Integration tests. 2 | // EFS proxy Integration tests are implemented in a white box testing manner. 3 | // We want to keep all the proxy internals visible and accessible. 
4 | // 5 | #![warn(rust_2018_idioms)] 6 | 7 | pub mod config_parser; 8 | pub mod connection_task; 9 | pub mod connections; 10 | pub mod controller; 11 | pub mod efs_rpc; 12 | pub mod error; 13 | pub mod logger; 14 | pub mod proxy; 15 | pub mod proxy_identifier; 16 | pub mod proxy_task; 17 | pub mod rpc; 18 | pub mod shutdown; 19 | pub mod status_reporter; 20 | pub mod test_utils; 21 | pub mod tls; 22 | 23 | #[allow(clippy::all)] 24 | #[allow(deprecated)] 25 | #[allow(invalid_value)] 26 | #[allow(non_camel_case_types)] 27 | #[allow(unused_assignments)] 28 | pub mod efs_prot { 29 | include!(concat!(env!("OUT_DIR"), "/efs_prot_xdr.rs")); 30 | } 31 | -------------------------------------------------------------------------------- /src/proxy/rust-xdr/xdr-codec/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xdr-codec" 3 | version = "0.4.4" 4 | authors = ["Jeremy Fitzhardinge "] 5 | license = "MIT OR Apache-2.0" 6 | description = "XDR encode/decode runtime support. Pairs with xdrgen which generates code from specs." 7 | repository = "https://github.com/jsgf/rust-xdr/tree/master/xdr-codec" 8 | documentation = "https://docs.rs/xdr-codec" 9 | readme = "README.md" 10 | keywords = ["encoding", "protocol", "xdr", "rfc4506", "serialization"] 11 | include = [ "src/**/*.rs", "tests/**/*.rs", "*.md", "Cargo.toml" ] 12 | 13 | [features] 14 | # Enable use of `Pack`/`Unpack` traits for `i8`/`u8`. Normally this is disabled to 15 | # prevent unintended use of `char thing[]` arrays when then intent was `opaque thing[]`. 
16 | bytecodec = [] 17 | # For travis 18 | unstable = [] 19 | 20 | [dependencies] 21 | byteorder = "1.0" 22 | error-chain = "0.10" 23 | 24 | [dev-dependencies] 25 | quickcheck = "0.4" 26 | -------------------------------------------------------------------------------- /test/watchdog_test/test_check_child_procs.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | import watchdog 8 | 9 | from .. import common, utils 10 | 11 | 12 | def test_child_procs_empty(): 13 | watchdog.check_child_procs([]) 14 | 15 | # nothing to assert, this just verifies that empty child procs doesn't crash 16 | 17 | 18 | def test_child_procs(): 19 | live_proc = common.PopenMock(return_code=None).mock 20 | dead_proc = common.PopenMock( 21 | return_code=1, communicate_return_value=(None, None) 22 | ).mock 23 | 24 | children = [live_proc, dead_proc] 25 | 26 | watchdog.check_child_procs(children) 27 | 28 | assert 1 == len(children) 29 | assert dead_proc not in children 30 | utils.assert_called_once(dead_proc.poll) 31 | assert live_proc in children 32 | utils.assert_called_once(live_proc.poll) 33 | -------------------------------------------------------------------------------- /test/utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | 10 | def assert_called_once(mock): 11 | assert ( 12 | mock.call_count == 1 13 | ), "Expected mock to have been called once. 
Called {} times.".format( 14 | mock.call_count 15 | ) 16 | 17 | 18 | def assert_called_n_times(mock, n): 19 | assert ( 20 | mock.call_count == n 21 | ), "Expected mock to have been called {} times. Called {} times.".format( 22 | n, mock.call_count 23 | ) 24 | 25 | 26 | def assert_not_called(mock): 27 | assert ( 28 | mock.call_count == 0 29 | ), "Expected mock to have been not called. Called {} times.".format(mock.call_count) 30 | 31 | 32 | def assert_called(mock): 33 | assert ( 34 | mock.call_count != 0 35 | ), "Expected mock to have been called. While the mock is not called." 36 | -------------------------------------------------------------------------------- /src/proxy/rust-xdr/xdrgen/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "xdrgen" 3 | version = "0.4.4" 4 | authors = ["Jeremy Fitzhardinge "] 5 | license = "MIT/Apache-2.0" 6 | description = "XDR codec generator from specification. Designed for use with xdr-codec." 
7 | repository = "https://github.com/jsgf/rust-xdr/tree/master/xdrgen" 8 | documentation = "https://docs.rs/xdrgen/" 9 | readme = "README.md" 10 | keywords = ["encoding", "protocol", "xdr", "rfc4506", "serialization"] 11 | include = [ "src/**/*.rs", "tests/**/*.rs", "*.md", "Cargo.toml" ] 12 | 13 | [[bin]] 14 | name = "xdrgen" 15 | path = "src/xdrgen.rs" 16 | test = false 17 | bench = false 18 | doc = false 19 | 20 | [features] 21 | unstable = [] 22 | 23 | [dependencies] 24 | log = "0.3" 25 | env_logger = "0.10" 26 | nom = { version="3.1", features=["verbose-errors"] } 27 | quote = "0.3" 28 | clap = "2.24" 29 | lazy_static = "0.2" 30 | bitflags = "0.9" 31 | 32 | [dependencies.xdr-codec] 33 | path = "../xdr-codec" 34 | version = "0.4" 35 | 36 | 37 | [dev-dependencies] 38 | tempdir = "0.3" 39 | error-chain = "0.10" 40 | -------------------------------------------------------------------------------- /src/proxy/rust-xdr/xdr-codec/src/error.rs: -------------------------------------------------------------------------------- 1 | #![allow(deprecated)] 2 | 3 | error_chain! 
{ 4 | foreign_links { 5 | IOError(::std::io::Error); 6 | InvalidUtf8(::std::string::FromUtf8Error); 7 | } 8 | 9 | errors { 10 | InvalidCase(v: i32) { 11 | description("invalid union case") 12 | display("invalid union case: '{}'", v) 13 | } 14 | InvalidEnum(v: i32) { 15 | description("invalid enum value") 16 | display("invalid enum value: '{}'", v) 17 | } 18 | InvalidLen(v: usize) { 19 | description("invalid array len") 20 | display("invalid array len: '{}'", v) 21 | } 22 | } 23 | } 24 | 25 | unsafe impl Sync for Error {} 26 | 27 | impl Error { 28 | pub fn invalidcase(v: i32) -> Error { 29 | ErrorKind::InvalidCase(v).into() 30 | } 31 | 32 | pub fn invalidenum(v: i32) -> Error { 33 | ErrorKind::InvalidEnum(v).into() 34 | } 35 | 36 | pub fn invalidlen(v: usize) -> Error { 37 | ErrorKind::InvalidLen(v).into() 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright 2017 Amazon.com, Inc. or its affiliates. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /test/watchdog_test/test_parse_arguments.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | import pytest 8 | 9 | import watchdog 10 | 11 | 12 | def _test_parse_arguments_help(capsys, help): 13 | with pytest.raises(SystemExit) as ex: 14 | watchdog.parse_arguments(["watchdog", "foo", "bar", help]) 15 | 16 | assert 0 == ex.value.code 17 | 18 | out, err = capsys.readouterr() 19 | assert "Usage:" in out 20 | 21 | 22 | def test_parse_arguments_help_long(capsys): 23 | _test_parse_arguments_help(capsys, "--help") 24 | 25 | 26 | def test_parse_arguments_help_short(capsys): 27 | _test_parse_arguments_help(capsys, "-h") 28 | 29 | 30 | def test_parse_arguments_version(capsys): 31 | with pytest.raises(SystemExit) as ex: 32 | watchdog.parse_arguments(["watchdog", "foo", "bar", "--version"]) 33 | 34 | assert 0 == ex.value.code 35 | 36 | out, err = capsys.readouterr() 37 | assert "Version: %s" % watchdog.VERSION in out 38 | -------------------------------------------------------------------------------- /src/proxy/rust-xdr/xdrgen/src/xdrgen.rs: -------------------------------------------------------------------------------- 1 | #![crate_type = "bin"] 2 | 3 | extern crate clap; 4 | extern crate env_logger; 5 | extern crate xdrgen; 6 | 7 | use std::fs::File; 8 | use std::io::{stderr, stdin, stdout}; 9 | use 
std::io::{BufReader, Write}; 10 | 11 | use clap::App; 12 | 13 | use xdrgen::generate; 14 | 15 | fn main() { 16 | let _ = env_logger::init(); 17 | 18 | let matches = App::new("XDR code generator") 19 | .version(env!("CARGO_PKG_VERSION")) 20 | .arg_from_usage("[FILE] 'Set .x file'") 21 | .get_matches(); 22 | 23 | let output = stdout(); 24 | let mut err = stderr(); 25 | 26 | let res = if let Some(fname) = matches.value_of("FILE") { 27 | let f = match File::open(fname) { 28 | Ok(f) => f, 29 | Err(e) => { 30 | let _ = writeln!(&mut err, "Failed to open {}: {}", fname, e); 31 | std::process::exit(1); 32 | } 33 | }; 34 | generate(fname, BufReader::new(f), output) 35 | } else { 36 | generate("stdin", BufReader::new(stdin()), output) 37 | }; 38 | 39 | if let Err(e) = res { 40 | let _ = writeln!(&mut err, "Failed: {}", e); 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /test/watchdog_test/test_get_state_files.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
7 | # 8 | 9 | import watchdog 10 | 11 | 12 | def test_non_existent_dir(tmpdir): 13 | state_files = watchdog.get_state_files(str(tmpdir.join("new-dir"))) 14 | 15 | assert {} == state_files 16 | 17 | 18 | def test_empty_dir(tmpdir): 19 | state_files = watchdog.get_state_files(str(tmpdir)) 20 | 21 | assert {} == state_files 22 | 23 | 24 | def test_no_state_files(tmpdir): 25 | tmpdir.join("~fs-deadbeef.mount.dir.12345").write("") 26 | 27 | state_files = watchdog.get_state_files(str(tmpdir)) 28 | 29 | assert {} == state_files 30 | 31 | 32 | def test_state_files(tmpdir): 33 | efs_config = "fs-deadbeef.mount.dir.12345" 34 | tmpdir.join(efs_config).write("") 35 | 36 | stunnel_config = "stunnel-config.fs-deadbeef.mount.dir.12345" 37 | tmpdir.join(stunnel_config).write("") 38 | 39 | state_files = watchdog.get_state_files(str(tmpdir)) 40 | 41 | assert 1 == len(state_files) 42 | assert "mount.dir.12345" in state_files 43 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_get_fips_config.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 
6 | 7 | import os 8 | from unittest.mock import MagicMock 9 | 10 | import mount_efs 11 | 12 | MOCK_CONFIG = MagicMock() 13 | 14 | 15 | def test_get_fips_config_env_var_enabled(mocker): 16 | mocker.patch("os.getenv", return_value="true") 17 | mocker.patch("mount_efs.get_boolean_config_item_value", return_value=False) 18 | 19 | result = mount_efs.get_fips_config(None) 20 | assert result == True 21 | 22 | 23 | def test_get_fips_config_env_var_disabled(mocker): 24 | mocker.patch("os.getenv", return_value="False") 25 | mocker.patch("mount_efs.get_boolean_config_item_value", return_value=False) 26 | 27 | result = mount_efs.get_fips_config(None) 28 | assert result == False 29 | assert mount_efs.STUNNEL_GLOBAL_CONFIG["fips"] == "no" 30 | 31 | 32 | def test_get_fips_config_enabled_in_file(mocker): 33 | mocker.patch("os.getenv", return_value="False") 34 | mocker.patch("mount_efs.get_boolean_config_item_value", return_value=True) 35 | 36 | result = mount_efs.get_fips_config(None) 37 | assert result == True 38 | -------------------------------------------------------------------------------- /src/proxy/src/efs_prot.x: -------------------------------------------------------------------------------- 1 | /* 2 | * EFS program V1 3 | */ 4 | 5 | const PROXY_ID_LENGTH = 16; 6 | const PROXY_INCARNATION_LENGTH = 8; 7 | const PARTITION_ID_LENGTH = 64; 8 | 9 | enum OperationType { 10 | OP_BIND_CLIENT_TO_PARTITION = 1 11 | }; 12 | 13 | typedef opaque PartitionId[PARTITION_ID_LENGTH]; 14 | 15 | struct ProxyIdentifier { 16 | opaque identifier; 17 | opaque incarnation; 18 | }; 19 | 20 | struct ScaleUpConfig { 21 | int max_multiplexed_connections; 22 | int scale_up_bytes_per_sec_threshold; 23 | int scale_up_threshold_breached_duration_sec; 24 | }; 25 | 26 | enum BindResponseType { 27 | RETRY = 0, 28 | RETRY_LATER = 1, 29 | PREFERRED = 2, 30 | READY = 3, 31 | ERROR = 4 32 | }; 33 | 34 | union BindResponse switch (BindResponseType type) { 35 | case PREFERRED: 36 | case READY: 37 | PartitionId 
partition_id; 38 | case RETRY: 39 | case RETRY_LATER: 40 | String stop_msg; 41 | case ERROR: 42 | String error_msg; 43 | default: 44 | void; 45 | }; 46 | 47 | struct BindClientResponse { 48 | BindResponse bind_response; 49 | ScaleUpConfig scale_up_config; 50 | }; 51 | 52 | union OperationResponse switch (OperationType operation_type) { 53 | case OP_BIND_CLIENT_TO_PARTITION: 54 | BindClientResponse response; 55 | default: 56 | void; 57 | }; 58 | -------------------------------------------------------------------------------- /src/proxy/src/error.rs: -------------------------------------------------------------------------------- 1 | use thiserror::Error as ThisError; 2 | 3 | #[derive(Debug, ThisError)] 4 | pub enum ConnectError { 5 | #[error("Connect attempt cancelled")] 6 | Cancelled, 7 | #[error("{0}")] 8 | IoError(#[from] tokio::io::Error), 9 | #[error("Connect attempt failed - Maximum attempt count exceeded")] 10 | MaxAttemptsExceeded, 11 | #[error("Attempt to acquire additional connections to EFS failed.")] 12 | MultiplexFailure, 13 | #[error(transparent)] 14 | Tls(#[from] s2n_tls::error::Error), 15 | #[error("Connect attempt failed - Timeout")] 16 | Timeout, 17 | } 18 | 19 | #[derive(Debug, ThisError)] 20 | pub enum RpcError { 21 | #[error("not a rpc response")] 22 | MalformedResponse, 23 | #[error("rpc reply_stat: MSG_DENIED")] 24 | Denied, 25 | #[error("rpc accept_stat: GARBAGE_ARGS")] 26 | GarbageArgs, 27 | #[error("rpc accept_stat: PROG_UNAVAIL")] 28 | ProgramUnavailable, 29 | #[error("rpc accept_stat: PROG_MISMATCH low: {} high: {}", .low, .high)] 30 | ProgramMismatch { low: u32, high: u32 }, 31 | #[error("rpc accept_stat: PROC_UNAVAIL")] 32 | ProcedureUnavailable, 33 | #[error("rpc accept_stat: SystemError")] 34 | SystemError, 35 | #[error(transparent)] 36 | IoError(#[from] tokio::io::Error), 37 | #[error(transparent)] 38 | XdrCodecError(#[from] xdr_codec::Error), 39 | #[error(transparent)] 40 | OncRpc(#[from] onc_rpc::Error), 41 | } 42 | 
-------------------------------------------------------------------------------- /src/proxy/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "efs-proxy" 3 | edition = "2021" 4 | build = "build.rs" 5 | # The version of efs-proxy is tied to efs-utils. 6 | version = "2.4.1" 7 | publish = false 8 | license = "MIT" 9 | 10 | [dependencies] 11 | anyhow = "1.0.72" 12 | async-trait = "0.1" 13 | bytes = { version = "1.4.0" } 14 | chrono = "0.4" 15 | clap = { version = "=4.0.0", features = ["derive"] } 16 | fern = "0.6" 17 | futures = "0.3" 18 | log = "0.4" 19 | log4rs = { version = "1.2.0", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"]} 20 | nix = { version = "0.26.2", features = ["signal"]} 21 | onc-rpc = "0.2.3" 22 | rand = "0.8.5" 23 | s2n-tls = {version="^0.3.19",features=["fips"]} 24 | s2n-tls-tokio = "^0.3.19" 25 | serde = {version="1.0.175",features=["derive"]} 26 | serde_ini = "0.2.0" 27 | thiserror = "1.0.44" 28 | tokio = { version = "1.29.0, <1.39", features = ["full"] } 29 | tokio-util = "0.7.8" 30 | uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics"]} 31 | xdr-codec = { path = "rust-xdr/xdr-codec"} 32 | 33 | [dev-dependencies] 34 | test-case = "*" 35 | tokio = { version = "1.29.0", features = ["test-util"] } 36 | tempfile = "3.10.1" 37 | 38 | [build-dependencies] 39 | xdrgen = { path = "rust-xdr/xdrgen" } 40 | 41 | [lib] 42 | # Library is used only to make symbols visible/reusable by Integration tests 43 | # so we want to disable unit-tests for that target, to avoid running all the unit tests twice 44 | test = false 45 | doctest = false 46 | -------------------------------------------------------------------------------- /test/watchdog_test/test_restart_tls_tunnel.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. 
All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import json 10 | import tempfile 11 | 12 | import watchdog 13 | 14 | from .. import utils 15 | 16 | PID = 1234 17 | 18 | 19 | def test_restart_tls_tunnel_without_certificate_path(mocker, tmpdir): 20 | mocker.patch("watchdog.start_tls_tunnel", return_value=PID) 21 | 22 | state = {"pid": 9999, "cmd": ""} 23 | 24 | state_file = tmpdir.join(tempfile.mkstemp()[1]) 25 | state_file.write(json.dumps(state), ensure=True) 26 | 27 | watchdog.restart_tls_tunnel([], state, state_file.dirname, state_file.basename) 28 | 29 | assert PID == state["pid"] 30 | 31 | with state_file.open() as f: 32 | new_state = json.load(f) 33 | 34 | assert PID == new_state["pid"] 35 | 36 | 37 | def test_restart_tls_tunnel_with_certificate_path(mocker, tmpdir): 38 | start_tls_tunnel_call = mocker.patch("watchdog.start_tls_tunnel", return_value=PID) 39 | 40 | state = {"pid": 9999, "cmd": "", "certificate": "foo/bar/certificate.pem"} 41 | 42 | state_file = tmpdir.join(tempfile.mkstemp()[1]) 43 | state_file.write(json.dumps(state), ensure=True) 44 | 45 | watchdog.restart_tls_tunnel([], state, state_file.dirname, state_file.basename) 46 | 47 | utils.assert_not_called(start_tls_tunnel_call) 48 | -------------------------------------------------------------------------------- /test/watchdog_test/test_get_nfs_mount_options_on_macos.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 
6 | 7 | import json 8 | import sys 9 | from unittest.mock import MagicMock 10 | 11 | import pytest 12 | 13 | import watchdog 14 | 15 | NFSSTAT_DEFAULT_OUTPUT = { 16 | "127.0.0.1:/": { 17 | "Current mount parameters": { 18 | "File system locations": [ 19 | {"Export": "/", "Locations": ["127.0.0.1"], "Server": "127.0.0.1"} 20 | ], 21 | "NFS parameters": ["port=12345", "rw"], 22 | }, 23 | "Original mount options": { 24 | "File system locations": [ 25 | {"Export": "/", "Locations": ["127.0.0.1"], "Server": "127.0.0.1"} 26 | ], 27 | "NFS parameters": ["port=12345", "rw"], 28 | }, 29 | "Server": "/Users/ec2-user/efs", 30 | } 31 | } 32 | 33 | 34 | @pytest.mark.skipif(sys.version_info < (3, 5), reason="requires python3.5") 35 | def test_get_nfs_mount_options_on_macos(mocker): 36 | mount_point = "/mnt" 37 | process_mock = MagicMock() 38 | process_mock.stdout = str(json.dumps(NFSSTAT_DEFAULT_OUTPUT)) 39 | process_mock.returncode = 0 40 | 41 | mocker.patch("subprocess.run", return_value=process_mock) 42 | nfs_options = watchdog.get_nfs_mount_options_on_macos(mount_point) 43 | assert "port=12345" in nfs_options 44 | -------------------------------------------------------------------------------- /src/proxy/src/proxy_identifier.rs: -------------------------------------------------------------------------------- 1 | use uuid::Uuid; 2 | 3 | pub const INITIAL_INCARNATION: i64 = 0; 4 | 5 | #[derive(Eq, PartialEq, Clone, Copy, Debug)] 6 | pub struct ProxyIdentifier { 7 | pub uuid: Uuid, 8 | pub incarnation: i64, 9 | } 10 | 11 | impl ProxyIdentifier { 12 | pub fn new() -> Self { 13 | ProxyIdentifier { 14 | uuid: Uuid::new_v4(), 15 | incarnation: INITIAL_INCARNATION, 16 | } 17 | } 18 | 19 | pub fn increment(&mut self) { 20 | if self.incarnation == i64::MAX { 21 | self.incarnation = 0; 22 | return; 23 | } 24 | self.incarnation += 1; 25 | } 26 | } 27 | 28 | #[cfg(test)] 29 | mod tests { 30 | use super::ProxyIdentifier; 31 | use super::INITIAL_INCARNATION; 32 | 33 | #[test] 34 | fn 
test_increment() { 35 | let mut proxy_id = ProxyIdentifier::new(); 36 | let proxy_id_original = proxy_id; 37 | for i in 0..5 { 38 | assert_eq!(i, proxy_id.incarnation); 39 | proxy_id.increment(); 40 | } 41 | assert_eq!(proxy_id_original.uuid, proxy_id.uuid); 42 | assert_eq!(INITIAL_INCARNATION, proxy_id_original.incarnation); 43 | } 44 | 45 | #[test] 46 | fn test_wrap_around() { 47 | let mut proxy_id = ProxyIdentifier::new(); 48 | let proxy_id_original = proxy_id; 49 | proxy_id.incarnation = i64::MAX; 50 | proxy_id.increment(); 51 | assert_eq!(proxy_id_original.uuid, proxy_id.uuid); 52 | assert_eq!(INITIAL_INCARNATION, proxy_id.incarnation); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_check_network_status.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | from unittest.mock import MagicMock 8 | 9 | import pytest 10 | 11 | import mount_efs 12 | 13 | from .. 
import utils 14 | 15 | FS_ID = "fs-deadbeef" 16 | 17 | 18 | def _mock_subprocess_call(mocker, returncode=0): 19 | call_mock = MagicMock(return_value=returncode) 20 | return mocker.patch("subprocess.call", side_effect=call_mock) 21 | 22 | 23 | def test_non_systemd_init(mocker): 24 | call_mock = _mock_subprocess_call(mocker) 25 | 26 | mount_efs.check_network_status(FS_ID, "init") 27 | 28 | utils.assert_not_called(call_mock) 29 | 30 | 31 | def test_non_systemd_launchd(mocker): 32 | call_mock = _mock_subprocess_call(mocker) 33 | 34 | mount_efs.check_network_status(FS_ID, "launchd") 35 | 36 | utils.assert_not_called(call_mock) 37 | 38 | 39 | def test_systemd_network_up(mocker): 40 | call_mock = _mock_subprocess_call(mocker) 41 | 42 | mount_efs.check_network_status(FS_ID, "systemd") 43 | 44 | utils.assert_called_once(call_mock) 45 | 46 | 47 | def test_systemd_network_down(mocker): 48 | call_mock = _mock_subprocess_call(mocker, returncode=1) 49 | 50 | with pytest.raises(SystemExit) as ex: 51 | mount_efs.check_network_status(FS_ID, "systemd") 52 | 53 | utils.assert_called_once(call_mock) 54 | assert 0 == ex.value.code 55 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_is_stunnel_option_supported.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
7 | # 8 | 9 | import mount_efs 10 | 11 | STUNNEL_OPTION = "stunnelOption" 12 | STUNNEL_VALUES = "value1|value2|value3" 13 | STUNNEL_OPTION_VALUE = "{}: {}".format(STUNNEL_OPTION, STUNNEL_VALUES) 14 | 15 | STUNNEL_OUTPUT_WITHOUT_OPTION = ["foo", "bar", "baz"] 16 | STUNNEL_OUTPUT_WITH_OPTION = STUNNEL_OUTPUT_WITHOUT_OPTION + [STUNNEL_OPTION] 17 | STUNNEL_OUTPUT_WITH_OPTION_AND_VALUE = STUNNEL_OUTPUT_WITHOUT_OPTION + [ 18 | STUNNEL_OPTION_VALUE 19 | ] 20 | 21 | 22 | def test_supported_option(): 23 | enabled = mount_efs.is_stunnel_option_supported( 24 | STUNNEL_OUTPUT_WITH_OPTION, STUNNEL_OPTION 25 | ) 26 | 27 | assert enabled 28 | 29 | 30 | def test_supported_option_value(): 31 | enabled = mount_efs.is_stunnel_option_supported( 32 | STUNNEL_OUTPUT_WITH_OPTION_AND_VALUE, STUNNEL_OPTION, "value1" 33 | ) 34 | 35 | assert enabled 36 | 37 | 38 | def test_unsupported_option(): 39 | enabled = mount_efs.is_stunnel_option_supported( 40 | STUNNEL_OUTPUT_WITHOUT_OPTION, STUNNEL_OPTION 41 | ) 42 | 43 | assert not enabled 44 | 45 | 46 | def test_unsupported_option_value(): 47 | enabled = mount_efs.is_stunnel_option_supported( 48 | STUNNEL_OUTPUT_WITHOUT_OPTION, STUNNEL_OPTION, "value1" 49 | ) 50 | 51 | assert not enabled 52 | -------------------------------------------------------------------------------- /dist/amazon-efs-mount-watchdog.plist: -------------------------------------------------------------------------------- 1 | 2 | 11 | 12 | 13 | 14 | Unit 15 | 16 | Description 17 | amazon-efs-mount-watchdog 18 | Before 19 | remote-fs-pre.target 20 | 21 | Label 22 | amazon-efs-mount-watchdog 23 | ProgramArguments 24 | 25 | /usr/local/bin/python3 26 | /usr/local/bin/amazon-efs-mount-watchdog 27 | 28 | RunAtLoad 29 | 30 | KeepAlive 31 | 32 | PathState 33 | 34 | /usr/local/bin/amazon-efs-mount-watchdog 35 | 36 | 37 | Crashed 38 | 39 | 40 | StandardErrorPath 41 | /var/log/amazon/efs/mount-watchdog.log 42 | StandardOutPath 43 | /var/log/amazon/efs/mount-watchdog.log 44 | StartOnMount 45 
| 46 | 47 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_get_linux_kernel_version.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import mount_efs 10 | 11 | DEFAULT_KERNEL_VERSION_NEEDED_LEN = len( 12 | mount_efs.NFS_READAHEAD_OPTIMIZE_LINUX_KERNEL_MIN_VERSION 13 | ) 14 | 15 | 16 | def test_get_linux_kernel_version_empty(mocker): 17 | mocker.patch("platform.release", return_value="") 18 | assert [0, 0] == mount_efs.get_linux_kernel_version( 19 | DEFAULT_KERNEL_VERSION_NEEDED_LEN 20 | ) 21 | 22 | 23 | def test_get_linux_kernel_version_only_dash(mocker): 24 | mocker.patch("platform.release", return_value="-") 25 | assert [0, 0] == mount_efs.get_linux_kernel_version( 26 | DEFAULT_KERNEL_VERSION_NEEDED_LEN 27 | ) 28 | 29 | 30 | def test_get_linux_kernel_version_no_number(mocker): 31 | mocker.patch("platform.release", return_value="test") 32 | assert [0, 0] == mount_efs.get_linux_kernel_version( 33 | DEFAULT_KERNEL_VERSION_NEEDED_LEN 34 | ) 35 | 36 | 37 | def test_get_linux_kernel_version(mocker): 38 | mocker.patch("platform.release", return_value="3.10.0-1160.el7.x86_64") 39 | assert [3, 10] == mount_efs.get_linux_kernel_version( 40 | DEFAULT_KERNEL_VERSION_NEEDED_LEN 41 | ) 42 | 43 | 44 | def test_get_linux_kernel_version_only_major_version(mocker): 45 | mocker.patch("platform.release", return_value="3-1160.el7.x86_64") 46 | assert [3, 0] == mount_efs.get_linux_kernel_version( 47 | DEFAULT_KERNEL_VERSION_NEEDED_LEN 48 | ) 49 | 50 | 51 | def test_get_linux_kernel_version_no_dash(mocker): 52 | mocker.patch("platform.release", return_value="3.10.0") 53 | assert [3, 10] == 
mount_efs.get_linux_kernel_version( 54 | DEFAULT_KERNEL_VERSION_NEEDED_LEN 55 | ) 56 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | PACKAGE_NAME = amazon-efs-utils 10 | SOURCE_TARBALL = $(PACKAGE_NAME).tar.gz 11 | SPECFILE = $(PACKAGE_NAME).spec 12 | BUILD_DIR = build/rpmbuild 13 | PROXY_VERSION = 2.0.0 14 | RPM_BUILD_FLAGS ?= --with system_rust --noclean 15 | export PYTHONPATH := $(shell pwd)/src 16 | 17 | .PHONY: clean 18 | clean: 19 | rm -rf $(BUILD_DIR) 20 | rm -rf $(PACKAGE_NAME) 21 | rm -f $(SOURCE_TARBALL) 22 | 23 | .PHONY: tarball 24 | tarball: clean 25 | mkdir -p $(PACKAGE_NAME) 26 | 27 | mkdir -p $(PACKAGE_NAME)/dist 28 | cp -p dist/amazon-efs-mount-watchdog.conf $(PACKAGE_NAME)/dist 29 | cp -p dist/amazon-efs-mount-watchdog.service $(PACKAGE_NAME)/dist 30 | cp -p dist/efs-utils.conf $(PACKAGE_NAME)/dist 31 | cp -p dist/efs-utils.crt $(PACKAGE_NAME)/dist 32 | 33 | mkdir -p $(PACKAGE_NAME)/src 34 | cp -rp src/mount_efs $(PACKAGE_NAME)/src 35 | cp -rp src/watchdog $(PACKAGE_NAME)/src 36 | cp -rp src/proxy $(PACKAGE_NAME)/src 37 | 38 | mkdir -p ${PACKAGE_NAME}/man 39 | cp -rp man/mount.efs.8 ${PACKAGE_NAME}/man 40 | 41 | tar -czf $(SOURCE_TARBALL) $(PACKAGE_NAME)/* 42 | 43 | .PHONY: sources 44 | sources: tarball 45 | 46 | .PHONY: rpm-only 47 | rpm-only: 48 | mkdir -p $(BUILD_DIR)/{SPECS,COORD_SOURCES,DATA_SOURCES,BUILD,RPMS,SOURCES,SRPMS} 49 | cp $(SPECFILE) $(BUILD_DIR)/SPECS 50 | cp $(SOURCE_TARBALL) $(BUILD_DIR)/SOURCES 51 | cp config.toml $(BUILD_DIR)/SOURCES 52 | rpmbuild -ba --define "_topdir `pwd`/$(BUILD_DIR)" --define "include_vendor_tarball 
false" $(BUILD_DIR)/SPECS/$(SPECFILE) $(RPM_BUILD_FLAGS) 53 | cp $(BUILD_DIR)/RPMS/*/*rpm build 54 | 55 | .PHONY: rpm 56 | rpm: sources rpm-only 57 | 58 | .PHONY: rpm-without-system-rust 59 | rpm-without-system-rust: sources 60 | $(MAKE) rpm-only RPM_BUILD_FLAGS="--without system_rust" 61 | 62 | .PHONY: deb 63 | deb: 64 | ./build-deb.sh 65 | 66 | .PHONY: test 67 | test: 68 | pytest 69 | flake8 70 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_start_watchdog.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | from unittest.mock import MagicMock 8 | 9 | import mount_efs 10 | 11 | from .. import utils 12 | 13 | FS_ID = "fs-deadbeef" 14 | 15 | 16 | def test_upstart_system(mocker): 17 | process_mock = MagicMock() 18 | process_mock.communicate.return_value = ( 19 | "stop", 20 | "", 21 | ) 22 | process_mock.returncode = 0 23 | popen_mock = mocker.patch("subprocess.Popen", return_value=process_mock) 24 | 25 | mount_efs.start_watchdog("init") 26 | 27 | assert 2 == popen_mock.call_count 28 | assert "/sbin/start" in popen_mock.call_args[0][0] 29 | 30 | 31 | def test_systemd_system(mocker): 32 | call_mock = mocker.patch("subprocess.call", return_value=1) 33 | popen_mock = mocker.patch("subprocess.Popen") 34 | 35 | mount_efs.start_watchdog("systemd") 36 | 37 | utils.assert_called_once(call_mock) 38 | assert "systemctl" in call_mock.call_args[0][0] 39 | assert "is-active" in call_mock.call_args[0][0] 40 | utils.assert_called_once(popen_mock) 41 | assert "systemctl" in popen_mock.call_args[0][0] 42 | assert "start" in popen_mock.call_args[0][0] 43 | 44 | 45 | def test_launchd_system(mocker): 46 | process_mock = 
MagicMock() 47 | process_mock.communicate.return_value = ( 48 | "stop", 49 | "", 50 | ) 51 | process_mock.returncode = 0 52 | popen_mock = mocker.patch("subprocess.Popen", return_value=process_mock) 53 | mocker.patch("os.path.exists", return_value=True) 54 | 55 | mount_efs.start_watchdog("launchd") 56 | 57 | assert 2 == popen_mock.call_count 58 | assert "sudo" in popen_mock.call_args[0][0] 59 | assert "launchctl" in popen_mock.call_args[0][0] 60 | assert "load" in popen_mock.call_args[0][0] 61 | 62 | 63 | def test_unknown_system(mocker): 64 | popen_mock = mocker.patch("subprocess.Popen") 65 | 66 | mount_efs.start_watchdog("unknown") 67 | 68 | utils.assert_not_called(popen_mock) 69 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_is_ocsp_enabled.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
7 | # 8 | 9 | import mount_efs 10 | 11 | try: 12 | import ConfigParser 13 | except ImportError: 14 | from configparser import ConfigParser 15 | 16 | 17 | def _get_config(stunnel_check_cert_validity): 18 | try: 19 | config = ConfigParser.SafeConfigParser() 20 | except AttributeError: 21 | config = ConfigParser() 22 | config.add_section(mount_efs.CONFIG_SECTION) 23 | if stunnel_check_cert_validity is not None: 24 | config.set( 25 | mount_efs.CONFIG_SECTION, 26 | "stunnel_check_cert_validity", 27 | str(stunnel_check_cert_validity), 28 | ) 29 | return config 30 | 31 | 32 | def test_is_ocsp_enabled_config_false_no_cli(): 33 | options = {} 34 | 35 | ocsp_enabled = mount_efs.is_ocsp_enabled(_get_config(False), options) 36 | 37 | assert ocsp_enabled is False 38 | 39 | 40 | def test_is_ocsp_enabled_config_true_no_cli(): 41 | options = {} 42 | 43 | ocsp_enabled = mount_efs.is_ocsp_enabled(_get_config(True), options) 44 | 45 | assert ocsp_enabled is True 46 | 47 | 48 | def test_is_ocsp_enabled_config_false_cli_true(): 49 | options = {"ocsp": None} 50 | 51 | ocsp_enabled = mount_efs.is_ocsp_enabled(_get_config(False), options) 52 | 53 | assert ocsp_enabled is True 54 | 55 | 56 | def test_is_ocsp_enabled_config_true_cli_true(): 57 | options = {"ocsp": None} 58 | 59 | ocsp_enabled = mount_efs.is_ocsp_enabled(_get_config(True), options) 60 | 61 | assert ocsp_enabled is True 62 | 63 | 64 | def test_is_ocsp_enabled_config_false_cli_false(): 65 | options = {"noocsp": None} 66 | 67 | ocsp_enabled = mount_efs.is_ocsp_enabled(_get_config(False), options) 68 | 69 | assert ocsp_enabled is False 70 | 71 | 72 | def test_is_ocsp_enabled_config_true_cli_false(): 73 | options = {"noocsp": None} 74 | 75 | ocsp_enabled = mount_efs.is_ocsp_enabled(_get_config(True), options) 76 | 77 | assert ocsp_enabled is False 78 | -------------------------------------------------------------------------------- /test/common.py: -------------------------------------------------------------------------------- 1 
| # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import subprocess 10 | 11 | from mock import MagicMock 12 | 13 | 14 | # The process mock can be retrieved by calling PopenMock().mock 15 | class PopenMock: 16 | def __init__( 17 | self, 18 | return_code=0, 19 | poll_result=0, 20 | communicate_return_value=None, 21 | communicate_side_effect=None, 22 | kill_side_effect=None, 23 | ): 24 | self.return_code = return_code 25 | self.poll_result = poll_result 26 | self.communicate_return_value = communicate_return_value 27 | self.communicate_side_effect = communicate_side_effect 28 | self.kill_side_effect = kill_side_effect 29 | self.mock = self._create_mock() 30 | 31 | def _create_mock(self): 32 | popen_mock = MagicMock() 33 | if self.communicate_return_value: 34 | popen_mock.communicate.return_value = self.communicate_return_value 35 | elif self.communicate_side_effect: 36 | popen_mock.communicate.side_effect = self.communicate_side_effect 37 | if self.kill_side_effect: 38 | popen_mock.kill.side_effect = self.kill_side_effect 39 | popen_mock.returncode = self.return_code 40 | popen_mock.poll.return_value = self.poll_result 41 | return popen_mock 42 | 43 | 44 | DEFAULT_RETRYABLE_FAILURE_POPEN = PopenMock( 45 | return_code=1, 46 | poll_result=1, 47 | communicate_return_value=(b"", b"mount.nfs4: Connection reset by peer"), 48 | ) 49 | DEFAULT_NON_RETRYABLE_FAILURE_POPEN = PopenMock( 50 | return_code=1, 51 | poll_result=1, 52 | communicate_return_value=( 53 | b"", 54 | b"mount.nfs4: access denied by server while mounting 127.0.0.1:/", 55 | ), 56 | ) 57 | DEFAULT_SUCCESS_POPEN = PopenMock(communicate_return_value=(b"", b"")) 58 | DEFAULT_TIMEOUT_POPEN = PopenMock( 59 | return_code=1, 60 | poll_result=1, 61 | 
communicate_side_effect=subprocess.TimeoutExpired("cmd", timeout=1), 62 | ) 63 | DEFAULT_UNKNOWN_EXCEPTION_POPEN = PopenMock( 64 | return_code=1, poll_result=1, communicate_side_effect=Exception("Unknown error") 65 | ) 66 | -------------------------------------------------------------------------------- /src/proxy/src/logger.rs: -------------------------------------------------------------------------------- 1 | use log::LevelFilter; 2 | use log4rs::{ 3 | append::{ 4 | console::{ConsoleAppender, Target}, 5 | rolling_file::{ 6 | policy::compound::{ 7 | roll::fixed_window::FixedWindowRoller, trigger::size::SizeTrigger, CompoundPolicy, 8 | }, 9 | RollingFileAppender, 10 | }, 11 | }, 12 | config::{Appender, Config, Root}, 13 | encode::pattern::PatternEncoder, 14 | filter::threshold::ThresholdFilter, 15 | }; 16 | use std::{path::Path, str::FromStr}; 17 | 18 | use crate::config_parser::ProxyConfig; 19 | 20 | const LOG_FILE_MAX_BYTES: u64 = 1048576; 21 | const LOG_FILE_COUNT: u32 = 10; 22 | 23 | pub fn init(config: &ProxyConfig) { 24 | let log_file_path_string = config 25 | .output 26 | .clone() 27 | .expect("config value `output` is not set"); 28 | let log_file_path = Path::new(&log_file_path_string); 29 | let level_filter = 30 | LevelFilter::from_str(&config.debug).expect("config value for `debug` is invalid"); 31 | 32 | let stderr = ConsoleAppender::builder().target(Target::Stderr).build(); 33 | 34 | let trigger = SizeTrigger::new(LOG_FILE_MAX_BYTES); 35 | let mut pattern = log_file_path_string.clone(); 36 | pattern.push_str(".{}"); 37 | let roller = FixedWindowRoller::builder() 38 | .build(&pattern, LOG_FILE_COUNT) 39 | .expect("Unable to create roller"); 40 | let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller)); 41 | 42 | let log_file = RollingFileAppender::builder() 43 | .encoder(Box::new(PatternEncoder::new( 44 | "{d(%Y-%m-%dT%H:%M:%S%.3fZ)(utc)} {P} {l} {M} {m}{n}", 45 | ))) 46 | .build(log_file_path, Box::new(policy)) 47 | .expect("Unable to 
create log file"); 48 | 49 | let config = Config::builder() 50 | .appender(Appender::builder().build("logfile", Box::new(log_file))) 51 | .appender( 52 | Appender::builder() 53 | .filter(Box::new(ThresholdFilter::new(LevelFilter::Error))) 54 | .build("stderr", Box::new(stderr)), 55 | ) 56 | .build( 57 | Root::builder() 58 | .appender("logfile") 59 | .appender("stderr") 60 | .build(level_filter), 61 | ) 62 | .expect("Invalid logger config"); 63 | 64 | let _ = log4rs::init_config(config).expect("Unable to initialize logger"); 65 | } 66 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_create_state_file_dir.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 
6 | 7 | import errno 8 | import os 9 | 10 | import pytest 11 | 12 | import mount_efs 13 | 14 | try: 15 | import ConfigParser 16 | except ImportError: 17 | from configparser import ConfigParser 18 | 19 | 20 | def _get_config(mode=None): 21 | try: 22 | config = ConfigParser.SafeConfigParser() 23 | except AttributeError: 24 | config = ConfigParser() 25 | config.add_section(mount_efs.CONFIG_SECTION) 26 | 27 | if mode is not None: 28 | config.set(mount_efs.CONFIG_SECTION, "state_file_dir_mode", mode) 29 | 30 | return config 31 | 32 | 33 | def test_create_state_file_dir(tmpdir): 34 | state_file_dir = str(tmpdir.join("efs")) 35 | 36 | mount_efs.create_required_directory(_get_config(), state_file_dir) 37 | 38 | assert os.path.isdir(state_file_dir) 39 | assert "0750" == oct(os.stat(state_file_dir).st_mode)[-4:] 40 | 41 | 42 | def test_create_state_file_dir_exists(tmpdir): 43 | state_file_dir = str(tmpdir.join("efs")) 44 | os.makedirs(state_file_dir) 45 | 46 | mount_efs.create_required_directory(_get_config(), state_file_dir) 47 | 48 | assert os.path.isdir(state_file_dir) 49 | 50 | 51 | def test_create_state_file_dir_exists_as_file(tmpdir): 52 | state_file = tmpdir.join("efs") 53 | state_file.write("", ensure=True) 54 | 55 | with pytest.raises(OSError) as ex: 56 | mount_efs.create_required_directory(_get_config(), str(state_file)) 57 | 58 | assert errno.EEXIST == ex.value.errno 59 | 60 | 61 | def test_create_state_file_dir_overridden_mode(tmpdir): 62 | state_file_dir = str(tmpdir.join("efs")) 63 | 64 | mount_efs.create_required_directory(_get_config(mode=str(755)), state_file_dir) 65 | 66 | assert os.path.isdir(state_file_dir) 67 | assert "0755" == oct(os.stat(state_file_dir).st_mode)[-4:] 68 | 69 | 70 | def test_create_state_file_dir_overridden_bad_mode(tmpdir): 71 | state_file_dir = str(tmpdir.join("efs")) 72 | 73 | mount_efs.create_required_directory( 74 | _get_config(mode="invalid-mode"), state_file_dir 75 | ) 76 | 77 | assert os.path.isdir(state_file_dir) 78 | assert 
"0750" == oct(os.stat(state_file_dir).st_mode)[-4:] 79 | -------------------------------------------------------------------------------- /src/proxy/src/shutdown.rs: -------------------------------------------------------------------------------- 1 | use log::debug; 2 | use tokio::sync::mpsc::{self, Receiver, Sender}; 3 | use tokio_util::sync::CancellationToken; 4 | 5 | #[derive(Debug, Clone, Eq, PartialEq)] 6 | pub enum ShutdownReason { 7 | NeedsRestart, 8 | UnexpectedError, 9 | Unmount, 10 | FrameSizeExceeded, 11 | FrameSizeTooSmall, 12 | } 13 | 14 | #[derive(Clone)] 15 | pub struct ShutdownHandle { 16 | pub cancellation_token: CancellationToken, 17 | notifier: Sender, 18 | } 19 | 20 | impl ShutdownHandle { 21 | pub fn new(cancellation_token: CancellationToken) -> (Self, Receiver) { 22 | let (notifier, r) = mpsc::channel(1024); 23 | let h = Self { 24 | cancellation_token, 25 | notifier, 26 | }; 27 | (h, r) 28 | } 29 | 30 | pub async fn exit(self, reason: Option) { 31 | debug!("Exiting: {:?}", reason); 32 | self.cancellation_token.cancel(); 33 | if let Some(reason) = reason { 34 | let _ = self.notifier.send(reason).await; 35 | } 36 | } 37 | } 38 | 39 | #[cfg(test)] 40 | mod test { 41 | use log::info; 42 | use std::time::Duration; 43 | 44 | use tokio::sync::mpsc; 45 | use tokio_util::sync::CancellationToken; 46 | 47 | use super::ShutdownHandle; 48 | 49 | #[tokio::test] 50 | async fn test() { 51 | let (t, mut r) = mpsc::channel(1); 52 | let token = CancellationToken::new(); 53 | 54 | let s1 = ShutdownHandle { 55 | cancellation_token: token.clone(), 56 | notifier: t.clone(), 57 | }; 58 | let s2 = ShutdownHandle { 59 | cancellation_token: token.clone(), 60 | notifier: t.clone(), 61 | }; 62 | 63 | tokio::spawn(run_task(s1, false)); 64 | tokio::spawn(run_task(s2, true)); 65 | drop(t); 66 | 67 | let _ = r.recv().await; 68 | info!("Done"); 69 | } 70 | 71 | async fn run_task(shutdown: ShutdownHandle, to_cancel: bool) { 72 | let f = async { 73 | if to_cancel { 74 | 
shutdown.cancellation_token.clone().cancel() 75 | } else { 76 | tokio::time::sleep(Duration::from_secs(10)).await; 77 | } 78 | }; 79 | tokio::select! { 80 | _ = shutdown.cancellation_token.cancelled() => {}, 81 | _ = f => {} 82 | } 83 | info!("Task exiting"); 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check [existing open](https://github.com/aws/efs-utils/issues), or [recently closed](https://github.com/aws/efs-utils/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *master* branch. 27 | 2. 
You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/aws/efs-utils/labels/help%20wanted) issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Licensing 54 | 55 | See the [LICENSE](https://github.com/aws/efs-utils/blob/master/LICENSE) file for our project's licensing. We will ask you confirm the licensing of your contribution. 
56 | 57 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 58 | -------------------------------------------------------------------------------- /src/proxy/src/proxy.rs: -------------------------------------------------------------------------------- 1 | use std::{error::Error, marker::PhantomData, sync::Arc, time::Duration}; 2 | 3 | use tokio::{ 4 | net::TcpStream, 5 | sync::{ 6 | mpsc::{self}, 7 | Mutex, 8 | }, 9 | task::JoinHandle, 10 | }; 11 | 12 | use crate::{ 13 | connection_task::ConnectionTask, 14 | connections::ProxyStream, 15 | controller::Event, 16 | proxy_task::{ConnectionMessage, ProxyTask}, 17 | rpc::RpcBatch, 18 | shutdown::ShutdownHandle, 19 | }; 20 | 21 | pub struct Proxy { 22 | partition_to_nfs_cli_queue: mpsc::Sender, 23 | partition_senders: Arc>>>, 24 | shutdown: ShutdownHandle, 25 | proxy_task_handle: JoinHandle<()>, 26 | phantom: PhantomData, 27 | } 28 | 29 | impl Proxy { 30 | const SHUTDOWN_TIMEOUT: u64 = 15; 31 | 32 | pub fn new( 33 | nfs_client: TcpStream, 34 | partition_servers: Vec, 35 | notification_queue: mpsc::Sender>, 36 | shutdown: ShutdownHandle, 37 | ) -> Self { 38 | // Channel for NFSServer -> NFSClient communication 39 | let (tx, rx) = mpsc::channel(64); 40 | 41 | // tx is passed to ConnectionTasks, so each ConnectionTask will be reading from NFS socket 42 | // and sending messages to NFSClient channel via tx 43 | let senders = partition_servers 44 | .into_iter() 45 | .map(|stream| Proxy::create_connection(stream, tx.clone(), shutdown.clone())) 46 | .collect::>>(); 47 | 48 | let partition_senders = Arc::new(Mutex::new(senders)); 49 | 50 | // rx is passed to ProxyTask, so it can receive NFS response messages from ConnectionTask 51 | // and write it to NFSClient socket 52 | let proxy_task = ProxyTask::new( 53 | nfs_client, 54 | notification_queue, 55 | partition_senders.clone(), 56 | rx, 57 | shutdown.clone(), 58 | ); 59 | let 
proxy_task_handle = tokio::spawn(proxy_task.run()); 60 | Self { 61 | partition_to_nfs_cli_queue: tx, 62 | partition_senders, 63 | shutdown, 64 | proxy_task_handle, 65 | phantom: PhantomData, 66 | } 67 | } 68 | 69 | pub async fn add_connection(&self, stream: S) { 70 | let conn = Proxy::create_connection( 71 | stream, 72 | self.partition_to_nfs_cli_queue.clone(), 73 | self.shutdown.clone(), 74 | ); 75 | let mut f = self.partition_senders.lock().await; 76 | f.push(conn); 77 | } 78 | 79 | fn create_connection( 80 | stream: S, 81 | proxy: mpsc::Sender, 82 | shutdown: ShutdownHandle, 83 | ) -> mpsc::Sender { 84 | let (tx, rx) = mpsc::channel(64); 85 | tokio::spawn(ConnectionTask::new(stream, rx, proxy).run(shutdown)); 86 | tx 87 | } 88 | 89 | pub async fn shutdown(self) -> Result<(), Box> { 90 | self.shutdown.cancellation_token.cancel(); 91 | match tokio::time::timeout( 92 | Duration::from_secs(Self::SHUTDOWN_TIMEOUT), 93 | self.proxy_task_handle, 94 | ) 95 | .await? 96 | { 97 | Ok(()) => Ok(()), 98 | Err(join_err) => Err(join_err.into()), 99 | } 100 | } 101 | } 102 | -------------------------------------------------------------------------------- /src/proxy/src/status_reporter.rs: -------------------------------------------------------------------------------- 1 | use crate::controller::ConnectionSearchState; 2 | use crate::efs_rpc::PartitionId; 3 | use crate::{proxy_identifier::ProxyIdentifier, proxy_task::PerformanceStats}; 4 | use anyhow::{Error, Result}; 5 | use tokio::sync::mpsc::{self, Receiver, Sender}; 6 | use tokio::time::Instant; 7 | 8 | #[allow(dead_code)] 9 | pub struct Report { 10 | pub proxy_id: ProxyIdentifier, 11 | pub partition_id: Option, 12 | pub connection_state: ConnectionSearchState, 13 | pub num_connections: usize, 14 | pub last_proxy_update: Option<(Instant, PerformanceStats)>, 15 | pub scale_up_attempt_count: u64, 16 | pub restart_count: u64, 17 | } 18 | 19 | type Request = (); 20 | type Response = Report; 21 | 22 | pub struct StatusReporter { 
23 | pub sender: Sender, 24 | pub receiver: Receiver, 25 | } 26 | 27 | impl StatusReporter { 28 | pub async fn await_report_request(&mut self) -> Result<()> { 29 | self.receiver 30 | .recv() 31 | .await 32 | .ok_or_else(|| Error::msg("Request channel closed"))?; 33 | Ok(()) 34 | } 35 | 36 | // Note: This should only be called when a message is received by the receiver. 37 | pub async fn publish_status(&mut self, report: Report) { 38 | match self.sender.send(report).await { 39 | Ok(_) => (), 40 | Err(e) => panic!("StatusReporter could not send report {}", e), 41 | } 42 | } 43 | } 44 | 45 | pub struct StatusRequester { 46 | _sender: Sender, 47 | _receiver: Receiver, 48 | } 49 | 50 | impl StatusRequester { 51 | pub async fn _request_status(&mut self) -> Result { 52 | self._sender.send(()).await?; 53 | self._receiver 54 | .recv() 55 | .await 56 | .ok_or_else(|| Error::msg("Response channel closed")) 57 | } 58 | } 59 | 60 | pub fn create_status_channel() -> (StatusRequester, StatusReporter) { 61 | let (call_sender, call_receiver) = mpsc::channel::(1); 62 | let (reply_sender, reply_receiver) = mpsc::channel::(1); 63 | 64 | let status_requester = StatusRequester { 65 | _sender: call_sender, 66 | _receiver: reply_receiver, 67 | }; 68 | 69 | let status_reporter = StatusReporter { 70 | sender: reply_sender, 71 | receiver: call_receiver, 72 | }; 73 | 74 | (status_requester, status_reporter) 75 | } 76 | 77 | #[cfg(test)] 78 | mod tests { 79 | use super::*; 80 | 81 | #[tokio::test] 82 | async fn test_basic() -> Result<()> { 83 | let proxy_id = ProxyIdentifier::new(); 84 | 85 | let (mut status_requester, mut status_reporter) = create_status_channel(); 86 | tokio::spawn(async move { 87 | status_reporter 88 | .await_report_request() 89 | .await 90 | .expect("Request channel closed"); 91 | let report = Report { 92 | proxy_id, 93 | partition_id: None, 94 | connection_state: ConnectionSearchState::Idle, 95 | num_connections: 1, 96 | last_proxy_update: None, 97 | 
scale_up_attempt_count: 0, 98 | restart_count: 0, 99 | }; 100 | status_reporter.publish_status(report).await 101 | }); 102 | 103 | let r = status_requester._request_status().await?; 104 | assert_eq!(proxy_id, r.proxy_id); 105 | assert!(r.partition_id.is_none()); 106 | assert_eq!(r.connection_state, ConnectionSearchState::Idle); 107 | assert!(r.last_proxy_update.is_none()); 108 | assert_eq!(1, r.num_connections); 109 | Ok(()) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_add_stunnel_ca_options.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | import tempfile 8 | 9 | import pytest 10 | 11 | import mount_efs 12 | 13 | try: 14 | import ConfigParser 15 | except ImportError: 16 | from configparser import ConfigParser 17 | 18 | CAPATH = "/capath" 19 | CAFILE = "/cafile.crt" 20 | DEFAULT_REGION = "us-east-1" 21 | ISOLATED_REGION = "us-iso-east-1" 22 | ISOLATED_REGION_STUNNEL_CAFILE = "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem" 23 | 24 | 25 | def _get_config(): 26 | try: 27 | config = ConfigParser.SafeConfigParser() 28 | except AttributeError: 29 | config = ConfigParser() 30 | config.add_section(mount_efs.CONFIG_SECTION) 31 | return config 32 | 33 | 34 | def _create_temp_file(tmpdir, content=""): 35 | temp_file = tmpdir.join(tempfile.mkstemp()[1]) 36 | temp_file.write(content, ensure=True) 37 | return temp_file 38 | 39 | 40 | def test_use_existing_cafile(tmpdir): 41 | options = {"cafile": str(_create_temp_file(tmpdir))} 42 | efs_config = {} 43 | 44 | mount_efs.add_tunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) 45 | 46 | assert options["cafile"] == 
efs_config.get("CAfile") 47 | assert "CApath" not in efs_config 48 | 49 | 50 | def test_use_missing_cafile(capsys): 51 | options = {"cafile": "/missing1"} 52 | efs_config = {} 53 | 54 | with pytest.raises(SystemExit) as ex: 55 | mount_efs.add_tunnel_ca_options( 56 | efs_config, _get_config(), options, DEFAULT_REGION 57 | ) 58 | 59 | assert 0 != ex.value.code 60 | 61 | out, err = capsys.readouterr() 62 | assert "Failed to find certificate authority file for verification" in err 63 | 64 | 65 | def test_stunnel_cafile_configuration_in_option(mocker): 66 | options = {"cafile": CAFILE} 67 | efs_config = {} 68 | 69 | mocker.patch("os.path.exists", return_value=True) 70 | 71 | mount_efs.add_tunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) 72 | 73 | assert CAFILE == efs_config.get("CAfile") 74 | 75 | 76 | def test_stunnel_cafile_configuration_in_config(mocker): 77 | options = {} 78 | efs_config = {} 79 | 80 | config = _get_config() 81 | config.set(mount_efs.CONFIG_SECTION, "stunnel_cafile", CAFILE) 82 | 83 | mocker.patch("os.path.exists", return_value=True) 84 | 85 | mount_efs.add_tunnel_ca_options(efs_config, config, options, DEFAULT_REGION) 86 | 87 | assert CAFILE == efs_config.get("CAfile") 88 | 89 | 90 | def test_stunnel_cafile_not_configured(mocker): 91 | options = {} 92 | efs_config = {} 93 | 94 | mocker.patch("os.path.exists", return_value=True) 95 | 96 | mount_efs.add_tunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) 97 | 98 | assert mount_efs.DEFAULT_STUNNEL_CAFILE == efs_config.get("CAfile") 99 | 100 | 101 | def test_stunnel_cafile_configured_in_mount_region_section(mocker): 102 | options = {} 103 | efs_config = {} 104 | 105 | config = _get_config() 106 | config.set(mount_efs.CONFIG_SECTION, "stunnel_cafile", CAFILE) 107 | config_section = "%s.%s" % (mount_efs.CONFIG_SECTION, ISOLATED_REGION) 108 | config.add_section(config_section) 109 | config.set(config_section, "stunnel_cafile", ISOLATED_REGION_STUNNEL_CAFILE) 110 | 
111 | mocker.patch("os.path.exists", return_value=True) 112 | 113 | mount_efs.add_tunnel_ca_options(efs_config, config, options, ISOLATED_REGION) 114 | 115 | assert ISOLATED_REGION_STUNNEL_CAFILE == efs_config.get("CAfile") 116 | -------------------------------------------------------------------------------- /src/proxy/rust-xdr/xdr-codec/tests/qc-record.rs: -------------------------------------------------------------------------------- 1 | extern crate quickcheck; 2 | 3 | use std::io::{Cursor, Write}; 4 | 5 | use quickcheck::{quickcheck, TestResult}; 6 | 7 | use xdr_codec::record::{XdrRecordReader, XdrRecordWriter}; 8 | use xdr_codec::Pack; 9 | 10 | // Make sure XdrRecordWriter writes the right stuff 11 | fn check_writerec(bufsz: usize, eor: bool, ref bytes: Vec) -> TestResult { 12 | const EOR: u32 = 1 << 31; 13 | 14 | if bufsz == 0 { 15 | return TestResult::discard(); 16 | } 17 | 18 | // Make an expected serialization into fragments 19 | let mut expected = Vec::new(); 20 | let nchunks = (bytes.len() + bufsz - 1) / bufsz; 21 | 22 | for (idx, c) in bytes.chunks(bufsz).enumerate() { 23 | let mut len = c.len() as u32; 24 | if nchunks - 1 == idx && eor { 25 | len |= EOR; 26 | } 27 | 28 | if let Err(e) = len.pack(&mut expected) { 29 | return TestResult::error(format!("pack failed: {:?}", e)); 30 | } 31 | expected.extend(c); 32 | } 33 | if !eor || nchunks == 0 { 34 | if let Err(e) = EOR.pack(&mut expected) { 35 | return TestResult::error(format!("eor pack failed: {:?}", e)); 36 | } 37 | } 38 | 39 | // Write the same data with XdrRecordWriter 40 | let mut buf = Vec::new(); 41 | { 42 | let mut xw = XdrRecordWriter::with_buffer(&mut buf, bufsz); 43 | if let Err(e) = xw.write(bytes) { 44 | return TestResult::error(format!("xw write failed: {:?}", e)); 45 | } 46 | if let Err(e) = xw.flush_eor(eor) { 47 | return TestResult::error(format!("xw flush_eor failed: {:?}", e)); 48 | } 49 | } 50 | 51 | if buf != expected { 52 | println!( 53 | "eor {} bufsz {} bytes {:?} len {}", 54 
| eor, 55 | bufsz, 56 | bytes, 57 | bytes.len() 58 | ); 59 | println!("expected {:?} len {}", expected, expected.len()); 60 | println!(" buf {:?} len {}", buf, buf.len()); 61 | } 62 | 63 | TestResult::from_bool(buf == expected) 64 | } 65 | 66 | #[test] 67 | fn record_writerec() { 68 | quickcheck(check_writerec as fn(usize, bool, Vec) -> TestResult); 69 | } 70 | 71 | // Make sure record structure survives a round trip 72 | fn check_codec(bufsz: usize, ref records: Vec>) -> TestResult { 73 | if bufsz == 0 { 74 | return TestResult::discard(); 75 | } 76 | 77 | let mut buf = Vec::new(); 78 | 79 | for rec in records { 80 | let mut xw = XdrRecordWriter::with_buffer(&mut buf, bufsz); 81 | 82 | if let Err(e) = xw.write(rec) { 83 | return TestResult::error(format!("xw write failed: {:?}", e)); 84 | } 85 | } 86 | 87 | { 88 | let cur = Cursor::new(buf); 89 | let xr = XdrRecordReader::new(cur); 90 | 91 | for (res, orig) in xr.into_iter().zip(records) { 92 | match res { 93 | Err(e) => return TestResult::error(format!("xr failed {:?}", e)), 94 | Ok(ref rx) => { 95 | if rx != orig { 96 | println!( 97 | "bufsz {} mismatch orig {:?}, len {}", 98 | bufsz, 99 | orig, 100 | orig.len() 101 | ); 102 | println!(" rx {:?}, len {}", rx, rx.len()); 103 | return TestResult::failed(); 104 | } 105 | } 106 | } 107 | } 108 | } 109 | 110 | TestResult::passed() 111 | } 112 | 113 | #[test] 114 | fn record_codec() { 115 | quickcheck(check_codec as fn(usize, Vec>) -> TestResult); 116 | } 117 | -------------------------------------------------------------------------------- /src/proxy/rust-xdr/xdrgen/README.md: -------------------------------------------------------------------------------- 1 | # Rust XDR library 2 | 3 | [![Build Status](https://travis-ci.org/jsgf/rust-xdr.svg?branch=master)](https://travis-ci.org/jsgf/rust-xdr) 4 | [![Crates.io](https://img.shields.io/crates/v/xdrgen.svg)](https://crates.io/crates/xdrgen) 5 | [![Coverage 
Status](https://coveralls.io/repos/github/jsgf/promising-future/badge.svg?branch=master)](https://coveralls.io/github/jsgf/promising-future?branch=master) 6 | 7 | This crate provides xdrgen, which takes an XDR specification in a .x 8 | file, and produces Rust code to serialize and deserialize the 9 | specified types. It is intended to be used in conjunction with 10 | [xdr-codec](https://github.com/jsgf/rust-xdr-codec). 11 | 12 | The syntax of the .x file follows 13 | [RFC4506](https://tools.ietf.org/html/rfc4506.html). This has type definitions 14 | for XDR but does not include RPC protocol specifications. Correspondingly, 15 | xdrgen does not support auto-generation of RPC clients/servers. 16 | 17 | ## Changes in 0.4.0 18 | 19 | - Now uses the `quote` package, so it will work on stable Rust 20 | - Detects the use of Rust keywords in XDR specifications, and appends a `_` to them. 21 | 22 | ## Usage 23 | 24 | Usage is straightforward. You can generate the Rust code from a spec in a build.rs: 25 | 26 | ``` 27 | extern crate xdrgen; 28 | 29 | fn main() { 30 | xdrgen::compile("src/simple.x").expect("xdrgen simple.x failed"); 31 | } 32 | ``` 33 | 34 | This code can then be included into a module: 35 | 36 | ``` 37 | mod simple { 38 | use xdr_codec; 39 | 40 | #[allow(dead_code)] 41 | include!(concat!(env!("OUT_DIR"), "/simple_xdr.rs")); 42 | } 43 | ``` 44 | 45 | Once you have this, you can call `mytype.pack(&mut output)`, and 46 | `let mything: MyThing = xdr_codec::unpack(&mut input)?;`. 47 | 48 | The serializers require your types to implement the `Pack` and `Unpack` 49 | traits, and generate code to write to a `std::io::Write` implementation, and 50 | read from `std::io::Read`. 51 | 52 | All types and fields are generated public, so you can control their access 53 | outside your module or crate. 
If your spec references other types which are 54 | not defined within the spec, then you can define them within the module 55 | as well, either by aliasing them with other defined types, or implementing 56 | the `Pack` and `Unpack` traits yourself. 57 | 58 | You can use xdr-codec's `XdrRecordReader` and `XdrRecordWriter` types as IO 59 | filters that implement XDR-RPC record marking. 60 | 61 | More [documentation for xdrgen 62 | here](https://docs.rs/xdrgen/). See the 63 | [documentation for 64 | xdr-codec](https://docs.rs/xdr-codec/) for more 65 | details about using the generated types and code. 66 | 67 | ## Limitations 68 | 69 | There are currently a few limitations: 70 | * The generated code uses identifiers as specified in the .x file, so the 71 | Rust code will not use normal formatting conventions. 72 | * Generated code follows no formatting convention - use rustfmt if desired. 73 | * XDR has discriminated unions, which are a good match for Rust enums. 74 | However, it also supports a `default` case if an unknown discriminator 75 | is encountered. This crate supports this for unpacking, but not for 76 | packing, as Rust does not allow enums to have unknown values. 77 | * The generated code uses `#[derive(Debug, Clone, ...)]` to generate 78 | implementations for common traits. However, rustc only supports `#[derive]` 79 | on fixed-size arrays with 0..32 elements; if you have an array larger than 80 | this, the generated code will fail to compile. Right now, the only workaround 81 | is to manually implement `Pack` and `Unpack` for such types. 82 | (TODO: add an option to omit derived traits.) 83 | 84 | ## License 85 | 86 | Licensed under either of 87 | 88 | * Apache License, Version 2.0, ([LICENSE-APACHE](http://www.apache.org/licenses/LICENSE-2.0)) 89 | * MIT license ([LICENSE-MIT](http://opensource.org/licenses/MIT)) 90 | 91 | at your option. 
92 | 93 | ### Contribution 94 | 95 | Unless you explicitly state otherwise, any contribution intentionally submitted 96 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any 97 | additional terms or conditions. 98 | -------------------------------------------------------------------------------- /dist/efs-utils.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | [DEFAULT] 10 | logging_level = INFO 11 | logging_max_bytes = 1048576 12 | logging_file_count = 10 13 | # mode for /var/run/efs and subdirectories in octal 14 | state_file_dir_mode = 750 15 | 16 | [mount] 17 | dns_name_format = {az}.{fs_id}.efs.{region}.{dns_name_suffix} 18 | dns_name_suffix = amazonaws.com 19 | #The region of the file system when mounting from on-premises or cross region. 20 | #region = us-east-1 21 | stunnel_debug_enabled = false 22 | #Uncomment the below option to save all stunnel logs for a file system to the same file 23 | #stunnel_logs_file = /var/log/amazon/efs/{fs_id}.stunnel.log 24 | stunnel_cafile = /etc/amazon/efs/efs-utils.crt 25 | 26 | # Validate the certificate hostname on mount. This option is not supported by certain stunnel versions. 27 | stunnel_check_cert_hostname = true 28 | 29 | # Use OCSP to check certificate validity. This option is not supported by certain stunnel versions. 30 | stunnel_check_cert_validity = false 31 | 32 | # Set to true to use FIPS-mode for stunnel. Enabling this will change the AWS SDK client to use FIPS as well. 
33 | fips_mode_enabled = false 34 | 35 | # Define the port range that the TLS tunnel will choose from 36 | port_range_lower_bound = 20049 37 | port_range_upper_bound = 21049 38 | 39 | # Optimize read_ahead_kb for Linux 5.4+ 40 | optimize_readahead = true 41 | 42 | # By default, we enable the feature to fallback to mount with mount target ip address when dns name cannot be resolved 43 | fall_back_to_mount_target_ip_address_enabled = true 44 | 45 | # By default, we use IMDSv2 to get the instance metadata, set this to true if you want to disable IMDSv2 usage 46 | disable_fetch_ec2_metadata_token = false 47 | 48 | # By default, we enable efs-utils to retry failed mount.nfs command that due to (1) connection reset by peer (2) the 49 | # mount.nfs is not finished within 'retry_nfs_mount_command_timeout_sec'. If the retry count is set as N, initial N - 1 50 | # mount attempts will timeout if the command does not finish within 'retry_nfs_mount_command_timeout_sec' sec. 51 | # The last mount attempt will keep the existing behavior of mount.nfs. 
52 | # 53 | retry_nfs_mount_command = true 54 | retry_nfs_mount_command_count = 3 55 | retry_nfs_mount_command_timeout_sec = 15 56 | 57 | [mount.cn-north-1] 58 | dns_name_suffix = amazonaws.com.cn 59 | 60 | 61 | [mount.cn-northwest-1] 62 | dns_name_suffix = amazonaws.com.cn 63 | 64 | 65 | [mount.eu-isoe-west-1] 66 | dns_name_suffix = cloud.adc-e.uk 67 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 68 | 69 | [mount.eusc-de-east-1] 70 | dns_name_suffix = amazonaws.eu 71 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 72 | 73 | [mount.us-iso-east-1] 74 | dns_name_suffix = c2s.ic.gov 75 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 76 | 77 | [mount.us-iso-west-1] 78 | dns_name_suffix = c2s.ic.gov 79 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 80 | 81 | [mount.us-isob-east-1] 82 | dns_name_suffix = sc2s.sgov.gov 83 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 84 | 85 | [mount.us-isob-west-1] 86 | dns_name_suffix = sc2s.sgov.gov 87 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 88 | 89 | [mount.us-isof-east-1] 90 | dns_name_suffix = csp.hci.ic.gov 91 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 92 | 93 | [mount.us-isof-south-1] 94 | dns_name_suffix = csp.hci.ic.gov 95 | stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem 96 | 97 | [mount-watchdog] 98 | enabled = true 99 | poll_interval_sec = 1 100 | unmount_count_for_consistency = 5 101 | unmount_grace_period_sec = 30 102 | 103 | # Set client auth/access point certificate renewal rate. Minimum value is 1 minute. 
104 | tls_cert_renewal_interval_min = 60 105 | 106 | # Periodically check the health of stunnel to make sure the connection is fully established 107 | stunnel_health_check_enabled = true 108 | stunnel_health_check_interval_min = 5 109 | stunnel_health_check_command_timeout_sec = 30 110 | 111 | [cloudwatch-log] 112 | # enabled = true 113 | log_group_name = /aws/efs/utils 114 | 115 | # Possible values are : 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, and 3653 116 | # Comment this config to prevent log deletion 117 | retention_in_days = 14 118 | -------------------------------------------------------------------------------- /src/proxy/rust-xdr/xdr-codec/tests/test-record.rs: -------------------------------------------------------------------------------- 1 | // Don't rustfmt in here to avoid trashing vec![] formatting 2 | #![cfg_attr(rustfmt, rustfmt_skip)] 3 | 4 | use std::io::{Cursor, Read, Write}; 5 | 6 | use xdr_codec::record::{XdrRecordReader, XdrRecordWriter}; 7 | 8 | #[test] 9 | fn recread_full() { 10 | let inbuf = vec![128, 0, 0, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; 11 | let cur = Cursor::new(inbuf); 12 | 13 | let mut recread = XdrRecordReader::new(cur); 14 | let mut buf = vec![0; 20]; 15 | 16 | assert_eq!(recread.read(&mut buf[..]).unwrap(), 10); 17 | assert_eq!( 18 | buf, 19 | vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] 20 | ); 21 | assert!(recread.eor()); 22 | } 23 | 24 | #[test] 25 | fn recread_short() { 26 | let inbuf = vec![128, 0, 0, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; 27 | let cur = Cursor::new(inbuf); 28 | 29 | let mut recread = XdrRecordReader::new(cur); 30 | let mut buf = vec![0; 5]; 31 | 32 | assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); 33 | assert!(recread.eor()); 34 | assert_eq!(buf, vec![0, 1, 2, 3, 4]); 35 | 36 | assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); 37 | assert!(recread.eor()); 38 | assert_eq!(buf, vec![5, 6, 7, 8, 9]); 39 | } 40 | 41 | #[test] 42 | fn recread_half() { 43 | let 
inbuf = vec![0, 0, 0, 5, 0, 1, 2, 3, 4, 128, 0, 0, 5, 5, 6, 7, 8, 9]; 44 | let cur = Cursor::new(inbuf); 45 | 46 | let mut recread = XdrRecordReader::new(cur); 47 | let mut buf = vec![0; 10]; 48 | 49 | assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); 50 | assert_eq!(buf, vec![0, 1, 2, 3, 4, 0, 0, 0, 0, 0]); 51 | assert!(!recread.eor()); 52 | 53 | assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); 54 | assert_eq!(buf, vec![5, 6, 7, 8, 9, 0, 0, 0, 0, 0]); 55 | assert!(recread.eor()); 56 | } 57 | 58 | #[test] 59 | fn recread_iter() { 60 | let inbuf = vec![ 61 | 0, 62 | 0, 63 | 0, 64 | 5, 65 | 0, 66 | 1, 67 | 2, 68 | 3, 69 | 4, 70 | 128, 71 | 0, 72 | 0, 73 | 5, 74 | 5, 75 | 6, 76 | 7, 77 | 8, 78 | 9, 79 | 128, 80 | 0, 81 | 0, 82 | 1, 83 | 99, 84 | ]; 85 | let cur = Cursor::new(inbuf); 86 | let recread = XdrRecordReader::new(cur); 87 | 88 | let expected = vec![vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9], vec![99]]; 89 | let got: Vec<_> = recread.into_iter().map(|r| r.expect("IO error")).collect(); 90 | 91 | assert_eq!(expected, got); 92 | } 93 | 94 | #[test] 95 | fn read_zerorec() { 96 | let inbuf = vec![0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0]; 97 | 98 | let cur = Cursor::new(inbuf); 99 | let mut recread = XdrRecordReader::new(cur); 100 | 101 | let mut buf = [0; 100]; 102 | assert_eq!(recread.read(&mut buf).unwrap(), 0); 103 | assert!(recread.eor()); 104 | } 105 | 106 | #[test] 107 | #[should_panic(expected = "must be non-zero")] 108 | fn zerosz() { 109 | let buf = Vec::new(); 110 | let _ = XdrRecordWriter::with_buffer(buf, 0); 111 | } 112 | 113 | #[test] 114 | fn smallrec() { 115 | let mut buf = Vec::new(); 116 | 117 | { 118 | let mut xw = XdrRecordWriter::new(&mut buf); 119 | 120 | assert_eq!(write!(xw, "hello").unwrap(), ()); 121 | } 122 | 123 | assert_eq!(buf, vec![128, 0, 0, 5, 104, 101, 108, 108, 111]) 124 | } 125 | 126 | #[test] 127 | fn largerec() { 128 | let mut buf = Vec::new(); 129 | 130 | { 131 | let mut xw = XdrRecordWriter::with_buffer(&mut buf, 3); 132 | 133 | 
assert_eq!(write!(xw, "hello").unwrap(), ()); 134 | } 135 | 136 | assert_eq!(buf, vec![0, 0, 0, 3, 104, 101, 108, 128, 0, 0, 2, 108, 111]) 137 | } 138 | 139 | #[test] 140 | fn largerec_flush() { 141 | let mut buf = Vec::new(); 142 | 143 | { 144 | let mut xw = XdrRecordWriter::with_buffer(&mut buf, 10); 145 | 146 | assert_eq!(write!(xw, "hel").unwrap(), ()); 147 | xw.flush().unwrap(); 148 | assert_eq!(write!(xw, "lo").unwrap(), ()); 149 | xw.flush().unwrap(); 150 | } 151 | 152 | assert_eq!( 153 | buf, 154 | vec![ 155 | 0, 156 | 0, 157 | 0, 158 | 3, 159 | 104, 160 | 101, 161 | 108, 162 | 0, 163 | 0, 164 | 0, 165 | 2, 166 | 108, 167 | 111, 168 | 128, 169 | 0, 170 | 0, 171 | 0, 172 | ] 173 | ) 174 | } 175 | -------------------------------------------------------------------------------- /dist/efs-utils.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF 3 | ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 4 | b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL 5 | MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv 6 | b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj 7 | ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM 8 | 9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw 9 | IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6 10 | VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L 11 | 93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm 12 | jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC 13 | AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA 14 | A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI 15 | U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs 16 | N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv 17 | 
o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU 18 | 5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy 19 | rqXRfboQnoZsG4q5WTP468SQvvG5 20 | -----END CERTIFICATE----- 21 | 22 | -----BEGIN CERTIFICATE----- 23 | MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF 24 | ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 25 | b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL 26 | MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv 27 | b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK 28 | gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ 29 | W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg 30 | 1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K 31 | 8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r 32 | 2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me 33 | z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR 34 | 8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj 35 | mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz 36 | 7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6 37 | +XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI 38 | 0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB 39 | Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm 40 | UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2 41 | LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY 42 | +gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS 43 | k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl 44 | 7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm 45 | btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl 46 | urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+ 47 | fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63 48 | 
n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE 49 | 76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H 50 | 9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT 51 | 4PsJYGw= 52 | -----END CERTIFICATE----- 53 | 54 | -----BEGIN CERTIFICATE----- 55 | MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 56 | MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g 57 | Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG 58 | A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg 59 | Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl 60 | ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j 61 | QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr 62 | ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr 63 | BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM 64 | YyRIHN8wfdVoOw== 65 | -----END CERTIFICATE----- 66 | 67 | -----BEGIN CERTIFICATE----- 68 | MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 69 | MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g 70 | Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG 71 | A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg 72 | Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi 73 | 9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk 74 | M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB 75 | /zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB 76 | MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw 77 | CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW 78 | 1KyLa2tJElMzrdfkviT8tQp21KW8EA== 79 | -----END CERTIFICATE----- -------------------------------------------------------------------------------- /test/mount_efs_test/test_parse_arguments.py: -------------------------------------------------------------------------------- 1 | # 
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#

import pytest

import mount_efs


def _test_parse_arguments_help(capsys, flag):
    # Shared check: any help flag triggers an early exit with status 0 and a
    # usage message on stdout.
    with pytest.raises(SystemExit) as excinfo:
        mount_efs.parse_arguments_early_exit(["mount", "foo", "bar", flag])

    assert excinfo.value.code == 0

    captured = capsys.readouterr()
    assert "Usage:" in captured.out


def test_parse_arguments_help_long(capsys):
    _test_parse_arguments_help(capsys, "--help")


def test_parse_arguments_help_short(capsys):
    _test_parse_arguments_help(capsys, "-h")


def test_parse_arguments_version(capsys):
    # --version exits 0 and reports the helper's version string.
    with pytest.raises(SystemExit) as excinfo:
        mount_efs.parse_arguments_early_exit(["mount", "foo", "bar", "--version"])

    assert excinfo.value.code == 0

    captured = capsys.readouterr()
    assert "Version: %s" % mount_efs.VERSION in captured.out


def test_parse_arguments_no_fs_id(capsys):
    # No filesystem id at all: non-zero exit with usage on stderr.
    with pytest.raises(SystemExit) as excinfo:
        mount_efs.parse_arguments(None, ["mount"])

    assert excinfo.value.code != 0

    captured = capsys.readouterr()
    assert "Usage:" in captured.err


def test_parse_arguments_no_mount_point(capsys):
    # A filesystem id without a mount point: non-zero exit with usage on stderr.
    with pytest.raises(SystemExit) as excinfo:
        mount_efs.parse_arguments(None, ["mount", "fs-deadbeef"])

    assert excinfo.value.code != 0

    captured = capsys.readouterr()
    assert "Usage:" in captured.err


def test_parse_arguments_default_path():
    # A bare filesystem id defaults the EFS path to "/".
    fs_id, efs_path, mount_point, opts = mount_efs.parse_arguments(
        None, ["mount", "fs-deadbeef", "/dir"]
    )

    assert fs_id == "fs-deadbeef"
    assert efs_path == "/"
    assert mount_point == "/dir"
    assert opts == {}


def test_parse_arguments_custom_path():
    # "fs-id:/subdir" syntax selects a non-root EFS path.
    fs_id, efs_path, mount_point, opts = mount_efs.parse_arguments(
        None, ["mount", "fs-deadbeef:/home", "/dir"]
    )

    assert fs_id == "fs-deadbeef"
    assert efs_path == "/home"
    assert mount_point == "/dir"
    assert opts == {}


def test_parse_arguments_verbose():
    # "-v" is tolerated and does not leak into the parsed options.
    fs_id, efs_path, mount_point, opts = mount_efs.parse_arguments(
        None, ["mount", "fs-deadbeef:/home", "/dir", "-v", "-o", "foo,bar=baz,quux"]
    )

    assert fs_id == "fs-deadbeef"
    assert efs_path == "/home"
    assert mount_point == "/dir"
    assert opts == {"foo": None, "bar": "baz", "quux": None}


def test_parse_arguments():
    # "-o" options parse into a dict; bare flags map to None.
    fs_id, efs_path, mount_point, opts = mount_efs.parse_arguments(
        None, ["mount", "fs-deadbeef:/home", "/dir", "-o", "foo,bar=baz,quux"]
    )

    assert fs_id == "fs-deadbeef"
    assert efs_path == "/home"
    assert mount_point == "/dir"
    assert opts == {"foo": None, "bar": "baz", "quux": None}


def test_parse_arguments_with_az_dns_name_mount_az_not_in_option(mocker):
    # When mounting via an AZ-qualified DNS name and no "az" mount option is
    # given, the AZ parsed out of the DNS name must land in the options.
    dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com"
    mocker.patch(
        "mount_efs.match_device", return_value=("fs-deadbeef", "/", "us-east-1a")
    )
    fs_id, efs_path, mount_point, opts = mount_efs.parse_arguments(
        None, ["mount", dns_name, "/dir", "-o", "foo,bar=baz,quux"]
    )

    assert fs_id == "fs-deadbeef"
    assert efs_path == "/"
    assert mount_point == "/dir"
    assert opts == {"foo": None, "bar": "baz", "quux": None, "az": "us-east-1a"}


def test_parse_arguments_macos(mocker):
    # On macOS the device and mount point come after the "-o" option groups.
    mocker.patch("mount_efs.check_if_platform_is_mac", return_value=True)
    fs_id, efs_path, mount_point, opts = mount_efs.parse_arguments(
        None,
        [
            "mount",
            "-o",
            "foo",
            "-o",
            "bar=baz",
            "-o",
            "quux",
            "fs-deadbeef:/home",
            "/dir",
        ],
    )

    assert fs_id == "fs-deadbeef"
    assert efs_path == "/home"
    assert mount_point == "/dir"
    assert opts == {"foo": None, "bar": "baz", "quux": None}
128 | "foo", 129 | "-o", 130 | "bar=baz", 131 | "-o", 132 | "quux", 133 | "fs-deadbeef:/home", 134 | "/dir", 135 | ], 136 | ) 137 | 138 | assert "fs-deadbeef" == fsid 139 | assert "/home" == path 140 | assert "/dir" == mountpoint 141 | assert {"foo": None, "bar": "baz", "quux": None} == options 142 | -------------------------------------------------------------------------------- /test/global_test/test_global_version_match.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import os 10 | 11 | try: 12 | import ConfigParser 13 | except ImportError: 14 | from configparser import ConfigParser 15 | 16 | 17 | SPEC_FILE = "amazon-efs-utils.spec" 18 | DEB_FILE = "build-deb.sh" 19 | FILE_LIST = [ 20 | "build-deb.sh", 21 | "src/watchdog/__init__.py", 22 | "src/mount_efs/__init__.py", 23 | "build-deb.sh", 24 | "amazon-efs-utils.spec", 25 | ] 26 | 27 | GLOBAL_CONFIG = "config.ini" 28 | 29 | 30 | def test_file_version_match(): 31 | global_version = get_global_version() 32 | for f in FILE_LIST: 33 | version_in_file = get_version_for_file(f) 34 | assert ( 35 | version_in_file == global_version 36 | ), "version in {} is {}, does not match global version {}".format( 37 | f, version_in_file, global_version 38 | ) 39 | 40 | 41 | def test_file_release_match(): 42 | global_release = get_global_release() 43 | for f in [DEB_FILE, SPEC_FILE]: 44 | release_in_file = get_release_for_file(f) 45 | assert ( 46 | release_in_file == global_release 47 | ), "release in {} is {}, does not match global release {}".format( 48 | f, release_in_file, global_release 49 | ) 50 | 51 | 52 | def test_changelog_version_match(): 53 | global_version = get_global_version() 54 | 55 | 
version_in_changelog = get_version_for_changelog(SPEC_FILE) 56 | assert ( 57 | version_in_changelog is not None and version_in_changelog == global_version 58 | ), "version in {} is {}, does not match expected_version_release {}, you need to add changelog in the spec file".format( 59 | SPEC_FILE, version_in_changelog, global_version 60 | ) 61 | 62 | 63 | def get_global_version(): 64 | return get_global_value("version") 65 | 66 | 67 | def get_global_release(): 68 | return get_global_value("release") 69 | 70 | 71 | def get_version_for_changelog(file_path): 72 | mount_helper_root_folder = uppath(os.path.abspath(__file__), 3) 73 | file_to_check = os.path.join(mount_helper_root_folder, file_path) 74 | has_changelog = False 75 | with open(file_to_check) as fp: 76 | lines = fp.readlines() 77 | for line in lines: 78 | if line.startswith("%changelog"): 79 | has_changelog = True 80 | if has_changelog and line.startswith("*"): 81 | return line.split(" ")[-1].strip() 82 | return None 83 | 84 | 85 | def get_version_for_file(file_path): 86 | mount_helper_root_folder = uppath(os.path.abspath(__file__), 3) 87 | file_to_check = os.path.join(mount_helper_root_folder, file_path) 88 | with open(file_to_check) as fp: 89 | lines = fp.readlines() 90 | for line in lines: 91 | if line.startswith("VERSION"): 92 | return ( 93 | line.split("=")[1].strip().replace('"', "") 94 | ) # Replacing the double quotes instead of single quotes as 95 | # "black" reformates every single quotes to double quotes. 
96 | if line.startswith("Version"): 97 | return line.split(":")[1].strip() 98 | return None 99 | 100 | 101 | def get_release_for_file(file_path): 102 | mount_helper_root_folder = uppath(os.path.abspath(__file__), 3) 103 | file_to_check = os.path.join(mount_helper_root_folder, file_path) 104 | with open(file_to_check) as fp: 105 | lines = fp.readlines() 106 | for line in lines: 107 | if line.startswith("RELEASE"): 108 | return line.split("=")[1].strip() 109 | if line.startswith("Release"): 110 | return line.split(":")[1].strip().split("%")[0] 111 | return None 112 | 113 | 114 | def get_global_value(key): 115 | mount_helper_root_folder = uppath(os.path.abspath(__file__), 3) 116 | config_file = os.path.join(mount_helper_root_folder, GLOBAL_CONFIG) 117 | cp = read_config(config_file) 118 | value = str(cp.get("global", key)) 119 | return value 120 | 121 | 122 | # Given: path : file path 123 | # n : the number of parent level we want to reach 124 | # Returns: parent path of certain level n 125 | # Example: uppath('/usr/lib/java', 1) -> '/usr/lib' 126 | # uppath('/usr/lib/java', 2) -> '/usr' 127 | def uppath(path, n): 128 | return os.sep.join(path.split(os.sep)[:-n]) 129 | 130 | 131 | def read_config(config_file): 132 | try: 133 | p = ConfigParser.SafeConfigParser() 134 | except AttributeError: 135 | p = ConfigParser() 136 | p.read(config_file) 137 | return p 138 | -------------------------------------------------------------------------------- /test/watchdog_test/test_clean_up_previous_tunnel_pids.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import json 10 | import tempfile 11 | from datetime import datetime 12 | 13 | import watchdog 14 | 15 | from .. 
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#

import json
import tempfile
from datetime import datetime

import watchdog

from .. import utils

PID = 1234
STATE = {
    "pid": PID,
    "commonName": "deadbeef.com",
    "certificate": "/tmp/foobar",
    "certificateCreationTime": datetime.utcnow().strftime(
        watchdog.CERT_DATETIME_FORMAT
    ),
    "mountStateDir": "fs-deadbeef.mount.dir.12345",
    "privateKey": "/tmp/foobarbaz",
    "accessPoint": "fsap-fedcba9876543210",
}

# check_process_name outputs used to classify the recorded pid: a stunnel
# command line (tunnel still alive), an unrelated command line (pid reused by
# something else), or empty output (process is gone).
PROCESS_NAME_OUTPUT = "stunnel/var/run/efs/stunnel-config/fs-deadbeef.mount.dir.12345"
PROCESS_NAME_OUTPUT_LWP = "/foo/bar/baz"
PROCESS_NAME_OUTPUT_ERR = ""


def setup_mocks(mocker, state_files, process_name_output):
    # Stub the filesystem and process probes, and hand back the
    # rewrite_state_file mock so each test can assert whether cleanup
    # rewrote the state file.
    for target, value in (
        ("watchdog.get_state_files", state_files),
        ("watchdog.check_process_name", process_name_output),
    ):
        mocker.patch(target, return_value=value)

    return mocker.patch("watchdog.rewrite_state_file")


def create_state_file(tmpdir, content=json.dumps(STATE)):
    # Drop a state file with the given content into tmpdir and return its
    # (directory, basename) pair.
    sfile = tmpdir.join(tempfile.mkstemp()[1])
    sfile.write(content, ensure=True)
    return sfile.dirname, sfile.basename


def test_malformed_state_file(mocker, tmpdir):
    # Unparseable state files must be left alone.
    sf_dir, sf_name = create_state_file(tmpdir, "not-json")

    rewrite_mock = setup_mocks(
        mocker, state_files={"mnt": sf_name}, process_name_output=PROCESS_NAME_OUTPUT
    )

    watchdog.clean_up_previous_tunnel_pids(sf_dir)

    utils.assert_not_called(rewrite_mock)


def test_clean_up_active_stunnel_from_previous_watchdog(mocker, tmpdir):
    # A pid that still maps to a live stunnel keeps its state file as-is.
    sf_dir, sf_name = create_state_file(tmpdir)

    rewrite_mock = setup_mocks(
        mocker, state_files={"mnt": sf_name}, process_name_output=PROCESS_NAME_OUTPUT
    )

    watchdog.clean_up_previous_tunnel_pids(sf_dir)

    utils.assert_not_called(rewrite_mock)


def test_clean_up_active_LWP_from_driver(mocker, tmpdir):
    # A pid that now belongs to an unrelated process gets scrubbed.
    sf_dir, sf_name = create_state_file(tmpdir)

    rewrite_mock = setup_mocks(
        mocker,
        state_files={"mnt": sf_name},
        process_name_output=PROCESS_NAME_OUTPUT_LWP,
    )

    watchdog.clean_up_previous_tunnel_pids(sf_dir)

    utils.assert_called_once(rewrite_mock)


def test_clean_up_stunnel_pid_from_previous_driver(mocker, tmpdir):
    # A pid whose process no longer exists gets scrubbed.
    sf_dir, sf_name = create_state_file(tmpdir)

    rewrite_mock = setup_mocks(
        mocker,
        state_files={"mnt": sf_name},
        process_name_output=PROCESS_NAME_OUTPUT_ERR,
    )

    watchdog.clean_up_previous_tunnel_pids(sf_dir)

    utils.assert_called_once(rewrite_mock)


def test_no_state_files_from_previous_driver(mocker, tmpdir):
    # Nothing to rewrite when no state files exist.
    rewrite_mock = setup_mocks(
        mocker, state_files={}, process_name_output=PROCESS_NAME_OUTPUT
    )

    watchdog.clean_up_previous_tunnel_pids(tmpdir)

    utils.assert_not_called(rewrite_mock)


def test_clean_up_multiple_stunnel_pids(mocker, tmpdir):
    # Every stale state file gets rewritten, not just the first one found.
    sf_dir, sf_name_1 = create_state_file(tmpdir)

    second_state = dict(STATE, pid=5678)
    sf_dir, sf_name_2 = create_state_file(tmpdir, content=json.dumps(second_state))

    rewrite_mock = setup_mocks(
        mocker,
        state_files={"mnt/a1": sf_name_1, "mnt/a2": sf_name_2},
        process_name_output=PROCESS_NAME_OUTPUT_ERR,
    )

    watchdog.clean_up_previous_tunnel_pids(sf_dir)

    utils.assert_called(rewrite_mock)


def test_clean_up_stunnel_no_pid(mocker, tmpdir):
    # State files that never recorded a pid are skipped entirely.
    pidless_state = dict(STATE)
    pidless_state.pop("pid")

    sf_dir, sf_name = create_state_file(tmpdir, content=json.dumps(pidless_state))

    rewrite_mock = setup_mocks(
        mocker,
        state_files={"mnt": sf_name},
        process_name_output=PROCESS_NAME_OUTPUT_LWP,
    )

    watchdog.clean_up_previous_tunnel_pids(sf_dir)

    utils.assert_not_called(rewrite_mock)
use std::result;

/// Transpose a `Result<Option<T>, E>` into an `Option<Result<T, E>>`:
/// `Ok(None)` becomes `None`, `Ok(Some(v))` becomes `Some(Ok(v))`, and
/// `Err(e)` becomes `Some(Err(e))`. Handy for `filter_map` over fallible
/// emitters that may legitimately produce nothing.
fn result_option<T, E>(res: result::Result<Option<T>, E>) -> Option<result::Result<T, E>> {
    match res {
        Err(e) => Some(Err(e)),
        Ok(opt) => opt.map(Ok),
    }
}
52 | pub fn generate(infile: &str, mut input: In, mut output: Out) -> Result<()> 53 | where 54 | In: Read, 55 | Out: Write, 56 | { 57 | let mut source = String::new(); 58 | 59 | input.read_to_string(&mut source)?; 60 | 61 | let xdr = match spec::specification(&source) { 62 | Ok(defns) => Symtab::new(&defns), 63 | Err(e) => return Err(xdr::Error::from(format!("parse error: {}", e))), 64 | }; 65 | 66 | let xdr = xdr; 67 | 68 | let res: Vec<_> = { 69 | let consts = xdr 70 | .constants() 71 | .filter_map(|(c, &(v, ref scope))| { 72 | if scope.is_none() { 73 | Some(spec::Const(c.clone(), v)) 74 | } else { 75 | None 76 | } 77 | }) 78 | .map(|c| c.define(&xdr)); 79 | 80 | let typespecs = xdr 81 | .typespecs() 82 | .map(|(n, ty)| spec::Typespec(n.clone(), ty.clone())) 83 | .map(|c| c.define(&xdr)); 84 | 85 | let typesyns = xdr 86 | .typesyns() 87 | .map(|(n, ty)| spec::Typesyn(n.clone(), ty.clone())) 88 | .map(|c| c.define(&xdr)); 89 | 90 | let packers = xdr 91 | .typespecs() 92 | .map(|(n, ty)| spec::Typespec(n.clone(), ty.clone())) 93 | .filter_map(|c| result_option(c.pack(&xdr))); 94 | 95 | let unpackers = xdr 96 | .typespecs() 97 | .map(|(n, ty)| spec::Typespec(n.clone(), ty.clone())) 98 | .filter_map(|c| result_option(c.unpack(&xdr))); 99 | 100 | consts 101 | .chain(typespecs) 102 | .chain(typesyns) 103 | .chain(packers) 104 | .chain(unpackers) 105 | .collect::>>()? 106 | }; 107 | 108 | let _ = writeln!( 109 | output, 110 | r#" 111 | // GENERATED CODE 112 | // 113 | // Generated from {} by xdrgen. 114 | // 115 | // DO NOT EDIT 116 | 117 | "#, 118 | infile 119 | ); 120 | 121 | for it in res { 122 | let _ = writeln!(output, "{}\n", it.as_str()); 123 | } 124 | 125 | Ok(()) 126 | } 127 | 128 | /// Simplest possible way to generate Rust code from an XDR specification. 
129 | /// 130 | /// It is intended for use in a build.rs script: 131 | /// 132 | /// ```ignore 133 | /// extern crate xdrgen; 134 | /// 135 | /// fn main() { 136 | /// xdrgen::compile("src/simple.x").unwrap(); 137 | /// } 138 | /// ``` 139 | /// 140 | /// Output is put into OUT_DIR, and can be included: 141 | /// 142 | /// ```ignore 143 | /// mod simple { 144 | /// use xdr_codec; 145 | /// 146 | /// include!(concat!(env!("OUT_DIR"), "/simple_xdr.rs")); 147 | /// } 148 | /// ``` 149 | /// 150 | /// If your specification uses types which are not within the specification, you can provide your 151 | /// own implementations of `Pack` and `Unpack` for them. 152 | pub fn compile

(infile: P) -> Result<()> 153 | where 154 | P: AsRef + Display, 155 | { 156 | let input = File::open(&infile)?; 157 | 158 | let mut outdir = PathBuf::from(env::var("OUT_DIR").unwrap_or(String::from("."))); 159 | let outfile = PathBuf::from(infile.as_ref()) 160 | .file_stem() 161 | .unwrap() 162 | .to_owned() 163 | .into_string() 164 | .unwrap() 165 | .replace("-", "_"); 166 | 167 | outdir.push(&format!("{}_xdr.rs", outfile)); 168 | 169 | let output = File::create(outdir)?; 170 | 171 | generate( 172 | infile.as_ref().as_os_str().to_str().unwrap_or(""), 173 | input, 174 | output, 175 | ) 176 | } 177 | -------------------------------------------------------------------------------- /test/watchdog_test/test_get_current_local_nfs_mounts.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#

import logging

import watchdog

MOUNT_FMT_LINE = "{address}:/ {mountpoint} {fs_type} {options} 0 0"
DEFAULT_OPTS = "rw,port=12345"


def _create_mount_file(tmpdir, lines):
    # Write a fake /proc/mounts-style file and return its path as a string.
    mount_file = tmpdir.join("mounts")
    mount_file.write("\n".join(lines))
    return str(mount_file)


def _mount_line(
    address="127.0.0.1", mountpoint="/mnt", fs_type="nfs4", options=DEFAULT_OPTS
):
    # Render one mount-table line; the defaults describe a loopback NFSv4
    # mount with an explicit port, which is watchdog's happy path.
    return MOUNT_FMT_LINE.format(
        address=address, mountpoint=mountpoint, fs_type=fs_type, options=options
    )


def test_no_mounts(tmpdir):
    mount_file = _create_mount_file(tmpdir, [])

    assert watchdog.get_current_local_nfs_mounts(mount_file) == {}


def test_no_local_mounts(tmpdir):
    # NFS mounts on a non-loopback address are not tracked.
    mount_file = _create_mount_file(tmpdir, [_mount_line(address="10.1.0.1")])

    assert watchdog.get_current_local_nfs_mounts(mount_file) == {}


def test_no_local_nfs_mounts(tmpdir):
    # Loopback mounts of other filesystem types are ignored.
    mount_file = _create_mount_file(tmpdir, [_mount_line(fs_type="ext4")])

    assert watchdog.get_current_local_nfs_mounts(mount_file) == {}


def test_invalid_mount_with_nfs(tmpdir, caplog):
    # A malformed nfs4 line (space inside the mount point) produces the
    # nfs4-specific warning message.
    mount_file = _create_mount_file(tmpdir, [_mount_line(mountpoint="/ mnt")])
    with caplog.at_level(logging.WARNING):
        watchdog.get_current_local_nfs_mounts(mount_file)
    assert "Watchdog ignoring malformed nfs4 mount" in caplog.text


def test_invalid_mount_without_nfs(tmpdir, caplog):
    # A malformed non-nfs line produces the generic malformed-mount message.
    mount_file = _create_mount_file(
        tmpdir, [_mount_line(mountpoint="/ mnt", fs_type="overlay")]
    )
    with caplog.at_level(logging.DEBUG):
        watchdog.get_current_local_nfs_mounts(mount_file)
    assert "Watchdog ignoring malformed mount" in caplog.text


def test_invalid_mount_arguments_without_nfs(tmpdir, caplog):
    # Malformed option strings (space after "port=") are treated the same way.
    mount_file = _create_mount_file(
        tmpdir,
        [_mount_line(mountpoint="/ mnt", fs_type="overlay", options="rw,port= 12345")],
    )
    with caplog.at_level(logging.DEBUG):
        watchdog.get_current_local_nfs_mounts(mount_file)
    assert "Watchdog ignoring malformed mount" in caplog.text


def test_local_nfs_mount(tmpdir):
    # A loopback nfs4 mount is keyed by "<mountpoint>.<port>".
    mount_file = _create_mount_file(tmpdir, [_mount_line()])

    mounts = watchdog.get_current_local_nfs_mounts(mount_file)

    assert len(mounts) == 1
    assert "mnt.12345" in mounts


def test_local_nfs_mount_default_nfs_port(tmpdir):
    # Without an explicit port option the default NFS port 2049 is assumed.
    mount_file = _create_mount_file(tmpdir, [_mount_line(options="rw,noresvport")])

    mounts = watchdog.get_current_local_nfs_mounts(mount_file)

    assert len(mounts) == 1
    assert "mnt.2049" in mounts


def test_local_nfs_mount_noresvport(tmpdir):
    # noresvport alongside an explicit port keeps the explicit port.
    mount_file = _create_mount_file(
        tmpdir, [_mount_line(options="rw,noresvport,port=12345")]
    )

    mounts = watchdog.get_current_local_nfs_mounts(mount_file)

    assert len(mounts) == 1
    assert "mnt.12345" in mounts
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.

import os

import pytest

import mount_efs

from .. import utils

try:
    import ConfigParser
except ImportError:
    from configparser import ConfigParser


def test_get_aws_profile_with_env_variable(mocker):
    """AWS_PROFILE from the environment is used when no mount option names a profile."""
    options = {}
    use_iam = True

    mocker.patch.dict(os.environ, {"AWS_PROFILE": "test-profile"})

    # Empty credential/config files so only the env variable can win.
    mocker.patch("mount_efs.read_config", return_value=ConfigParser())

    assert mount_efs.get_aws_profile(options, use_iam) == "test-profile"


def test_get_aws_profile_mount_option_takes_precedence(mocker):
    """An explicit awsprofile mount option beats the AWS_PROFILE variable."""
    options = {"awsprofile": "mount-profile"}
    use_iam = True

    mocker.patch.dict(os.environ, {"AWS_PROFILE": "env-profile"})

    assert mount_efs.get_aws_profile(options, use_iam) == "mount-profile"


def test_get_aws_profile_no_env_variable(mocker):
    """With no AWS_PROFILE set, the "default" profile is the fallback."""
    options = {}
    use_iam = True

    # Scrub AWS_PROFILE from the environment for the duration of the test.
    env_without_profile = dict(os.environ)
    env_without_profile.pop("AWS_PROFILE", None)
    mocker.patch.dict(os.environ, env_without_profile, clear=True)

    # Make the default profile look like it has credentials on disk.
    mock_config = mocker.MagicMock()
    mock_config.get.return_value = "fake_access_key"
    mocker.patch("mount_efs.read_config", return_value=mock_config)

    assert mount_efs.get_aws_profile(options, use_iam) == "default"


def test_get_target_region_with_aws_region_env(mocker):
    """AWS_REGION from the environment selects the target region."""
    config = mocker.MagicMock()
    options = {}

    mocker.patch.dict(os.environ, {"AWS_REGION": "us-west-2"})

    assert mount_efs.get_target_region(config, options) == "us-west-2"


def test_get_target_region_with_aws_default_region_env(mocker):
    """AWS_DEFAULT_REGION is honored when AWS_REGION is absent."""
    config = mocker.MagicMock()
    options = {}

    env_vars = dict(os.environ)
    env_vars.pop("AWS_REGION", None)
    env_vars["AWS_DEFAULT_REGION"] = "eu-central-1"
    mocker.patch.dict(os.environ, env_vars, clear=True)

    assert mount_efs.get_target_region(config, options) == "eu-central-1"


def test_get_target_region_mount_option_takes_precedence(mocker):
    """The region mount option beats both environment variables."""
    config = mocker.MagicMock()
    options = {"region": "ap-southeast-1"}

    mocker.patch.dict(
        os.environ, {"AWS_REGION": "us-west-2", "AWS_DEFAULT_REGION": "eu-central-1"}
    )

    assert mount_efs.get_target_region(config, options) == "ap-southeast-1"


def test_get_target_region_aws_region_precedence_over_default(mocker):
    """AWS_REGION wins over AWS_DEFAULT_REGION when both are set."""
    config = mocker.MagicMock()
    options = {}

    mocker.patch.dict(
        os.environ, {"AWS_REGION": "us-west-2", "AWS_DEFAULT_REGION": "eu-central-1"}
    )

    assert mount_efs.get_target_region(config, options) == "us-west-2"


def test_get_target_region_fallback_to_config_file(mocker):
    """With no region env variables set, the region comes from the config file."""
    config = mocker.MagicMock()
    config.get.return_value = "us-east-1"
    options = {}

    env_vars = dict(os.environ)
    for key in ("AWS_REGION", "AWS_DEFAULT_REGION"):
        env_vars.pop(key, None)
    mocker.patch.dict(os.environ, env_vars, clear=True)

    assert mount_efs.get_target_region(config, options) == "us-east-1"


def test_get_target_region_fallback_to_metadata_service(mocker):
    """When the config file has no region either, instance metadata is queried."""
    config = mocker.MagicMock()
    config.get.side_effect = mount_efs.NoOptionError("region", "section")
    options = {}

    env_vars = dict(os.environ)
    for key in ("AWS_REGION", "AWS_DEFAULT_REGION"):
        env_vars.pop(key, None)
    mocker.patch.dict(os.environ, env_vars, clear=True)

    mocker.patch(
        "mount_efs.get_region_from_instance_metadata", return_value="us-west-1"
    )

    assert mount_efs.get_target_region(config, options) == "us-west-1"
Status](https://coveralls.io/repos/github/jsgf/rust-xdr/badge.svg?branch=master)](https://coveralls.io/github/jsgf/rust-xdr?branch=master)

This crate provides a set of runtime routines to encode and decode
basic XDR types, which can be used with
[xdrgen's](https://github.com/jsgf/rust-xdrgen) automatically
generated code, or with hand-written codecs.

This crate also implements XDR-RPC record marking in the form of the
`XdrRecordReader` and `XdrRecordWriter` IO filters.

## Usage

The easiest way to use this library is with [xdrgen](https://crates.io/crates/xdrgen),
which takes a specification in a `.x` file and generates all the necessary
definitions for you.

However, you can manually implement the `Pack` and `Unpack` traits for your own
types:

```
struct MyType {
    a: u32,
    b: Vec<u8>,
}

impl<W> Pack<W> for MyType
    where W: Write
{
    fn pack(&self, out: &mut W) -> xdr_codec::Result<usize> {
        let mut sz = 0;

        sz += try!(self.a.pack(out));
        sz += try!(Opaque::borrowed(&self.b).pack(out));

        Ok(sz)
    }
}

impl<R> Unpack<R> for MyType
    where R: Read
{
    fn unpack(input: &mut R) -> Result<(Self, usize)> {
        let mut rsz = 0;
        let ret = MyType {
            a: { let (v, sz) = try!(Unpack::unpack(input)); rsz += sz; v },
            b: { let (v, sz) = try!(Opaque::unpack(input)); rsz += sz; v.into_owned() },
        };

        Ok((ret, rsz))
    }
}
```

or alternatively, put the following in src/mytype.x:

```
struct MyType {
    unsigned int a;
    opaque b<>;
}
```

then add a `build.rs` build script to your crate:

```
extern crate xdrgen;

fn main() {
    xdrgen::compile("src/mytype.x").expect("xdrgen mytype.x failed");
}
```

then include the generated code in one of your modules:
```
extern crate xdr_codec;

| // ... 82 | 83 | include!(concat!(env!("OUT_DIR"), "/mytype_xdr.rs")); 84 | ``` 85 | 86 | ## Documentation 87 | 88 | Complete documentation is [here](https://docs.rs/xdr-codec/). 89 | 90 | ## Changes in 0.4.2 91 | 92 | Implement standard traits for `char`/`unsigned char` (`i8`/`u8` in Rust). 93 | 94 | Also handle `short`/`unsigned short` as an extension in .x files. They are still 95 | represented in memory as `i32`/`u32`. 96 | 97 | ## Changes in 0.4 98 | 99 | Version 0.4 added the `bytecodec` feature, which implements `Pack` and `Unpack` 100 | for byte types (`i8` and `u8`). This is normally unwanted, since bytes suffer from 101 | massive padding on the wire when used individually, or in an array of bytes (`opaque` 102 | is the preferred way to transport compact byte arrays). However, some protocols 103 | are mis-specified to use padded byte arrays, so `bytecodec` is available for them. 104 | 105 | ## Changes in 0.2 106 | 107 | Versions starting with 0.2 introduced a number of breaking changes: 108 | 109 | * `u8` no longer implements `Pack`/`Unpack` 110 | 111 | XDR doesn't directly support encoding individual bytes; if it did, it would 112 | require each one to be padded out to 4 bytes. xdr-codec 0.1 implemented 113 | `Pack` and `Unpack` for `u8` primarily to allow direct use of a `Vec` 114 | as an XDR `opaque<>`. However, this also allowed direct use of 115 | `u8::pack()` which makes it too easy to accidentally generate a malformed 116 | XDR stream without proper padding. 117 | 118 | In 0.2, u8 no longer implements `Pack` and `Unpack`. Instead, xdr-codec 119 | has a `Opaque<'a>(&'a [u8])` wrapper which does. This allows any `[u8]` 120 | slice to be packed and unpacked. 121 | 122 | It also has a set of helper functions for packing and unpacking both 123 | flexible and fixed-sized opaques, strings and general arrays. These make 124 | it straightforward to manage arrays in a way that is robust. 
This also allows 125 | xdrgen to generate code for fixed-sized arrays that's not completely unrolled 126 | unpack calls. 127 | 128 | (I'm not entirely happy with the proliferation of functions however, so 129 | I'm thinking about a trait-based approach that is more idiomatic Rust. That 130 | may have to be 0.3.) 131 | 132 | * Extensions to XDR record marking 133 | 134 | I added `XdrRecordReaderIter` which allows iteration over records. Previously 135 | all the records in the stream were flattened into a plain byte stream, which 136 | defeats the purpose of the records. `XdrRecordReader` still implements `Read` 137 | so that's still available, but it also implements `IntoIterator` so you can 138 | iterate records. 139 | 140 | The addition of more unit tests (see below) pointed out some poorly thought 141 | out corner cases, so now record generation and use of the EOR marker is more 142 | consistent. 143 | 144 | * More unit tests, including quickcheck generated ones 145 | 146 | I've increased the number of tests, and added quickcheck generated tests 147 | which cleared up a few corner cases. 148 | 149 | ## License 150 | 151 | Licensed under either of 152 | 153 | * Apache License, Version 2.0, ([LICENSE-APACHE](http://www.apache.org/licenses/LICENSE-2.0)) 154 | * MIT license ([LICENSE-MIT](http://opensource.org/licenses/MIT)) 155 | 156 | at your option. 157 | 158 | ### Contribution 159 | 160 | Unless you explicitly state otherwise, any contribution intentionally submitted 161 | for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any 162 | additional terms or conditions. 163 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_write_tls_tunnel_state_file.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 
3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 7 | # 8 | 9 | import json 10 | import os 11 | from datetime import datetime 12 | 13 | import mount_efs 14 | 15 | FS_ID = "fs-deadbeef" 16 | PID = 1234 17 | PORT = 54323 18 | COMMAND = ["stunnel", "/some/config/file"] 19 | NETNS = "/proc/1/net/ns" 20 | NETNS_COMMAND = ["nsenter", "--net=" + NETNS] + COMMAND 21 | FILES = ["/tmp/foo", "/tmp/bar"] 22 | DATETIME_FORMAT = "%y%m%d%H%M%SZ" 23 | 24 | 25 | def test_write_tunnel_state_file_netns(tmpdir): 26 | state_file_dir = str(tmpdir) 27 | 28 | mount_point = "/home/user/foo/mount" 29 | 30 | current_time = datetime.utcnow() 31 | cert_creation_time = current_time.strftime(DATETIME_FORMAT) 32 | 33 | cert_details = { 34 | "accessPoint": "fsap-fedcba9876543210", 35 | "certificate": "/tmp/baz", 36 | "privateKey": "/tmp/key.pem", 37 | "mountStateDir": "fs-deadbeef.mount.dir.12345", 38 | "commonName": "fs-deadbeef.efs.us-east-1.amazonaws.com", 39 | "region": "us-east-1", 40 | "fsId": FS_ID, 41 | "certificateCreationTime": cert_creation_time, 42 | "useIam": True, 43 | } 44 | 45 | state_file = mount_efs.write_tunnel_state_file( 46 | FS_ID, 47 | mount_point, 48 | PORT, 49 | PID, 50 | NETNS_COMMAND, 51 | FILES, 52 | state_file_dir, 53 | cert_details, 54 | ) 55 | 56 | assert FS_ID in state_file 57 | assert os.sep not in state_file[state_file.find(FS_ID) :] 58 | 59 | assert os.path.exists(state_file_dir) 60 | 61 | state_file = os.path.join(state_file_dir, state_file) 62 | assert os.path.exists(state_file) 63 | 64 | with open(state_file) as f: 65 | state = json.load(f) 66 | 67 | assert PID == state.get("pid") 68 | assert NETNS_COMMAND == state.get("cmd") 69 | assert FILES == state.get("files") 70 | assert cert_details["commonName"] == state.get("commonName") 71 | assert cert_details["certificate"] == state.get("certificate") 72 | assert 
cert_details["certificateCreationTime"] == state.get( 73 | "certificateCreationTime" 74 | ) 75 | assert cert_details["mountStateDir"] == state.get("mountStateDir") 76 | assert cert_details["privateKey"] == state.get("privateKey") 77 | assert cert_details["region"] == state.get("region") 78 | assert cert_details["accessPoint"] == state.get("accessPoint") 79 | assert cert_details["fsId"] == state.get("fsId") 80 | assert cert_details["useIam"] == state.get("useIam") 81 | 82 | 83 | def test_write_tunnel_state_file(tmpdir): 84 | state_file_dir = str(tmpdir) 85 | 86 | mount_point = "/home/user/foo/mount" 87 | 88 | current_time = datetime.utcnow() 89 | cert_creation_time = current_time.strftime(DATETIME_FORMAT) 90 | 91 | cert_details = { 92 | "accessPoint": "fsap-fedcba9876543210", 93 | "certificate": "/tmp/baz", 94 | "privateKey": "/tmp/key.pem", 95 | "mountStateDir": "fs-deadbeef.mount.dir.12345", 96 | "commonName": "fs-deadbeef.efs.us-east-1.amazonaws.com", 97 | "region": "us-east-1", 98 | "fsId": FS_ID, 99 | "certificateCreationTime": cert_creation_time, 100 | "useIam": True, 101 | } 102 | 103 | state_file = mount_efs.write_tunnel_state_file( 104 | FS_ID, mount_point, PORT, PID, COMMAND, FILES, state_file_dir, cert_details 105 | ) 106 | 107 | assert FS_ID in state_file 108 | assert os.sep not in state_file[state_file.find(FS_ID) :] 109 | 110 | assert os.path.exists(state_file_dir) 111 | 112 | state_file = os.path.join(state_file_dir, state_file) 113 | assert os.path.exists(state_file) 114 | 115 | with open(state_file) as f: 116 | state = json.load(f) 117 | 118 | assert PID == state.get("pid") 119 | assert COMMAND == state.get("cmd") 120 | assert FILES == state.get("files") 121 | assert cert_details["commonName"] == state.get("commonName") 122 | assert cert_details["certificate"] == state.get("certificate") 123 | assert cert_details["certificateCreationTime"] == state.get( 124 | "certificateCreationTime" 125 | ) 126 | assert cert_details["mountStateDir"] == 
state.get("mountStateDir") 127 | assert cert_details["privateKey"] == state.get("privateKey") 128 | assert cert_details["region"] == state.get("region") 129 | assert cert_details["accessPoint"] == state.get("accessPoint") 130 | assert cert_details["fsId"] == state.get("fsId") 131 | assert cert_details["useIam"] == state.get("useIam") 132 | 133 | 134 | def test_write_tunnel_state_file_no_cert(tmpdir): 135 | state_file_dir = str(tmpdir) 136 | 137 | mount_point = "/home/user/foo/mount" 138 | 139 | state_file = mount_efs.write_tunnel_state_file( 140 | FS_ID, mount_point, PORT, PID, COMMAND, FILES, state_file_dir 141 | ) 142 | 143 | assert FS_ID in state_file 144 | assert os.sep not in state_file[state_file.find(FS_ID) :] 145 | 146 | assert os.path.exists(state_file_dir) 147 | 148 | state_file = os.path.join(state_file_dir, state_file) 149 | assert os.path.exists(state_file) 150 | 151 | with open(state_file) as f: 152 | state = json.load(f) 153 | 154 | assert PID == state.get("pid") 155 | assert COMMAND == state.get("cmd") 156 | assert FILES == state.get("files") 157 | assert "commonName" not in state 158 | assert "certificate" not in state 159 | assert "certificateCreationTime" not in state 160 | assert "mountStateDir" not in state 161 | assert "privateKey" not in state 162 | assert "region" not in state 163 | assert "accessPoint" not in state 164 | assert "fsId" not in state 165 | assert "useIam" not in state 166 | -------------------------------------------------------------------------------- /src/proxy/src/main.rs: -------------------------------------------------------------------------------- 1 | use crate::config_parser::ProxyConfig; 2 | use crate::connections::{PlainTextPartitionFinder, TlsPartitionFinder}; 3 | use crate::tls::get_tls_config; 4 | use crate::tls::TlsConfig; 5 | use clap::Parser; 6 | use controller::Controller; 7 | use log::{debug, error, info}; 8 | use std::path::Path; 9 | use std::sync::Arc; 10 | use tokio::io::AsyncWriteExt; 11 | use 
tokio::signal; 12 | use tokio::sync::Mutex; 13 | use tokio_util::sync::CancellationToken; 14 | 15 | mod config_parser; 16 | mod connection_task; 17 | mod connections; 18 | mod controller; 19 | mod efs_rpc; 20 | mod error; 21 | mod logger; 22 | mod proxy; 23 | mod proxy_identifier; 24 | mod proxy_task; 25 | mod rpc; 26 | mod shutdown; 27 | mod status_reporter; 28 | mod test_utils; 29 | mod tls; 30 | 31 | #[allow(clippy::all)] 32 | #[allow(deprecated)] 33 | #[allow(invalid_value)] 34 | #[allow(non_camel_case_types)] 35 | #[allow(unused_assignments)] 36 | mod efs_prot { 37 | include!(concat!(env!("OUT_DIR"), "/efs_prot_xdr.rs")); 38 | } 39 | 40 | #[tokio::main] 41 | async fn main() { 42 | let args = Args::parse(); 43 | 44 | let proxy_config = match ProxyConfig::from_path(Path::new(&args.proxy_config_path)) { 45 | Ok(config) => config, 46 | Err(e) => panic!("Failed to read configuration. {}", e), 47 | }; 48 | 49 | if let Some(_log_file_path) = &proxy_config.output { 50 | logger::init(&proxy_config) 51 | } 52 | 53 | info!("Running with configuration: {:?}", proxy_config); 54 | 55 | let pid_file_path = Path::new(&proxy_config.pid_file_path); 56 | let _ = write_pid_file(pid_file_path).await; 57 | 58 | // This "status reporter" is currently only used in tests 59 | let (_status_requester, status_reporter) = status_reporter::create_status_channel(); 60 | 61 | let sigterm_cancellation_token = CancellationToken::new(); 62 | let mut sigterm_listener = match signal::unix::signal(signal::unix::SignalKind::terminate()) { 63 | Ok(listener) => listener, 64 | Err(e) => panic!("Failed to create SIGTERM listener. 
{}", e), 65 | }; 66 | 67 | let controller_handle = if args.tls { 68 | let tls_config = match get_tls_config(&proxy_config).await { 69 | Ok(config) => Arc::new(Mutex::new(config)), 70 | Err(e) => panic!("Failed to obtain TLS config:{}", e), 71 | }; 72 | 73 | run_sighup_handler(proxy_config.clone(), tls_config.clone()); 74 | 75 | let controller = Controller::new( 76 | &proxy_config.nested_config.listen_addr, 77 | Arc::new(TlsPartitionFinder::new(tls_config)), 78 | status_reporter, 79 | ) 80 | .await; 81 | tokio::spawn(controller.run(sigterm_cancellation_token.clone())) 82 | } else { 83 | let controller = Controller::new( 84 | &proxy_config.nested_config.listen_addr, 85 | Arc::new(PlainTextPartitionFinder { 86 | mount_target_addr: proxy_config.nested_config.mount_target_addr.clone(), 87 | }), 88 | status_reporter, 89 | ) 90 | .await; 91 | tokio::spawn(controller.run(sigterm_cancellation_token.clone())) 92 | }; 93 | 94 | tokio::select! { 95 | shutdown_reason = controller_handle => error!("Shutting down. 
{:?}", shutdown_reason), 96 | _ = sigterm_listener.recv() => { 97 | info!("Received SIGTERM"); 98 | sigterm_cancellation_token.cancel(); 99 | }, 100 | } 101 | if pid_file_path.exists() { 102 | match tokio::fs::remove_file(&pid_file_path).await { 103 | Ok(()) => info!("Removed pid file"), 104 | Err(e) => error!("Unable to remove pid_file: {e}"), 105 | } 106 | } 107 | } 108 | 109 | async fn write_pid_file(pid_file_path: &Path) -> Result<(), anyhow::Error> { 110 | let mut pid_file = tokio::fs::OpenOptions::new() 111 | .write(true) 112 | .create(true) 113 | .truncate(true) 114 | .mode(0o644) 115 | .open(pid_file_path) 116 | .await?; 117 | pid_file 118 | .write_all(std::process::id().to_string().as_bytes()) 119 | .await?; 120 | pid_file.write_u8(b'\x0A').await?; 121 | pid_file.flush().await?; 122 | Ok(()) 123 | } 124 | 125 | fn run_sighup_handler(proxy_config: ProxyConfig, tls_config: Arc>) { 126 | tokio::spawn(async move { 127 | let mut sighup_listener = match signal::unix::signal(signal::unix::SignalKind::hangup()) { 128 | Ok(listener) => listener, 129 | Err(e) => panic!("Failed to create SIGHUP listener. {}", e), 130 | }; 131 | 132 | loop { 133 | sighup_listener 134 | .recv() 135 | .await 136 | .expect("SIGHUP listener stream is closed"); 137 | 138 | debug!("Received SIGHUP"); 139 | let mut locked_config = tls_config.lock().await; 140 | match get_tls_config(&proxy_config).await { 141 | Ok(config) => *locked_config = config, 142 | Err(e) => panic!("Failed to acquire TLS config. 
{}", e), 143 | } 144 | } 145 | }); 146 | } 147 | 148 | #[derive(Parser, Debug, Clone)] 149 | pub struct Args { 150 | pub proxy_config_path: String, 151 | 152 | #[arg(long, default_value_t = false)] 153 | pub tls: bool, 154 | } 155 | 156 | #[cfg(test)] 157 | pub mod tests { 158 | 159 | use super::*; 160 | 161 | #[tokio::test] 162 | async fn test_write_pid_file() -> Result<(), Box> { 163 | let pid_file = tempfile::NamedTempFile::new()?; 164 | let pid_file_path = pid_file.path(); 165 | 166 | write_pid_file(pid_file_path).await?; 167 | 168 | let expected_pid = std::process::id().to_string(); 169 | let read_pid = tokio::fs::read_to_string(pid_file_path).await?; 170 | assert_eq!(expected_pid + "\n", read_pid); 171 | Ok(()) 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /src/proxy/src/connection_task.rs: -------------------------------------------------------------------------------- 1 | use bytes::BytesMut; 2 | use log::{debug, error, trace}; 3 | use tokio::{ 4 | io::{split, AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf}, 5 | sync::mpsc::{self}, 6 | }; 7 | use tokio_util::sync::CancellationToken; 8 | 9 | use crate::{ 10 | connections::ProxyStream, 11 | rpc::RpcBatch, 12 | shutdown::{ShutdownHandle, ShutdownReason}, 13 | }; 14 | use crate::{ 15 | proxy_task::{ConnectionMessage, BUFFER_SIZE}, 16 | rpc::RpcFragmentParseError, 17 | }; 18 | 19 | pub struct ConnectionTask { 20 | stream: S, 21 | proxy_receiver: mpsc::Receiver, 22 | proxy_sender: mpsc::Sender, 23 | } 24 | 25 | impl ConnectionTask { 26 | pub fn new( 27 | stream: S, 28 | proxy_receiver: mpsc::Receiver, 29 | proxy_sender: mpsc::Sender, 30 | ) -> Self { 31 | Self { 32 | stream, 33 | proxy_receiver, 34 | proxy_sender, 35 | } 36 | } 37 | 38 | pub async fn run(self, shutdown_handle: ShutdownHandle) { 39 | let (r, w) = split(self.stream); 40 | 41 | let shutdown = shutdown_handle.clone(); 42 | 43 | // This CancellationToken facilitates graceful TLS connection 
closures by ensuring that 44 | // that the ReadHalf is dropped only after the WriteHalf.shutdown() has returned 45 | let connection_cancellation_token = CancellationToken::new(); 46 | 47 | // ConnectionTask Writer receives messages from NFSClient's Reader (ProxyTask reader) and writes them to connection socket 48 | let writer = Self::run_writer( 49 | w, 50 | self.proxy_receiver, 51 | shutdown_handle.clone(), 52 | connection_cancellation_token.clone(), 53 | ); 54 | tokio::spawn(async move { 55 | tokio::select! { 56 | _ = shutdown.cancellation_token.cancelled() => trace!("Cancelled"), 57 | _ = writer => {}, 58 | } 59 | }); 60 | 61 | // ConnectionTask Reader reads messages from NFSServer's socket and sends to NFSClient Writer (ProxyTask writer) 62 | let reader = Self::run_reader(r, self.proxy_sender, shutdown_handle.clone()); 63 | tokio::spawn(async move { 64 | tokio::select! { 65 | _ = connection_cancellation_token.cancelled() => trace!("Cancelled"), 66 | _ = reader => {}, 67 | } 68 | }); 69 | } 70 | 71 | // EFS to Proxy 72 | async fn run_reader( 73 | mut server_read_half: ReadHalf, 74 | sender: mpsc::Sender, 75 | shutdown: ShutdownHandle, 76 | ) { 77 | let reason; 78 | let mut buffer = BytesMut::with_capacity(BUFFER_SIZE); 79 | loop { 80 | match server_read_half.read_buf(&mut buffer).await { 81 | Ok(n_read) => { 82 | if n_read == 0 { 83 | reason = Option::Some(ShutdownReason::NeedsRestart); 84 | break; 85 | } 86 | } 87 | Err(e) => { 88 | debug!("Error reading from server: {:?}", e); 89 | reason = Option::Some(ShutdownReason::NeedsRestart); 90 | break; 91 | } 92 | }; 93 | 94 | match RpcBatch::parse_batch(&mut buffer) { 95 | Ok(Some(batch)) => { 96 | if let Err(e) = sender.send(ConnectionMessage::Response(batch)).await { 97 | debug!("Error sending result back: {:?}", e); 98 | reason = Some(ShutdownReason::UnexpectedError); 99 | break; 100 | } 101 | } 102 | Err(RpcFragmentParseError::InvalidSizeTooSmall) => { 103 | drop(server_read_half); 104 | error!("Server Error: 
invalid RPC size - size too small"); 105 | reason = Some(ShutdownReason::UnexpectedError); 106 | break; 107 | } 108 | Err(RpcFragmentParseError::SizeLimitExceeded) => { 109 | drop(server_read_half); 110 | error!("Server Error: invalid RPC size - size limit exceeded"); 111 | reason = Some(ShutdownReason::UnexpectedError); 112 | break; 113 | } 114 | Ok(None) | Err(RpcFragmentParseError::Incomplete) => (), 115 | } 116 | 117 | if buffer.capacity() == 0 { 118 | buffer.reserve(BUFFER_SIZE) 119 | } 120 | } 121 | shutdown.exit(reason).await; 122 | } 123 | 124 | // Proxy to EFS 125 | async fn run_writer( 126 | mut server_write_half: WriteHalf, 127 | mut receiver: mpsc::Receiver, 128 | shutdown: ShutdownHandle, 129 | connection_cancellation_token: CancellationToken, 130 | ) { 131 | let mut reason = Option::None; 132 | loop { 133 | let Some(batch) = receiver.recv().await else { 134 | debug!("sender dropped"); 135 | break; 136 | }; 137 | 138 | for b in &batch.rpcs { 139 | match server_write_half.write_all(b).await { 140 | Ok(_) => (), 141 | Err(e) => { 142 | debug!("Error writing to server: {:?}", e); 143 | reason = Option::Some(ShutdownReason::NeedsRestart); 144 | break; 145 | } 146 | }; 147 | } 148 | } 149 | 150 | tokio::spawn(async move { 151 | match server_write_half.shutdown().await { 152 | Ok(_) => (), 153 | Err(e) => debug!("Failed to gracefully shutdown connection: {}", e), 154 | }; 155 | connection_cancellation_token.cancel(); 156 | }); 157 | shutdown.exit(reason).await; 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_get_nfs_mount_options.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 
6 | 7 | from unittest.mock import MagicMock 8 | 9 | try: 10 | import ConfigParser 11 | except ImportError: 12 | from configparser import ConfigParser 13 | 14 | import pytest 15 | 16 | import mount_efs 17 | 18 | DEFAULT_OPTIONS = {"tlsport": "3030"} 19 | 20 | 21 | def _get_config(ocsp_enabled=False): 22 | try: 23 | config = ConfigParser.SafeConfigParser() 24 | except AttributeError: 25 | config = ConfigParser() 26 | 27 | mount_nfs_command_retry_count = 4 28 | mount_nfs_command_retry_timeout = 10 29 | mount_nfs_command_retry = "false" 30 | config.add_section(mount_efs.CONFIG_SECTION) 31 | config.set( 32 | mount_efs.CONFIG_SECTION, "retry_nfs_mount_command", mount_nfs_command_retry 33 | ) 34 | config.set( 35 | mount_efs.CONFIG_SECTION, 36 | "retry_nfs_mount_command_count", 37 | str(mount_nfs_command_retry_count), 38 | ) 39 | config.set( 40 | mount_efs.CONFIG_SECTION, 41 | "retry_nfs_mount_command_timeout_sec", 42 | str(mount_nfs_command_retry_timeout), 43 | ) 44 | if ocsp_enabled: 45 | config.set( 46 | mount_efs.CONFIG_SECTION, 47 | "stunnel_check_cert_validity", 48 | "true", 49 | ) 50 | return config 51 | 52 | 53 | def _mock_popen(mocker, returncode=0, stdout="stdout", stderr="stderr"): 54 | popen_mock = MagicMock() 55 | popen_mock.communicate.return_value = ( 56 | stdout, 57 | stderr, 58 | ) 59 | popen_mock.returncode = returncode 60 | 61 | return mocker.patch("subprocess.Popen", return_value=popen_mock) 62 | 63 | 64 | def test_get_default_nfs_mount_options(): 65 | nfs_opts = mount_efs.get_nfs_mount_options(dict(DEFAULT_OPTIONS), _get_config()) 66 | 67 | assert "nfsvers=4.1" in nfs_opts 68 | assert "rsize=1048576" in nfs_opts 69 | assert "wsize=1048576" in nfs_opts 70 | assert "hard" in nfs_opts 71 | assert "timeo=600" in nfs_opts 72 | assert "retrans=2" in nfs_opts 73 | assert "port=3030" in nfs_opts 74 | 75 | 76 | def test_override_nfs_version(): 77 | options = dict(DEFAULT_OPTIONS) 78 | options["nfsvers"] = 4.0 79 | nfs_opts = 
mount_efs.get_nfs_mount_options(options, _get_config()) 80 | 81 | assert "nfsvers=4.0" in nfs_opts 82 | assert "nfsvers=4.1" not in nfs_opts 83 | 84 | 85 | def test_override_nfs_version_alternate_option(): 86 | options = dict(DEFAULT_OPTIONS) 87 | options["vers"] = 4.0 88 | nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) 89 | 90 | assert "vers=4.0" in nfs_opts 91 | assert "nfsvers=4.0" not in nfs_opts 92 | assert "nfsvers=4.1" not in nfs_opts 93 | 94 | 95 | def test_override_rsize(): 96 | options = dict(DEFAULT_OPTIONS) 97 | options["rsize"] = 1 98 | nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) 99 | 100 | assert "rsize=1" in nfs_opts 101 | assert "rsize=1048576" not in nfs_opts 102 | 103 | 104 | def test_override_wsize(): 105 | options = dict(DEFAULT_OPTIONS) 106 | options["wsize"] = 1 107 | nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) 108 | 109 | assert "wsize=1" in nfs_opts 110 | assert "wsize=1048576" not in nfs_opts 111 | 112 | 113 | def test_override_recovery_soft(): 114 | options = dict(DEFAULT_OPTIONS) 115 | options["soft"] = None 116 | nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) 117 | 118 | assert "soft" in nfs_opts 119 | assert "soft=" not in nfs_opts 120 | assert "hard" not in nfs_opts 121 | 122 | 123 | def test_override_timeo(): 124 | options = dict(DEFAULT_OPTIONS) 125 | options["timeo"] = 1 126 | nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) 127 | 128 | assert "timeo=1" in nfs_opts 129 | assert "timeo=600" not in nfs_opts 130 | 131 | 132 | def test_override_retrans(): 133 | options = dict(DEFAULT_OPTIONS) 134 | options["retrans"] = 1 135 | nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) 136 | 137 | assert "retrans=1" in nfs_opts 138 | assert "retrans=2" not in nfs_opts 139 | 140 | 141 | def test_tlsport(): 142 | options = dict(DEFAULT_OPTIONS) 143 | options["tls"] = None 144 | nfs_opts = mount_efs.get_nfs_mount_options(options, 
_get_config()) 145 | 146 | assert "port=3030" in nfs_opts 147 | assert "tls" not in nfs_opts 148 | 149 | 150 | def test_get_default_nfs_mount_options_macos(mocker): 151 | mocker.patch("mount_efs.check_if_platform_is_mac", return_value=True) 152 | nfs_opts = mount_efs.get_nfs_mount_options(dict(DEFAULT_OPTIONS), _get_config()) 153 | 154 | assert "nfsvers=4.0" in nfs_opts 155 | assert "rsize=1048576" in nfs_opts 156 | assert "wsize=1048576" in nfs_opts 157 | assert "hard" in nfs_opts 158 | assert "timeo=600" in nfs_opts 159 | assert "retrans=2" in nfs_opts 160 | assert "mountport=2049" in nfs_opts 161 | assert not "port=3030" in nfs_opts 162 | 163 | 164 | def _test_unsupported_mount_options_macos(mocker, capsys, options={}): 165 | mocker.patch("mount_efs.check_if_platform_is_mac", return_value=True) 166 | _mock_popen(mocker, stdout="nfs") 167 | with pytest.raises(SystemExit) as ex: 168 | mount_efs.get_nfs_mount_options(options, _get_config()) 169 | 170 | assert 0 != ex.value.code 171 | 172 | out, err = capsys.readouterr() 173 | assert "NFSv4.1 is not supported on MacOS" in err 174 | 175 | 176 | def test_unsupported_nfsvers_mount_options_macos(mocker, capsys): 177 | _test_unsupported_mount_options_macos(mocker, capsys, {"nfsvers": "4.1"}) 178 | 179 | 180 | def test_unsupported_vers_mount_options_macos(mocker, capsys): 181 | _test_unsupported_mount_options_macos(mocker, capsys, {"vers": "4.1"}) 182 | 183 | 184 | def test_unsupported_minorversion_mount_options_macos(mocker, capsys): 185 | _test_unsupported_mount_options_macos(mocker, capsys, {"minorversion": 1}) 186 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_choose_tls_port.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. 
See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | import logging 7 | import random 8 | import socket 9 | import sys 10 | import tempfile 11 | import unittest 12 | from unittest.mock import MagicMock 13 | 14 | import pytest 15 | 16 | import mount_efs 17 | 18 | from .. import utils 19 | 20 | try: 21 | import ConfigParser 22 | except ImportError: 23 | from configparser import ConfigParser 24 | 25 | DEFAULT_TLS_PORT_RANGE_LOW = 20049 26 | DEFAULT_TLS_PORT_RANGE_HIGH = 21049 27 | DEFAULT_TLS_PORT = random.randrange( 28 | DEFAULT_TLS_PORT_RANGE_LOW, DEFAULT_TLS_PORT_RANGE_HIGH 29 | ) 30 | 31 | 32 | def _get_config(): 33 | try: 34 | config = ConfigParser.SafeConfigParser() 35 | except AttributeError: 36 | config = ConfigParser() 37 | config.add_section(mount_efs.CONFIG_SECTION) 38 | config.set( 39 | mount_efs.CONFIG_SECTION, 40 | "port_range_lower_bound", 41 | str(DEFAULT_TLS_PORT_RANGE_LOW), 42 | ) 43 | config.set( 44 | mount_efs.CONFIG_SECTION, 45 | "port_range_upper_bound", 46 | str(DEFAULT_TLS_PORT_RANGE_HIGH), 47 | ) 48 | return config 49 | 50 | 51 | def test_choose_tls_port_first_try(mocker, tmpdir): 52 | sock_mock = MagicMock() 53 | sock_mock.getsockname.return_value = ("local_host", DEFAULT_TLS_PORT) 54 | mocker.patch("socket.socket", return_value=sock_mock) 55 | options = {} 56 | 57 | tls_port_sock = mount_efs.choose_tls_port_and_get_bind_sock( 58 | _get_config(), options, str(tmpdir) 59 | ) 60 | tls_port = mount_efs.get_tls_port_from_sock(tls_port_sock) 61 | assert DEFAULT_TLS_PORT_RANGE_LOW <= tls_port <= DEFAULT_TLS_PORT_RANGE_HIGH 62 | 63 | 64 | def test_choose_tls_port_second_try(mocker, tmpdir): 65 | bad_sock = MagicMock() 66 | bad_sock.bind.side_effect = [socket.error, None] 67 | bad_sock.getsockname.return_value = ("local_host", DEFAULT_TLS_PORT) 68 | options = {} 69 | 70 | mocker.patch("socket.socket", return_value=bad_sock) 71 | 72 | tls_port_sock = 
mount_efs.choose_tls_port_and_get_bind_sock( 73 | _get_config(), options, str(tmpdir) 74 | ) 75 | tls_port = mount_efs.get_tls_port_from_sock(tls_port_sock) 76 | 77 | assert DEFAULT_TLS_PORT_RANGE_LOW <= tls_port <= DEFAULT_TLS_PORT_RANGE_HIGH 78 | assert 2 == bad_sock.bind.call_count 79 | assert 1 == bad_sock.getsockname.call_count 80 | 81 | 82 | @unittest.skipIf(sys.version_info < (3, 6), reason="requires python3.6") 83 | def test_choose_tls_port_collision(mocker, tmpdir, caplog): 84 | """Ensure we don't choose a port that is pending mount""" 85 | sock = MagicMock() 86 | mocker.patch("socket.socket", return_value=sock) 87 | mocker.patch( 88 | "random.shuffle", 89 | return_value=range(DEFAULT_TLS_PORT_RANGE_LOW, DEFAULT_TLS_PORT_RANGE_HIGH), 90 | ) 91 | 92 | port_suffix = ".%s" % str(DEFAULT_TLS_PORT_RANGE_LOW) 93 | temp_state_file = tempfile.NamedTemporaryFile( 94 | suffix=port_suffix, prefix="~", dir=tmpdir 95 | ) 96 | 97 | options = {} 98 | with caplog.at_level(logging.DEBUG): 99 | mount_efs.choose_tls_port_and_get_bind_sock(_get_config(), options, tmpdir) 100 | 101 | temp_state_file.close() 102 | sock.bind.assert_called_once_with(("localhost", DEFAULT_TLS_PORT_RANGE_LOW + 1)) 103 | assert "Skip binding TLS port" in caplog.text 104 | 105 | 106 | def test_choose_tls_port_never_succeeds(mocker, tmpdir, capsys): 107 | bad_sock = MagicMock() 108 | bad_sock.bind.side_effect = socket.error() 109 | options = {} 110 | 111 | mocker.patch("socket.socket", return_value=bad_sock) 112 | 113 | with pytest.raises(SystemExit) as ex: 114 | mount_efs.choose_tls_port_and_get_bind_sock(_get_config(), options, str(tmpdir)) 115 | 116 | assert 0 != ex.value.code 117 | 118 | out, err = capsys.readouterr() 119 | assert "Failed to locate an available port" in err 120 | 121 | assert ( 122 | DEFAULT_TLS_PORT_RANGE_HIGH - DEFAULT_TLS_PORT_RANGE_LOW 123 | == bad_sock.bind.call_count 124 | ) 125 | 126 | 127 | def test_choose_tls_port_option_specified(mocker, tmpdir): 128 | sock_mock = 
MagicMock() 129 | sock_mock.getsockname.return_value = ("local_host", DEFAULT_TLS_PORT) 130 | mocker.patch("socket.socket", return_value=sock_mock) 131 | options = {"tlsport": DEFAULT_TLS_PORT} 132 | 133 | tls_port_sock = mount_efs.choose_tls_port_and_get_bind_sock( 134 | _get_config(), options, str(tmpdir) 135 | ) 136 | tls_port = mount_efs.get_tls_port_from_sock(tls_port_sock) 137 | 138 | assert DEFAULT_TLS_PORT == tls_port 139 | 140 | 141 | def test_choose_tls_port_option_specified_unavailable(mocker, tmpdir, capsys): 142 | bad_sock = MagicMock() 143 | bad_sock.bind.side_effect = socket.error() 144 | options = {"tlsport": 1000} 145 | 146 | mocker.patch("socket.socket", return_value=bad_sock) 147 | 148 | with pytest.raises(SystemExit) as ex: 149 | mount_efs.choose_tls_port_and_get_bind_sock(_get_config(), options, str(tmpdir)) 150 | 151 | assert 0 != ex.value.code 152 | 153 | out, err = capsys.readouterr() 154 | assert "Specified port [1000] is unavailable" in err 155 | 156 | assert 1 == bad_sock.bind.call_count 157 | 158 | 159 | def test_choose_tls_port_under_netns(mocker, tmpdir): 160 | mocker.patch("builtins.open") 161 | setns_mock = mocker.patch("mount_efs.setns", return_value=(None, None)) 162 | mocker.patch("socket.socket", return_value=MagicMock()) 163 | options = {"netns": "/proc/1000/ns/net"} 164 | 165 | mount_efs.choose_tls_port_and_get_bind_sock(_get_config(), options, str(tmpdir)) 166 | utils.assert_called(setns_mock) 167 | 168 | 169 | def test_verify_tls_port(mocker): 170 | sock = MagicMock() 171 | sock.connect.side_effect = [ConnectionRefusedError, None] 172 | mocker.patch("socket.socket", return_value=sock) 173 | result = mount_efs.verify_tlsport_can_be_connected(1000) 174 | assert result is False 175 | result = mount_efs.verify_tlsport_can_be_connected(1000) 176 | assert result is True 177 | assert 2 == sock.connect.call_count 178 | -------------------------------------------------------------------------------- 
/src/proxy/rust-xdr/xdrgen/src/spec/test.rs: -------------------------------------------------------------------------------- 1 | use super::super::generate; 2 | use super::specification; 3 | use std::io::Cursor; 4 | 5 | #[test] 6 | fn typedef_void() { 7 | let s = specification( 8 | r#" 9 | typedef void; /* syntactically defined, semantically meaningless */ 10 | "#, 11 | ); 12 | 13 | println!("spec {:?}", s); 14 | assert!(s.is_err()) 15 | } 16 | 17 | #[test] 18 | fn kwishnames() { 19 | let kws = vec![ 20 | "bool", 21 | "case", 22 | "const", 23 | "default", 24 | "double", 25 | "enum", 26 | "float", 27 | "hyper", 28 | "int", 29 | "opaque", 30 | "quadruple", 31 | "string", 32 | "struct", 33 | "switch", 34 | "typedef", 35 | "union", 36 | "unsigned", 37 | "void", 38 | ]; 39 | let specs = vec![ 40 | "const {}x = 1;", 41 | "struct {}x { int i; };", 42 | "struct foo { int {}x; };", 43 | "typedef int {}x;", 44 | "union {}x switch (int x) { case 1: void; };", 45 | "union x switch (int {}x) { case 1: void; };", 46 | "union x switch (int y) { case 1: int {}x; };", 47 | ]; 48 | 49 | for sp in &specs { 50 | for kw in &kws { 51 | let spec = sp.replace("{}", kw); 52 | let s = specification(&spec); 53 | println!("spec {} => {:?}", spec, s); 54 | assert!(s.is_ok()) 55 | } 56 | } 57 | } 58 | 59 | #[test] 60 | fn kwnames() { 61 | let kws = vec![ 62 | "bool", 63 | "case", 64 | "const", 65 | "default", 66 | "double", 67 | "enum", 68 | "float", 69 | "hyper", 70 | "int", 71 | "opaque", 72 | "quadruple", 73 | "string", 74 | "struct", 75 | "switch", 76 | "typedef", 77 | "union", 78 | "unsigned", 79 | "void", 80 | ]; 81 | let specs = vec![ 82 | "const {} = 1;", 83 | "struct {} { int i; };", 84 | "struct foo { int {}; };", 85 | "typedef int {};", 86 | "union {} switch (int x) { case 1: void; };", 87 | "union x switch (int {}) { case 1: void; };", 88 | "union x switch (int y) { case 1: int {}; };", 89 | ]; 90 | 91 | for sp in &specs { 92 | for kw in &kws { 93 | let spec = sp.replace("{}", kw); 
94 | let s = specification(&spec); 95 | println!("spec {} => {:?}", spec, s); 96 | assert!(s.is_err()) 97 | } 98 | } 99 | } 100 | 101 | #[test] 102 | fn inline_struct() { 103 | let spec = r#" 104 | struct thing { 105 | struct { int a; int b; } thing; 106 | }; 107 | "#; 108 | let s = specification(spec); 109 | 110 | println!("spec {:?}", s); 111 | assert!(s.is_ok()); 112 | 113 | let g = generate("", Cursor::new(spec.as_bytes()), Vec::new()); 114 | assert!(g.is_err()); 115 | } 116 | 117 | #[test] 118 | fn inline_union() { 119 | let spec = r#" 120 | struct thing { 121 | union switch(int x) { case 0: int a; case 1: int b; } thing; 122 | }; 123 | "#; 124 | let s = specification(spec); 125 | 126 | println!("spec {:?}", s); 127 | assert!(s.is_ok()); 128 | 129 | let g = generate("", Cursor::new(spec.as_bytes()), Vec::new()); 130 | assert!(g.is_err()); 131 | } 132 | 133 | #[test] 134 | fn case_type() { 135 | let specs = vec![ 136 | "enum Foo { A, B, C }; union Bar switch (Foo x) { case A: void; case B: void; case C: void; };", 137 | "union Bar switch (int x) { case 1: void; case 2: void; case 3: void; };", 138 | ]; 139 | 140 | for sp in specs { 141 | let s = specification(sp); 142 | println!("spec sp \"{}\" => {:?}", sp, s); 143 | assert!(s.is_ok()); 144 | 145 | let g = generate("", Cursor::new(sp.as_bytes()), Vec::new()); 146 | assert!(g.is_ok()); 147 | } 148 | } 149 | 150 | #[test] 151 | fn case_type_mismatch() { 152 | let specs = vec![ 153 | "enum Foo { A, B, C}; union Bar switch (Foo x) { case 1: void; case 2: void; case 3: void; };", 154 | "enum Foo { A, B, C}; union Bar switch (int x) { case A: void; case B: void; case C: void; };", 155 | ]; 156 | 157 | for sp in specs { 158 | let s = specification(sp); 159 | println!("spec sp \"{}\" => {:?}", sp, s); 160 | assert!(s.is_ok()); 161 | 162 | let g = generate("", Cursor::new(sp.as_bytes()), Vec::new()); 163 | assert!(g.is_err()); 164 | } 165 | } 166 | 167 | #[test] 168 | fn constants() { 169 | let specs = vec![ 170 | 
"const A = 0;", 171 | "const A = 0x0;", 172 | "const A = 00;", 173 | "const A = -0;", 174 | "const A = 0x123;", 175 | "const A = 0123;", 176 | "const A = -0123;", 177 | "const A = 123;", 178 | "const A = -123;", 179 | ]; 180 | 181 | for sp in specs { 182 | let s = specification(sp); 183 | println!("spec sp \"{}\" => {:?}", sp, s); 184 | assert!(s.is_ok()); 185 | 186 | let g = generate("", Cursor::new(sp.as_bytes()), Vec::new()); 187 | assert!(g.is_ok()); 188 | } 189 | } 190 | 191 | #[test] 192 | fn union_simple() { 193 | let s = specification( 194 | r#" 195 | union foo switch (int x) { 196 | case 0: 197 | int val; 198 | }; 199 | "#, 200 | ); 201 | println!("spec {:?}", s); 202 | assert!(s.is_ok()) 203 | } 204 | 205 | #[test] 206 | fn union_default() { 207 | let s = specification( 208 | r#" 209 | union foo switch (int x) { 210 | case 0: 211 | int val; 212 | default: 213 | void; 214 | }; 215 | "#, 216 | ); 217 | println!("spec {:?}", s); 218 | assert!(s.is_ok()) 219 | } 220 | 221 | #[test] 222 | fn union_default_nonempty() { 223 | let s = specification( 224 | r#" 225 | union foo switch (int x) { 226 | case 0: 227 | int val; 228 | default: 229 | bool bye; 230 | }; 231 | "#, 232 | ); 233 | println!("spec {:?}", s); 234 | assert!(s.is_ok()) 235 | } 236 | 237 | #[test] 238 | fn fallthrough_case() { 239 | let s = specification( 240 | r#" 241 | union foo switch (int x) { 242 | case 0: 243 | case 1: 244 | int val; 245 | case 2: 246 | void; 247 | }; 248 | "#, 249 | ); 250 | println!("spec {:?}", s); 251 | assert!(s.is_ok()) 252 | } 253 | -------------------------------------------------------------------------------- /src/proxy/src/test_utils.rs: -------------------------------------------------------------------------------- 1 | // Testing utility used for both unit and integration tests. 2 | // 3 | 4 | // Using #[allow(dead_code)] is a common and acceptable practice for test utility functions. 
5 | #![allow(dead_code)] 6 | 7 | use crate::{ 8 | config_parser::ProxyConfig, 9 | efs_prot::{self, BindClientResponse, BindResponse, ScaleUpConfig}, 10 | efs_rpc::{parse_bind_client_to_partition_response, EFS_PROGRAM_NUMBER, EFS_PROGRAM_VERSION}, 11 | error::RpcError, 12 | proxy_identifier::ProxyIdentifier, 13 | tls::{create_config_builder, InsecureAcceptAllCertificatesHandler, TlsConfig}, 14 | }; 15 | use anyhow::Result; 16 | use rand::{Rng, RngCore}; 17 | use s2n_tls::config::Config; 18 | use std::{io::Cursor, path::Path}; 19 | use tokio::net::TcpListener; 20 | 21 | // Proxy Configuration testing utils 22 | // 23 | 24 | pub static TEST_CONFIG_PATH: &str = "tests/certs/test_config.ini"; 25 | const XID: u32 = 1; 26 | 27 | pub fn get_test_config() -> ProxyConfig { 28 | ProxyConfig::from_path(Path::new(TEST_CONFIG_PATH)).expect("Could not parse test config.") 29 | } 30 | 31 | pub async fn get_client_config() -> Result { 32 | let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; 33 | let builder = create_config_builder(&tls_config); 34 | 35 | let config = builder.build()?; 36 | Ok(config) 37 | } 38 | 39 | pub async fn get_server_config() -> Result { 40 | let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; 41 | let mut builder = create_config_builder(&tls_config); 42 | 43 | // Accept all client certificates 44 | builder.set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?; 45 | 46 | let config = builder.build()?; 47 | Ok(config) 48 | } 49 | 50 | pub async fn find_available_port() -> (TcpListener, u16) { 51 | for port in 10000..15000 { 52 | match TcpListener::bind(("127.0.0.1", port)).await { 53 | Ok(v) => { 54 | return (v, port); 55 | } 56 | Err(_) => continue, 57 | } 58 | } 59 | panic!("Failed to find port"); 60 | } 61 | 62 | /// generate_rpc_msg_fragments: Generates message fragments for tests 63 | /// 64 | /// This function generates a set of message fragments from random data. 
The fragments are constructed 65 | /// in a way that they can be later assembled into the full long message data 66 | /// function. 67 | /// 68 | /// # Arguments 69 | /// * `size` - The total size of the message. 70 | /// * `num_fragments` - The number of fragments to generate. 71 | /// 72 | pub fn generate_rpc_msg_fragments(size: usize, num_fragments: usize) -> (bytes::BytesMut, Vec) { 73 | let mut rng = rand::thread_rng(); 74 | let data: Vec = (0..size).map(|_| rng.gen()).collect(); 75 | 76 | let fragment_data_size = data.len() / num_fragments; 77 | 78 | let mut data_buffer = bytes::BytesMut::new(); 79 | for i in 0..num_fragments { 80 | let start_idx = i * fragment_data_size; 81 | let end_idx = std::cmp::min(size, start_idx + fragment_data_size); 82 | let fragment_data = &data[start_idx..end_idx]; 83 | 84 | let mut header = (end_idx - start_idx) as u32; 85 | if end_idx == size { 86 | header |= 1 << 31; 87 | } 88 | 89 | data_buffer.extend_from_slice(&header.to_be_bytes()); 90 | data_buffer.extend_from_slice(fragment_data); 91 | } 92 | assert_eq!(data_buffer.len(), (num_fragments * 4) + data.len()); 93 | 94 | (data_buffer, data) 95 | } 96 | 97 | pub fn generate_partition_id() -> efs_prot::PartitionId { 98 | let mut bytes = [0u8; efs_prot::PARTITION_ID_LENGTH as usize]; 99 | rand::thread_rng().fill_bytes(&mut bytes); 100 | efs_prot::PartitionId(bytes) 101 | } 102 | 103 | pub fn parse_bind_client_to_partition_request( 104 | request: &onc_rpc::RpcMessage<&[u8], &[u8]>, 105 | ) -> Result { 106 | let call_body = request.call_body().expect("not a call rpc"); 107 | 108 | if EFS_PROGRAM_NUMBER != call_body.program() 109 | || EFS_PROGRAM_VERSION != call_body.program_version() 110 | { 111 | return Err(RpcError::GarbageArgs); 112 | } 113 | 114 | let mut payload = Cursor::new(call_body.payload()); 115 | let raw_proxy_id = xdr_codec::unpack::<_, efs_prot::ProxyIdentifier>(&mut payload)?; 116 | 117 | Ok(ProxyIdentifier { 118 | uuid: uuid::Builder::from_bytes( 119 | raw_proxy_id 
120 | .identifier 121 | .try_into() 122 | .expect("Failed not convert vec to sized array"), 123 | ) 124 | .into_uuid(), 125 | incarnation: i64::from_be_bytes( 126 | raw_proxy_id 127 | .incarnation 128 | .try_into() 129 | .expect("Failed to convert vec to sized array"), 130 | ), 131 | }) 132 | } 133 | 134 | pub fn create_bind_client_to_partition_response( 135 | xid: u32, 136 | bind_response: BindResponse, 137 | scale_up_config: ScaleUpConfig, 138 | ) -> Result, RpcError> { 139 | let mut payload_buf = Vec::new(); 140 | 141 | let response = BindClientResponse { 142 | bind_response, 143 | scale_up_config, 144 | }; 145 | xdr_codec::pack(&response, &mut payload_buf)?; 146 | 147 | create_bind_client_to_partition_response_from_accepted_status( 148 | xid, 149 | onc_rpc::AcceptedStatus::Success(payload_buf), 150 | ) 151 | } 152 | 153 | pub fn create_bind_client_to_partition_response_from_accepted_status( 154 | xid: u32, 155 | accepted_status: onc_rpc::AcceptedStatus>, 156 | ) -> Result, RpcError> { 157 | let reply_body = onc_rpc::ReplyBody::Accepted(onc_rpc::AcceptedReply::new( 158 | onc_rpc::auth::AuthFlavor::AuthNone::>(None), 159 | accepted_status, 160 | )); 161 | 162 | onc_rpc::RpcMessage::new(xid, onc_rpc::MessageType::Reply(reply_body)) 163 | .serialise() 164 | .map_err(|e| e.into()) 165 | } 166 | 167 | pub fn generate_parse_bind_client_to_partition_response_result( 168 | accepted_status: onc_rpc::AcceptedStatus>, 169 | ) -> Result { 170 | let response = 171 | create_bind_client_to_partition_response_from_accepted_status(XID, accepted_status)?; 172 | let deserialized = onc_rpc::RpcMessage::try_from(response.as_slice())?; 173 | parse_bind_client_to_partition_response(&deserialized) 174 | } 175 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_get_instance_id.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. 
All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | import json 8 | import socket 9 | 10 | import pytest 11 | 12 | import mount_efs 13 | 14 | from .. import utils 15 | 16 | try: 17 | import ConfigParser 18 | except ImportError: 19 | from configparser import ConfigParser 20 | 21 | try: 22 | from urllib2 import HTTPError, URLError 23 | except ImportError: 24 | from urllib.error import HTTPError, URLError 25 | 26 | INSTANCE_ID = "i-deadbeefdeadbeef0" 27 | INSTANCE_DATA = { 28 | "devpayProductCodes": None, 29 | "privateIp": "192.168.1.1", 30 | "availabilityZone": "us-east-1a", 31 | "version": "2010-08-31", 32 | "instanceId": INSTANCE_ID, 33 | "billingProducts": None, 34 | "pendingTime": "2017-06-20T18:32:00Z", 35 | "instanceType": "m3.xlarge", 36 | "accountId": "123412341234", 37 | "architecture": "x86_64", 38 | "kernelId": None, 39 | "ramdiskId": None, 40 | "imageId": "ami-deadbeef", 41 | "region": "us-east-1", 42 | } 43 | INSTANCE_DOCUMENT = json.dumps(INSTANCE_DATA) 44 | 45 | 46 | @pytest.fixture(autouse=True) 47 | def setup(mocker): 48 | mount_efs.INSTANCE_IDENTITY = None 49 | 50 | 51 | class MockHeaders(object): 52 | def __init__(self, content_charset=None): 53 | self.content_charset = content_charset 54 | 55 | def get_content_charset(self): 56 | return self.content_charset 57 | 58 | 59 | class MockUrlLibResponse(object): 60 | def __init__(self, code=200, data=INSTANCE_DOCUMENT, headers=MockHeaders()): 61 | self.code = code 62 | self.data = data 63 | self.headers = headers 64 | 65 | def getcode(self): 66 | return self.code 67 | 68 | def read(self): 69 | return self.data 70 | 71 | 72 | def get_config(): 73 | try: 74 | config = ConfigParser.SafeConfigParser() 75 | except AttributeError: 76 | config = ConfigParser() 77 | config.add_section(mount_efs.CONFIG_SECTION) 78 | return config 79 | 80 | 81 | def 
test_get_instance_id_helper(): 82 | return mount_efs.get_instance_identity_info_from_instance_metadata( 83 | get_config(), "instanceId" 84 | ) 85 | 86 | 87 | def test_get_instance_id_with_token(mocker): 88 | mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value="ABCDEFG==") 89 | mocker.patch("mount_efs.urlopen", return_value=MockUrlLibResponse()) 90 | assert INSTANCE_ID == test_get_instance_id_helper() 91 | 92 | 93 | def test_get_instance_id_without_token(mocker): 94 | mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value=None) 95 | mocker.patch("mount_efs.urlopen", return_value=MockUrlLibResponse()) 96 | assert INSTANCE_ID == test_get_instance_id_helper() 97 | 98 | 99 | # Reproduce https://github.com/aws/efs-utils/issues/46 100 | def test_get_instance_id_token_fetch_time_out(mocker): 101 | # get_aws_ec2_metadata_token timeout, fallback to call without session token 102 | side_effect = [ 103 | socket.timeout 104 | for _ in range(0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT) 105 | ] 106 | side_effect.append(MockUrlLibResponse()) 107 | mocker.patch("mount_efs.urlopen", side_effect=side_effect) 108 | assert INSTANCE_ID == test_get_instance_id_helper() 109 | 110 | 111 | def test_get_instance_id_py3_no_charset(mocker): 112 | mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value=None) 113 | mocker.patch( 114 | "mount_efs.urlopen", 115 | return_value=MockUrlLibResponse(data=bytearray(INSTANCE_DOCUMENT, "us-ascii")), 116 | ) 117 | assert INSTANCE_ID == test_get_instance_id_helper() 118 | 119 | 120 | def test_get_instance_id_py3_utf8_charset(mocker): 121 | charset = "utf-8" 122 | mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value=None) 123 | mocker.patch( 124 | "mount_efs.urlopen", 125 | return_value=MockUrlLibResponse(data=bytearray(INSTANCE_DOCUMENT, charset)), 126 | headers=MockHeaders(content_charset=charset), 127 | ) 128 | assert INSTANCE_ID == test_get_instance_id_helper() 129 | 130 | 131 | def 
test_get_instance_id_config_metadata_unavailable(mocker): 132 | mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value=None) 133 | mocker.patch("mount_efs.urlopen", side_effect=URLError("test error")) 134 | instance_id = test_get_instance_id_helper() 135 | assert instance_id == None 136 | 137 | 138 | def _test_get_instance_id_error(mocker, response=None, error=None): 139 | mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value=None) 140 | if (response and error) or (not response and not error): 141 | raise ValueError("Invalid arguments") 142 | elif response: 143 | mocker.patch("mount_efs.urlopen", return_value=response) 144 | elif error: 145 | mocker.patch("mount_efs.urlopen", side_effect=error) 146 | 147 | instance_id = test_get_instance_id_helper() 148 | assert instance_id == None 149 | 150 | 151 | def test_get_instance_id_bad_response(mocker): 152 | _test_get_instance_id_error( 153 | mocker, error=HTTPError("url", 400, "Bad Request Error", None, None) 154 | ) 155 | 156 | 157 | def test_get_instance_id_error_response(mocker): 158 | _test_get_instance_id_error(mocker, error=URLError("test error")) 159 | 160 | 161 | def test_get_instance_id_bad_json(mocker): 162 | _test_get_instance_id_error(mocker, response=MockUrlLibResponse(data="not json")) 163 | 164 | 165 | def test_get_instance_id_missing_instance_id(mocker): 166 | _test_get_instance_id_error( 167 | mocker, 168 | response=MockUrlLibResponse(data=json.dumps({"accountId": "123412341234"})), 169 | ) 170 | 171 | 172 | def test_get_instance_id_via_cached_instance_identity(mocker): 173 | mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value="ABCDEFG==") 174 | url_request_helper_mock_1 = mocker.patch( 175 | "mount_efs.urlopen", return_value=MockUrlLibResponse() 176 | ) 177 | assert mount_efs.INSTANCE_IDENTITY == None 178 | assert INSTANCE_ID == test_get_instance_id_helper() 179 | utils.assert_called_n_times(url_request_helper_mock_1, 1) 180 | 181 | # Verify the global 
INSTANCE_IDENTITY is cached with previous metadata api call result 182 | assert mount_efs.INSTANCE_IDENTITY == INSTANCE_DATA 183 | url_request_helper_mock_2 = mocker.patch("mount_efs.urlopen") 184 | assert INSTANCE_ID == test_get_instance_id_helper() 185 | # Verify there is no second api call when INSTANCE_IDENTITY is present 186 | utils.assert_not_called(url_request_helper_mock_2) 187 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_mount_with_proxy.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | from unittest.mock import MagicMock 3 | 4 | import pytest 5 | 6 | import mount_efs 7 | 8 | from .. import common, utils 9 | 10 | try: 11 | import ConfigParser 12 | except ImportError: 13 | from configparser import ConfigParser 14 | 15 | try: 16 | import ConfigParser 17 | except ImportError: 18 | from configparser import ConfigParser 19 | 20 | DNS_NAME = "fs-deadbeef.efs.us-east-1.amazonaws.com" 21 | FS_ID = "fs-deadbeef" 22 | INIT_SYSTEM = "upstart" 23 | FALLBACK_IP_ADDRESS = "192.0.0.1" 24 | MOUNT_POINT = "/mnt" 25 | PATH = "/" 26 | 27 | DEFAULT_OPTIONS = { 28 | "nfsvers": 4.1, 29 | "rsize": 1048576, 30 | "wsize": 1048576, 31 | "hard": None, 32 | "timeo": 600, 33 | "retrans": 2, 34 | "tlsport": 3049, 35 | } 36 | 37 | # indices of different arguments to the NFS call 38 | NFS_BIN_ARG_IDX = 0 39 | NFS_MOUNT_PATH_IDX = 1 40 | NFS_MOUNT_POINT_IDX = 2 41 | NFS_OPTION_FLAG_IDX = 3 42 | NFS_OPTIONS_IDX = 4 43 | 44 | # indices of different arguments to the NFS call to certain network namespace 45 | NETNS_NSENTER_ARG_IDX = 0 46 | NETNS_PATH_ARG_IDX = 1 47 | NETNS_NFS_OFFSET = 2 48 | 49 | # indices of different arguments to the NFS call for MACOS 50 | NFS_MOUNT_PATH_IDX_MACOS = -2 51 | NFS_MOUNT_POINT_IDX_MACOS = -1 52 | 53 | NETNS = "/proc/1/net/ns" 54 | 55 | 56 | def _get_config(ocsp_enabled=False): 57 | try: 58 | config = ConfigParser.SafeConfigParser() 59 | 
except AttributeError: 60 | config = ConfigParser() 61 | 62 | mount_nfs_command_retry_count = 4 63 | mount_nfs_command_retry_timeout = 10 64 | mount_nfs_command_retry = "false" 65 | config.add_section(mount_efs.CONFIG_SECTION) 66 | config.set( 67 | mount_efs.CONFIG_SECTION, "retry_nfs_mount_command", mount_nfs_command_retry 68 | ) 69 | config.set( 70 | mount_efs.CONFIG_SECTION, 71 | "retry_nfs_mount_command_count", 72 | str(mount_nfs_command_retry_count), 73 | ) 74 | config.set( 75 | mount_efs.CONFIG_SECTION, 76 | "retry_nfs_mount_command_timeout_sec", 77 | str(mount_nfs_command_retry_timeout), 78 | ) 79 | if ocsp_enabled: 80 | config.set( 81 | mount_efs.CONFIG_SECTION, 82 | "stunnel_check_cert_validity", 83 | "true", 84 | ) 85 | return config 86 | 87 | 88 | def _mock_popen(mocker, returncode=0, stdout="stdout", stderr="stderr"): 89 | popen_mock = MagicMock() 90 | popen_mock.communicate.return_value = ( 91 | stdout, 92 | stderr, 93 | ) 94 | popen_mock.returncode = returncode 95 | 96 | return mocker.patch("subprocess.Popen", return_value=popen_mock) 97 | 98 | 99 | def test_mount_with_proxy_efs_proxy_enabled(mocker, capsys): 100 | options = dict(DEFAULT_OPTIONS) 101 | options["tls"] = None 102 | 103 | bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") 104 | mocker.patch("os.path.ismount", return_value=False) 105 | mocker.patch("threading.Thread.start") 106 | mocker.patch("threading.Thread.join") 107 | mocker.patch("mount_efs.mount_nfs") 108 | _mock_popen(mocker, stdout="nfs") 109 | mount_efs.mount_with_proxy( 110 | _get_config(), 111 | INIT_SYSTEM, 112 | DNS_NAME, 113 | PATH, 114 | FS_ID, 115 | MOUNT_POINT, 116 | options, 117 | ) 118 | utils.assert_called_once(bootstrap_proxy_mock) 119 | 120 | kwargs = bootstrap_proxy_mock.call_args[1] 121 | assert kwargs["efs_proxy_enabled"] == True 122 | 123 | 124 | def test_mount_with_proxy_ocsp_config_enabled(mocker, capsys): 125 | options = dict(DEFAULT_OPTIONS) 126 | options["tls"] = None 127 | 128 | 
bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") 129 | mocker.patch("os.path.ismount", return_value=False) 130 | mocker.patch("threading.Thread.start") 131 | mocker.patch("threading.Thread.join") 132 | mocker.patch("mount_efs.mount_nfs") 133 | _mock_popen(mocker, stdout="nfs") 134 | mount_efs.mount_with_proxy( 135 | _get_config(ocsp_enabled=True), 136 | INIT_SYSTEM, 137 | DNS_NAME, 138 | PATH, 139 | FS_ID, 140 | MOUNT_POINT, 141 | options, 142 | ) 143 | utils.assert_called_once(bootstrap_proxy_mock) 144 | 145 | kwargs = bootstrap_proxy_mock.call_args[1] 146 | assert kwargs["efs_proxy_enabled"] == False 147 | 148 | 149 | def test_mount_with_proxy_ocsp_option_enabled(mocker, capsys): 150 | options = dict(DEFAULT_OPTIONS) 151 | options["tls"] = None 152 | options["ocsp"] = None 153 | 154 | bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") 155 | mocker.patch("os.path.ismount", return_value=False) 156 | mocker.patch("threading.Thread.start") 157 | mocker.patch("threading.Thread.join") 158 | mocker.patch("mount_efs.mount_nfs") 159 | _mock_popen(mocker, stdout="nfs") 160 | mount_efs.mount_with_proxy( 161 | _get_config(), 162 | INIT_SYSTEM, 163 | DNS_NAME, 164 | PATH, 165 | FS_ID, 166 | MOUNT_POINT, 167 | options, 168 | ) 169 | utils.assert_called_once(bootstrap_proxy_mock) 170 | 171 | kwargs = bootstrap_proxy_mock.call_args[1] 172 | assert kwargs["efs_proxy_enabled"] == False 173 | 174 | 175 | def test_mount_with_proxy_efs_proxy_enabled_non_tls_mount(mocker, capsys): 176 | options = dict(DEFAULT_OPTIONS) 177 | 178 | bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") 179 | mocker.patch("os.path.ismount", return_value=False) 180 | mocker.patch("threading.Thread.start") 181 | mocker.patch("threading.Thread.join") 182 | mocker.patch("mount_efs.mount_nfs") 183 | _mock_popen(mocker, stdout="nfs") 184 | mount_efs.mount_with_proxy( 185 | _get_config(), 186 | INIT_SYSTEM, 187 | DNS_NAME, 188 | PATH, 189 | FS_ID, 190 | MOUNT_POINT, 191 | 
options, 192 | ) 193 | utils.assert_called_once(bootstrap_proxy_mock) 194 | 195 | kwargs = bootstrap_proxy_mock.call_args[1] 196 | assert kwargs["efs_proxy_enabled"] == True 197 | 198 | 199 | def test_mount_with_proxy_stunnel_enabled(mocker, capsys): 200 | options = dict(DEFAULT_OPTIONS) 201 | options["stunnel"] = None 202 | 203 | bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") 204 | mocker.patch("os.path.ismount", return_value=False) 205 | mocker.patch("threading.Thread.start") 206 | mocker.patch("threading.Thread.join") 207 | mocker.patch("mount_efs.mount_nfs") 208 | _mock_popen(mocker, stdout="nfs") 209 | mount_efs.mount_with_proxy( 210 | _get_config(), 211 | INIT_SYSTEM, 212 | DNS_NAME, 213 | PATH, 214 | FS_ID, 215 | MOUNT_POINT, 216 | options, 217 | ) 218 | utils.assert_called_once(bootstrap_proxy_mock) 219 | 220 | kwargs = bootstrap_proxy_mock.call_args[1] 221 | assert kwargs["efs_proxy_enabled"] == False 222 | -------------------------------------------------------------------------------- /test/mount_efs_test/test_describe_availability_zone.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | import pytest 8 | from botocore.exceptions import ClientError, EndpointConnectionError, NoCredentialsError 9 | 10 | import mount_efs 11 | 12 | from .. 
import utils 13 | 14 | MOCK_EC2_AGENT = "fake-client" 15 | AZ_NAME = "us-east-2b" 16 | AZ_ID = "use2-az2" 17 | OPERATION_NAME = "DescribeAvailabilityZones" 18 | 19 | 20 | def _test_describe_availability_zones_response( 21 | mocker, 22 | dryrun_effect, 23 | response, 24 | expected_describe_time, 25 | desired_az_id=None, 26 | desired_exception=None, 27 | desired_message=None, 28 | ): 29 | describe_mock = mocker.patch( 30 | "mount_efs.ec2_describe_availability_zones_helper", 31 | side_effect=[dryrun_effect, response], 32 | ) 33 | 34 | if desired_exception: 35 | assert desired_message != None 36 | with pytest.raises(mount_efs.FallbackException) as excinfo: 37 | mount_efs.get_az_id_by_az_name(MOCK_EC2_AGENT, AZ_NAME) 38 | assert desired_message in str(excinfo) 39 | else: 40 | az_id = mount_efs.get_az_id_by_az_name(MOCK_EC2_AGENT, AZ_NAME) 41 | assert az_id == desired_az_id 42 | 43 | utils.assert_called_n_times(describe_mock, expected_describe_time) 44 | 45 | 46 | def test_describe_availability_zones_dryrun_succeed_return_correct(mocker): 47 | dryrun_exception_response = { 48 | "Error": {"Code": "DryRunOperation", "Message": "DryRunOperation"} 49 | } 50 | response = { 51 | "AvailabilityZones": [ 52 | { 53 | "Messages": [], 54 | "ZoneId": AZ_ID, 55 | "State": "available", 56 | "ZoneName": AZ_NAME, 57 | "RegionName": "us-east-2", 58 | } 59 | ] 60 | } 61 | _test_describe_availability_zones_response( 62 | mocker, 63 | ClientError(dryrun_exception_response, OPERATION_NAME), 64 | response, 65 | 2, 66 | AZ_ID, 67 | ) 68 | 69 | 70 | def test_describe_availability_zones_dryrun_failed_unauthorized_operation(mocker): 71 | dryrun_exception_response = { 72 | "Error": {"Code": "UnauthorizedOperation", "Message": "UnauthorizedOperation"} 73 | } 74 | _test_describe_availability_zones_response( 75 | mocker, 76 | ClientError(dryrun_exception_response, OPERATION_NAME), 77 | None, 78 | 1, 79 | desired_exception=mount_efs.FallbackException, 80 | desired_message="Unauthorized to perform 
operation", 81 | ) 82 | 83 | 84 | def test_describe_availability_zones_dryrun_failed_invalid_az_name(mocker): 85 | dryrun_exception_response = { 86 | "Error": {"Code": "InvalidParameterValue", "Message": "InvalidParameterValue"} 87 | } 88 | _test_describe_availability_zones_response( 89 | mocker, 90 | ClientError(dryrun_exception_response, OPERATION_NAME), 91 | None, 92 | 1, 93 | desired_exception=mount_efs.FallbackException, 94 | desired_message="Invalid availability zone", 95 | ) 96 | 97 | 98 | def test_describe_availability_zones_dryrun_failed_service_unavailable(mocker): 99 | dryrun_exception_response = { 100 | "Error": { 101 | "Code": "ServiceUnavailableException", 102 | "Message": "ServiceUnavailableException", 103 | } 104 | } 105 | _test_describe_availability_zones_response( 106 | mocker, 107 | ClientError(dryrun_exception_response, OPERATION_NAME), 108 | None, 109 | 1, 110 | desired_exception=mount_efs.FallbackException, 111 | desired_message="The ec2 service cannot", 112 | ) 113 | 114 | 115 | def test_describe_availability_zones_dryrun_failed_access_denied(mocker): 116 | exception_message = "is not authorized to perform" 117 | dryrun_exception_response = { 118 | "Error": {"Code": "AccessDeniedException", "Message": exception_message} 119 | } 120 | _test_describe_availability_zones_response( 121 | mocker, 122 | ClientError(dryrun_exception_response, OPERATION_NAME), 123 | None, 124 | 1, 125 | desired_exception=mount_efs.FallbackException, 126 | desired_message=exception_message, 127 | ) 128 | 129 | 130 | def test_describe_availability_zones_dryrun_failed_unknown_exception(mocker): 131 | dryrun_exception_response = { 132 | "Error": {"Code": "UnknownException", "Message": "UnknownException"} 133 | } 134 | _test_describe_availability_zones_response( 135 | mocker, 136 | ClientError(dryrun_exception_response, OPERATION_NAME), 137 | None, 138 | 1, 139 | desired_exception=mount_efs.FallbackException, 140 | desired_message="Unexpected error", 141 | ) 142 | 143 | 
144 | def test_describe_availability_zones_dryrun_failed_no_credential_error(mocker): 145 | _test_describe_availability_zones_response( 146 | mocker, 147 | NoCredentialsError(), 148 | None, 149 | 1, 150 | desired_exception=mount_efs.FallbackException, 151 | desired_message="confirm your aws credentials are properly configured", 152 | ) 153 | 154 | 155 | def test_describe_availability_zones_failed_unknown_error(mocker): 156 | _test_describe_availability_zones_response( 157 | mocker, 158 | Exception("Unknown"), 159 | None, 160 | 1, 161 | desired_exception=mount_efs.FallbackException, 162 | desired_message="Unknown error", 163 | ) 164 | 165 | 166 | def test_describe_availability_zones_failed_endpoing_error(mocker): 167 | _test_describe_availability_zones_response( 168 | mocker, 169 | EndpointConnectionError(endpoint_url="https://efs.us-east-1.com"), 170 | None, 171 | 1, 172 | desired_exception=mount_efs.FallbackException, 173 | desired_message="Could not connect to the endpoint", 174 | ) 175 | 176 | 177 | def test_describe_availability_zones_return_empty_az_info(mocker): 178 | dryrun_exception_response = { 179 | "Error": {"Code": "DryRunOperation", "Message": "DryRunOperation"} 180 | } 181 | response = {"AvailabilityZones": []} 182 | _test_describe_availability_zones_response( 183 | mocker, 184 | ClientError(dryrun_exception_response, OPERATION_NAME), 185 | response, 186 | 2, 187 | None, 188 | ) 189 | 190 | 191 | def test_describe_availability_zones_return_none_az_info(mocker): 192 | dryrun_exception_response = { 193 | "Error": {"Code": "DryRunOperation", "Message": "DryRunOperation"} 194 | } 195 | _test_describe_availability_zones_response( 196 | mocker, ClientError(dryrun_exception_response, OPERATION_NAME), None, 2, None 197 | ) 198 | -------------------------------------------------------------------------------- /test/watchdog_test/test_send_signal_to_stunnel_processes.py: -------------------------------------------------------------------------------- 1 | # 
Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the MIT License. See the LICENSE accompanying this file 4 | # for the specific language governing permissions and limitations under 5 | # the License. 6 | 7 | import json 8 | import os 9 | import tempfile 10 | from signal import SIGHUP, SIGKILL, SIGTERM 11 | 12 | import watchdog 13 | 14 | from .. import utils 15 | 16 | PID = 100 17 | STATE_FILE = "fs-deadbeef.mnt.12345" 18 | MOUNT_STATE_DIR = STATE_FILE + "+" 19 | 20 | 21 | def create_file(tmpdir, file_name=tempfile.mkstemp()[1], content=""): 22 | temp_file = tmpdir.join(file_name) 23 | temp_file.write(content, ensure=True) 24 | return temp_file 25 | 26 | 27 | def create_dir(tmpdir, dirname): 28 | abs_dir_path = tmpdir.join(dirname) 29 | os.mkdir(str(abs_dir_path)) 30 | return abs_dir_path 31 | 32 | 33 | def create_pid_file(tmpdir, pid): 34 | pid_file = create_file(tmpdir, watchdog.STUNNEL_PID_FILE, str(pid)) 35 | return pid_file.dirname, pid_file.basename, str(pid_file) 36 | 37 | 38 | def create_state_file(tmpdir, state_dict, state_file_name): 39 | state_file = create_file(tmpdir, state_file_name, json.dumps(state_dict)) 40 | return state_file.dirname, state_file.basename, str(state_file) 41 | 42 | 43 | def test_is_pid_running_pid_empty(): 44 | assert False == watchdog.is_pid_running(None) 45 | 46 | 47 | def test_is_pid_running_pid_running(mocker): 48 | mocker.patch("os.kill") 49 | assert True == watchdog.is_pid_running(PID) 50 | 51 | 52 | def test_is_pid_running_pid_not_running(mocker): 53 | mocker.patch("os.kill", side_effect=OSError) 54 | assert False == watchdog.is_pid_running(PID) 55 | 56 | 57 | def test_get_pid_in_state_dir(tmpdir): 58 | mount_dir = create_dir(tmpdir, MOUNT_STATE_DIR) 59 | pid_dir, pid_file, abs_pid_file = create_pid_file(mount_dir, PID) 60 | assert os.path.exists(str(mount_dir)) 61 | assert os.path.exists(abs_pid_file) 62 | 63 | state_dict = {"pid": PID} 64 | state_dir, state_file, 
abs_state_file = create_state_file( 65 | tmpdir, state_dict, STATE_FILE 66 | ) 67 | assert os.path.exists(abs_state_file) 68 | 69 | assert PID == int(watchdog.get_pid_in_state_dir(STATE_FILE, str(tmpdir))) 70 | 71 | 72 | def test_pid_file_not_in_state_dir(tmpdir): 73 | mount_dir = create_dir(tmpdir, MOUNT_STATE_DIR) 74 | assert os.path.exists(str(mount_dir)) 75 | assert not os.path.exists(os.path.join(str(mount_dir), watchdog.STUNNEL_PID_FILE)) 76 | 77 | state_dict = {"pid": PID} 78 | state_dir, state_file, abs_state_file = create_state_file( 79 | tmpdir, state_dict, STATE_FILE 80 | ) 81 | assert os.path.exists(abs_state_file) 82 | 83 | assert watchdog.get_pid_in_state_dir(STATE_FILE, str(tmpdir)) is None 84 | 85 | 86 | def test_is_mount_stunnel_proc_running_pid_empty(tmpdir): 87 | assert False == watchdog.is_mount_stunnel_proc_running(None, STATE_FILE, tmpdir) 88 | 89 | 90 | def test_is_mount_stunnel_proc_running_process_not_stunnel(mocker, tmpdir): 91 | mocker.patch("watchdog.check_process_name", return_value="java") 92 | mock_log_debug = mocker.patch("logging.debug") 93 | 94 | assert False == watchdog.is_mount_stunnel_proc_running(PID, STATE_FILE, tmpdir) 95 | debug_log = mock_log_debug.call_args[0][0] 96 | assert "is not an efs-proxy or stunnel process" in debug_log 97 | 98 | 99 | def test_is_mount_stunnel_proc_running_process_not_running(mocker, tmpdir): 100 | mocker.patch("watchdog.check_process_name", return_value="stunnel") 101 | mocker.patch("watchdog.is_pid_running", return_value=False) 102 | mock_log_debug = mocker.patch("logging.debug") 103 | 104 | assert False == watchdog.is_mount_stunnel_proc_running(PID, STATE_FILE, tmpdir) 105 | 106 | debug_log = mock_log_debug.call_args[0][0] 107 | assert "is not running anymore" in debug_log 108 | 109 | 110 | def test_is_mount_stunnel_proc_running_pid_file_not_exist(mocker, tmpdir): 111 | mocker.patch("watchdog.check_process_name", return_value="stunnel") 112 | mocker.patch("watchdog.is_pid_running", 
return_value=True) 113 | mount_dir = create_dir(tmpdir, MOUNT_STATE_DIR) 114 | assert not os.path.exists(os.path.join(str(mount_dir), watchdog.STUNNEL_PID_FILE)) 115 | mock_log_debug = mocker.patch("logging.debug") 116 | 117 | assert True == watchdog.is_mount_stunnel_proc_running(PID, STATE_FILE, str(tmpdir)) 118 | 119 | assert "Pid file of stunnel does not exist" in str( 120 | mock_log_debug.call_args_list[0][0] 121 | ) 122 | assert "is running with pid " in str(mock_log_debug.call_args_list[1][0]) 123 | 124 | 125 | def test_is_mount_stunnel_proc_running_pid_mismatch(mocker, tmpdir): 126 | mocker.patch("watchdog.check_process_name", return_value="stunnel") 127 | mocker.patch("watchdog.is_pid_running", return_value=True) 128 | mount_dir = create_dir(tmpdir, MOUNT_STATE_DIR) 129 | create_pid_file(mount_dir, PID + 1) 130 | mock_log_warning = mocker.patch("logging.warning") 131 | 132 | assert False == watchdog.is_mount_stunnel_proc_running(PID, STATE_FILE, str(tmpdir)) 133 | 134 | warning_log = mock_log_warning.call_args[0][0] 135 | assert "Stunnel pid mismatch in state file" in warning_log 136 | 137 | 138 | def test_is_mount_stunnel_proc_running(mocker, tmpdir): 139 | mocker.patch("watchdog.check_process_name", return_value="stunnel") 140 | mocker.patch("watchdog.is_pid_running", return_value=True) 141 | mount_dir = create_dir(tmpdir, MOUNT_STATE_DIR) 142 | create_pid_file(mount_dir, PID) 143 | mock_log_debug = mocker.patch("logging.debug") 144 | 145 | assert True == watchdog.is_mount_stunnel_proc_running(PID, STATE_FILE, str(tmpdir)) 146 | 147 | debug_log = mock_log_debug.call_args[0][0] 148 | assert "is running with pid" in debug_log 149 | 150 | 151 | def test_send_sigkill_to_stunnel_process_group(mocker, tmpdir): 152 | _test_send_signal_to_stunnel_process_group_helper(mocker, tmpdir, SIGKILL) 153 | 154 | 155 | def test_send_sigkill_to_stunnel_process_group_not_running(mocker, tmpdir): 156 | _test_send_signal_to_stunnel_process_group_helper( 157 | mocker, tmpdir, 
SIGKILL, is_process_running=False 158 | ) 159 | 160 | 161 | def test_send_sighup_to_stunnel_process_group(mocker, tmpdir): 162 | _test_send_signal_to_stunnel_process_group_helper(mocker, tmpdir, SIGHUP) 163 | 164 | 165 | def test_send_sighup_to_stunnel_process_group_not_running(mocker, tmpdir): 166 | _test_send_signal_to_stunnel_process_group_helper( 167 | mocker, tmpdir, SIGHUP, is_process_running=False 168 | ) 169 | 170 | 171 | def test_send_sigterm_to_stunnel_process_group(mocker, tmpdir): 172 | _test_send_signal_to_stunnel_process_group_helper(mocker, tmpdir, SIGTERM) 173 | 174 | 175 | def test_send_sigterm_to_stunnel_process_group_not_running(mocker, tmpdir): 176 | _test_send_signal_to_stunnel_process_group_helper( 177 | mocker, tmpdir, SIGTERM, is_process_running=False 178 | ) 179 | 180 | 181 | def _test_send_signal_to_stunnel_process_group_helper( 182 | mocker, tmpdir, signal, is_process_running=True 183 | ): 184 | mocker.patch( 185 | "watchdog.is_mount_stunnel_proc_running", return_value=is_process_running 186 | ) 187 | mocker.patch("os.getpgid") 188 | kill_mock = mocker.patch("os.killpg") 189 | 190 | send_result = watchdog.send_signal_to_running_stunnel_process_group( 191 | PID, STATE_FILE, tmpdir, signal 192 | ) 193 | if is_process_running: 194 | assert True == send_result 195 | utils.assert_called_once(kill_mock) 196 | else: 197 | assert False == send_result 198 | utils.assert_not_called(kill_mock) 199 | -------------------------------------------------------------------------------- /test/watchdog_test/test_clean_up_mount_state.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. 3 | # 4 | # Licensed under the MIT License. See the LICENSE accompanying this file 5 | # for the specific language governing permissions and limitations under 6 | # the License. 
#

import json
import os
import tempfile

import watchdog

from .. import utils

FAKE_MOUNT_STATE_DIR = "/fake/path"
PID = 99999999999999999


def create_temp_file(tmpdir, content=""):
    """Create a uniquely-named file under `tmpdir` holding `content`.

    Fix: tempfile.mkstemp() really creates a file in the system temp
    directory and returns an OPEN file descriptor; the original code used
    only the returned path, leaking the descriptor and leaving a stray
    file behind. We now close the descriptor and delete the scratch file,
    keeping only its unique basename for the file created under tmpdir.
    """
    fd, scratch_path = tempfile.mkstemp()
    os.close(fd)
    os.remove(scratch_path)
    temp_file = tmpdir.join(os.path.basename(scratch_path))
    temp_file.write(content, ensure=True)
    return temp_file


def create_state_file(tmpdir, extra_files=None, mount_state_dir=None):
    """Write a fake watchdog state file; return (dirname, basename, abspath).

    Fix: the default for `extra_files` was the mutable `list()`; the None
    sentinel avoids the shared-mutable-default pitfall and stays backward
    compatible for callers that pass a list positionally.
    """
    state_dict = {"pid": PID, "files": [] if extra_files is None else extra_files}

    if mount_state_dir:
        state_dict["mountStateDir"] = mount_state_dir

    state_file = create_temp_file(tmpdir, json.dumps(state_dict))

    return state_file.dirname, state_file.basename, str(state_file)


def setup_mock(
    mocker,
    is_stunnel_proc_running_first_check=True,
    is_stunnel_proc_running_second_check=False,
):
    """Stub process inspection for clean_up_mount_state; return the os.killpg mock."""
    mocker.patch("os.getpgid")
    # The watchdog clean_up_mount_state function has two procedures:
    # 1. Kill the stunnel process if it is still running
    # 2. Check whether the stunnel process is still running, if not, cleanup the state file of the mount
    # Each procedure will check whether the mount stunnel process is running, so we have two mock here.
    mocker.patch(
        "watchdog.is_mount_stunnel_proc_running",
        side_effect=[
            is_stunnel_proc_running_first_check,
            is_stunnel_proc_running_second_check,
        ],
    )
    return mocker.patch("os.killpg")


def test_clean_up_on_first_try(mocker, tmpdir):
    """
    This test verifies when the stunnel is running at first then got killed, watchdog will cleanup the mount state file
    """
    killpg_mock = setup_mock(mocker)

    state_dir, state_file, abs_state_file = create_state_file(tmpdir)

    assert os.path.exists(abs_state_file)

    watchdog.clean_up_mount_state(state_dir, state_file, PID)

    utils.assert_called_once(killpg_mock)
    assert not os.path.exists(abs_state_file)


def _test_clean_up_files(mocker, tmpdir, files_should_exist):
    # Shared driver: extra files listed in the state file must be removed by
    # cleanup whether or not they still exist when cleanup runs.
    killpg_mock = setup_mock(mocker)

    extra_files = [
        str(create_temp_file(tmpdir)),
        str(create_temp_file(tmpdir)),
    ]

    state_dir, state_file, abs_state_file = create_state_file(tmpdir, extra_files)

    assert os.path.exists(abs_state_file)
    for f in extra_files:
        if not files_should_exist:
            os.remove(f)
        assert os.path.exists(f) or not files_should_exist

    watchdog.clean_up_mount_state(state_dir, state_file, PID)

    utils.assert_called_once(killpg_mock)
    assert not os.path.exists(abs_state_file)
    for f in extra_files:
        assert not os.path.exists(f)


def test_clean_up_nonexistent_files(mocker, tmpdir):
    _test_clean_up_files(mocker, tmpdir, files_should_exist=False)


def test_clean_up_multiple_files(mocker, tmpdir):
    """
    This test verifies when there are extra files created in the mount state dir, watchdog can clean up the state file
    """
    _test_clean_up_files(mocker, tmpdir, files_should_exist=True)


def test_clean_up_pid_still_lives(mocker, tmpdir):
    """
    This test verifies when the stunnel process is still running after killing event, watchdog won't clean up the state
    file
    """
    killpg_mock = setup_mock(
        mocker,
        is_stunnel_proc_running_first_check=True,
        is_stunnel_proc_running_second_check=True,
    )

    state_dir, state_file, abs_state_file = create_state_file(tmpdir)

    assert os.path.exists(abs_state_file)

    watchdog.clean_up_mount_state(state_dir, state_file, PID)

    utils.assert_called_once(killpg_mock)
    assert os.path.exists(abs_state_file)


def test_clean_up_pid_already_killed(mocker, tmpdir):
    """
    This test verifies when the stunnel process is already killed, the kill signal won't be sent, and watchdog will
    clean up the state file
    """
    state_dir, state_file, abs_state_file = create_state_file(tmpdir)
    pid = None
    # NOTE: this call runs the real (unpatched) is_mount_stunnel_proc_running
    # with pid=None; it is expected to report "not running" for a missing pid.
    is_running = watchdog.is_mount_stunnel_proc_running(pid, state_file, state_dir)
    killpg_mock = setup_mock(
        mocker,
        is_stunnel_proc_running_first_check=is_running,
        is_stunnel_proc_running_second_check=False,
    )

    assert os.path.exists(abs_state_file)

    watchdog.clean_up_mount_state(state_dir, state_file, pid)

    utils.assert_not_called(killpg_mock)
    assert not os.path.exists(abs_state_file)


def test_pid_not_running(mocker, tmpdir):
    """
    This test verifies when the stunnel process is already not running, the kill signal won't be sent, and watchdog will
    clean up the state file
    """
    killpg_mock = setup_mock(
        mocker,
        is_stunnel_proc_running_first_check=False,
        is_stunnel_proc_running_second_check=False,
    )

    state_dir, state_file, abs_state_file = create_state_file(tmpdir)

    assert os.path.exists(abs_state_file)

    watchdog.clean_up_mount_state(state_dir, state_file, PID)

    utils.assert_not_called(killpg_mock)
    assert not os.path.exists(abs_state_file)


def test_clean_up_mount_state_dir_success(mocker, tmpdir):
    """
    This test verifies when the stunnel process is already not running, watchdog will clean up the mount state dir
    """
    setup_mock(
        mocker,
        is_stunnel_proc_running_first_check=False,
        is_stunnel_proc_running_second_check=False,
    )
    mocker.patch("os.path.isdir", return_value=True)
    rm_tree = mocker.patch("shutil.rmtree")

    state_dir, state_file, abs_state_file = create_state_file(
        tmpdir, mount_state_dir=FAKE_MOUNT_STATE_DIR
    )

    assert os.path.exists(abs_state_file)

    watchdog.clean_up_mount_state(
        state_dir, state_file, PID, mount_state_dir=FAKE_MOUNT_STATE_DIR
    )

    utils.assert_called_once(rm_tree)
    assert not os.path.exists(abs_state_file)


def test_clean_up_mount_state_dir_fail(mocker, tmpdir):
    """
    This test verifies when the stunnel process is already not running, watchdog will not clean up the mount state dir
    if the mount state dir path is not a directory
    """
    setup_mock(
        mocker,
        is_stunnel_proc_running_first_check=False,
        is_stunnel_proc_running_second_check=False,
    )
    mocker.patch("os.path.isdir", return_value=False)
    rm_tree = mocker.patch("shutil.rmtree")

    state_dir, state_file, abs_state_file = create_state_file(
        tmpdir, mount_state_dir=FAKE_MOUNT_STATE_DIR
    )

    assert os.path.exists(abs_state_file)

    watchdog.clean_up_mount_state(
        state_dir, state_file, PID, mount_state_dir=FAKE_MOUNT_STATE_DIR
    )

    utils.assert_not_called(rm_tree)
    assert not os.path.exists(abs_state_file)
use std::io::Cursor;

use bytes::{Buf, Bytes, BytesMut};
use tokio::io::AsyncReadExt;

use crate::connections::ProxyStream;

/// A batch of complete RPC messages split off a byte stream.
/// Each element is an RPC call.
pub struct RpcBatch {
    pub rpcs: Vec<Bytes>,
}

/// Why a buffer could not be parsed into a complete RPC message.
#[derive(Debug, PartialEq)]
pub enum RpcFragmentParseError {
    /// A fragment header declared a length at or below RPC_MIN_SIZE.
    InvalidSizeTooSmall,
    /// A fragment header declared a length at or above RPC_MAX_SIZE.
    SizeLimitExceeded,
    /// Not a failure: more bytes are required to complete the message.
    Incomplete,
}

/// Record-marking header bit set on the final fragment of a message.
pub const RPC_LAST_FRAG: u32 = 0x80000000;
/// Mask extracting the 31-bit fragment length from the record-marking header.
pub const RPC_SIZE_MASK: u32 = 0x7FFFFFFF;
/// Byte size of the record-marking header that precedes every fragment.
pub const RPC_HEADER_SIZE: usize = 4;

/* The sunrpc server implementation in linux has a maximum payload of 1MB + 1 page
 * (see include/linux/sunrpc/svc.h#RPCSVC_MAXPAYLOAD and sv_max_mesg).
 */
pub const RPC_MAX_SIZE: usize = 1024 * 1024 + 4 * 1024;
pub const RPC_MIN_SIZE: usize = 2;

impl RpcBatch {
    /// Split every complete RPC message off the front of `buffer`.
    ///
    /// Returns `Ok(None)` when no complete message is buffered yet. Complete
    /// messages are moved out of `buffer` (zero-copy, via `split_to`), so any
    /// trailing partial message remains in `buffer` for the next call.
    pub fn parse_batch(buffer: &mut BytesMut) -> Result<Option<RpcBatch>, RpcFragmentParseError> {
        let mut batch = RpcBatch { rpcs: Vec::new() };

        loop {
            match Self::check_rpc_message(Cursor::new(&buffer[..])) {
                Ok(len) => {
                    let rpc_message = buffer.split_to(len);
                    batch.rpcs.push(rpc_message.freeze());
                }
                // Incomplete just means "wait for more bytes" - stop scanning.
                Err(RpcFragmentParseError::Incomplete) => break,
                Err(e) => return Err(e),
            }
        }

        if batch.rpcs.is_empty() {
            Ok(None)
        } else {
            Ok(Some(batch))
        }
    }

    /// Walk the fragment headers in `src` and return the total byte length
    /// (headers included) of the first complete message, i.e. everything up
    /// to and including the fragment whose RPC_LAST_FRAG bit is set.
    pub fn check_rpc_message(mut src: Cursor<&[u8]>) -> Result<usize, RpcFragmentParseError> {
        loop {
            if src.remaining() < RPC_HEADER_SIZE {
                return Err(RpcFragmentParseError::Incomplete);
            }

            let fragment_header = src.get_u32();
            let fragment_size = (fragment_header & RPC_SIZE_MASK) as usize;
            let is_last_fragment = (fragment_header & RPC_LAST_FRAG) != 0;

            if fragment_size <= RPC_MIN_SIZE {
                return Err(RpcFragmentParseError::InvalidSizeTooSmall);
            }

            if fragment_size >= RPC_MAX_SIZE {
                return Err(RpcFragmentParseError::SizeLimitExceeded);
            }

            if src.remaining() < fragment_size {
                return Err(RpcFragmentParseError::Incomplete);
            }

            src.advance(fragment_size);

            if is_last_fragment {
                // Cursor position == bytes consumed == whole-message length.
                return Ok(src.position() as usize);
            }
        }
    }
}

/// Read a single RPC fragment (record-marking header plus payload) from
/// `stream`, returning it with the 4 header bytes included.
///
/// NOTE(review): `len` comes straight off the wire and is not validated
/// against RPC_MAX_SIZE here, so a misbehaving peer could trigger an
/// allocation of up to ~2 GiB - TODO confirm whether callers pre-validate.
pub async fn read_rpc_bytes(stream: &mut dyn ProxyStream) -> Result<Vec<u8>, tokio::io::Error> {
    let mut header = [0; RPC_HEADER_SIZE];
    stream.read_exact(&mut header).await?;

    // NOTE: onc-rpc crate does not support fragmentation out of the box. Add 4 to include the header.
    let len = (RPC_SIZE_MASK & extract_u32_from_bytes(&header)) + RPC_HEADER_SIZE as u32;

    let mut payload = vec![0; len as usize];
    payload[0..RPC_HEADER_SIZE].clone_from_slice(&header);

    stream.read_exact(&mut payload[RPC_HEADER_SIZE..]).await?;

    Ok(payload)
}

/// Big-endian u32 from the first four bytes of `header`.
fn extract_u32_from_bytes(header: &[u8]) -> u32 {
    u32::from_be_bytes([header[0], header[1], header[2], header[3]])
}

#[cfg(test)]
pub mod test {
    use crate::{rpc::RPC_MAX_SIZE, test_utils::generate_rpc_msg_fragments};

    use super::{RpcBatch, RpcFragmentParseError, RPC_HEADER_SIZE, RPC_LAST_FRAG};
    use bytes::{BufMut, BytesMut};

    #[test]
    fn multiple_messages() {
        let mut b = BytesMut::with_capacity(8);
        b.put_u32(RPC_LAST_FRAG | 4);
        b.put_u32(42);
        b.put_u32(RPC_LAST_FRAG | 4);

        // First message is complete; the second is only a header so far.
        let batch = RpcBatch::parse_batch(&mut b);
        let batch = batch.unwrap().unwrap();
        assert_eq!(batch.rpcs[0].len(), 8);
        assert_eq!(batch.rpcs.len(), 1);

        b.put_u32(43);
        let batch = RpcBatch::parse_batch(&mut b);
        let batch = batch.unwrap().unwrap();
        assert_eq!(batch.rpcs[0].len(), 8);
        assert_eq!(batch.rpcs.len(), 1);

        let batch = RpcBatch::parse_batch(&mut b);
        assert!(matches!(batch, Ok(None)));
    }

    #[test]
    fn test_invalid_rpc_small_fragment() {
        let num_fragments = 1;
        let (mut input_buffer, _) = generate_rpc_msg_fragments(1, num_fragments);
        let result = RpcBatch::parse_batch(&mut input_buffer);
        assert!(matches!(
            result,
            Err(RpcFragmentParseError::InvalidSizeTooSmall)
        ));
    }

    #[test]
    fn test_invalid_rpc_big_fragment() {
        let num_fragments = 1;
        let (mut input_buffer, _) = generate_rpc_msg_fragments(RPC_MAX_SIZE + 1, num_fragments);
        let result = RpcBatch::parse_batch(&mut input_buffer);
        assert!(matches!(
            result,
            Err(RpcFragmentParseError::SizeLimitExceeded)
        ));
    }

    #[test]
    fn test_parse_batch_single_message() {
        // Create an input buffer with multiple RPC fragments
        let num_fragments = 3;
        let message_size = 12;
        let (mut input_buffer, _) = generate_rpc_msg_fragments(message_size, num_fragments);
        let mut rpc_batch = RpcBatch::parse_batch(&mut input_buffer)
            .expect("parse batch failed")
            .expect("no rpc messages found");

        assert_eq!(1, rpc_batch.rpcs.len());
        let rpc_message = rpc_batch.rpcs.pop().expect("No RPC messages");

        let expected_message_size = num_fragments * RPC_HEADER_SIZE + message_size;
        assert_eq!(expected_message_size, rpc_message.len());
    }

    #[test]
    fn test_parse_batch_multiple_message() {
        // Create an input buffer with multiple RPC messages
        let num_fragments_1 = 3;
        let message_size_1 = 12;
        let (mut input_buffer, _) = generate_rpc_msg_fragments(message_size_1, num_fragments_1);

        let num_fragments_2 = 6;
        let message_size_2 = 24;
        let (input_buffer_2, _) = generate_rpc_msg_fragments(message_size_2, num_fragments_2);

        let num_fragments_3 = 1;
        let message_size_3 = 50;
        let (input_buffer_3, _) = generate_rpc_msg_fragments(message_size_3, num_fragments_3);

        input_buffer.extend_from_slice(&input_buffer_2);
        input_buffer.extend_from_slice(&input_buffer_3);

        let mut rpc_batch = RpcBatch::parse_batch(&mut input_buffer)
            .expect("parse batch failed")
            .expect("no rpc messages found");

        assert_eq!(3, rpc_batch.rpcs.len());

        // Messages pop in reverse order of arrival.
        let rpc_message_3 = rpc_batch.rpcs.pop().expect("No RPC messages");
        let expected_message_size_3 = num_fragments_3 * RPC_HEADER_SIZE + message_size_3;
        assert_eq!(expected_message_size_3, rpc_message_3.len());

        let rpc_message_2 = rpc_batch.rpcs.pop().expect("No RPC messages");
        let expected_message_size_2 = num_fragments_2 * RPC_HEADER_SIZE + message_size_2;
        assert_eq!(expected_message_size_2, rpc_message_2.len());

        let rpc_message_1 = rpc_batch.rpcs.pop().expect("No RPC messages");
        let expected_message_size_1 = num_fragments_1 * RPC_HEADER_SIZE + message_size_1;
        assert_eq!(expected_message_size_1, rpc_message_1.len());
    }
}

// -------- src/proxy/src/config_parser.rs --------

use log::LevelFilter;
use serde::{Deserialize, Serialize};
use std::{error::Error, path::Path, str::FromStr};

/// Log level applied when the config file does not specify `debug`.
const DEFAULT_LOG_LEVEL: LevelFilter = LevelFilter::Warn;

/// Serde default hook for `ProxyConfig::debug`.
fn default_log_level() -> String {
    DEFAULT_LOG_LEVEL.to_string()
}

/// Deserialize stunnel-style booleans: "true"/"yes"/"1" and "false"/"no"/"0",
/// case-insensitively.
fn deserialize_bool<'de, D>(deserializer: D) -> Result<bool, D::Error>
where
    D: serde::Deserializer<'de>,
{
    let s = String::deserialize(deserializer)?;
    match s.to_lowercase().as_str() {
        "true" | "yes" | "1" => Ok(true),
        "false" | "no" | "0" => Ok(false),
        _ => Err(serde::de::Error::custom(format!("Invalid value: {}", s))),
    }
}

/// Proxy process configuration, parsed from a stunnel-compatible INI file.
/// Unknown keys in the file are ignored by the deserializer.
#[derive(Default, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct ProxyConfig {
    #[serde(alias = "fips", deserialize_with = "deserialize_bool")]
    pub fips: bool,

    /// Logging level.
/// Values should correspond to the log::LevelFilter enum.
    #[serde(alias = "debug", default = "default_log_level")]
    pub debug: String,

    /// Output path for log files. Logging is disabled if this value is not provided.
    #[serde(alias = "output")]
    pub output: Option<String>,

    /// The proxy process is responsible for writing its PID into this file so that the Watchdog
    /// process can monitor it
    #[serde(alias = "pid")]
    pub pid_file_path: String,

    /// This nested structure is required for backwards compatibility
    #[serde(alias = "efs")]
    pub nested_config: EfsConfig,
}

impl FromStr for ProxyConfig {
    type Err = serde_ini::de::Error;

    /// Parse a ProxyConfig from stunnel-style INI text.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        serde_ini::from_str(s)
    }
}

impl ProxyConfig {
    /// Read the file at `config_path` and parse it into a ProxyConfig.
    pub fn from_path(config_path: &Path) -> Result<ProxyConfig, Box<dyn Error>> {
        let config_string = std::fs::read_to_string(config_path)?;
        let config = ProxyConfig::from_str(&config_string)?;
        Ok(config)
    }
}

/// Settings from the `[efs]` section of the config file (stunnel-compatible
/// key names are accepted via serde aliases).
#[derive(Default, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)]
pub struct EfsConfig {
    /// The mount target address - DNS name or IP address
    #[serde(alias = "connect")]
    pub mount_target_addr: String,

    /// Listen for and accept connections on the specified host:port
    #[serde(alias = "accept")]
    pub listen_addr: String,

    /// File path of the file that contains the client-side certificate and public key
    #[serde(alias = "cert", default)]
    pub client_cert_pem_file: String,

    /// File path of the file that contains the client private key
    #[serde(alias = "key", default)]
    pub client_private_key_pem_file: String,

    /// The hostname that is expected to be on the TLS certificate that the remote server presents
    #[serde(alias = "checkHost", default)]
    pub expected_server_hostname_tls: String,

    /// File path of the certificate authority file.
    /// This is used to verify the EFS server-side TLS certificate.
    #[serde(alias = "CAfile", default)]
    pub ca_file: String,
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::test_utils::TEST_CONFIG_PATH;
    use std::{path::Path, string::String};

    #[test]
    fn test_read_config_from_file() {
        // Smoke test: the checked-in test fixture must parse.
        assert!(ProxyConfig::from_path(Path::new(TEST_CONFIG_PATH)).is_ok());
    }

    #[test]
    fn test_parse_config() {
        // Full stunnel-style config; keys the proxy does not model (port,
        // sslVersion, TIMEOUT*, ...) are expected to be ignored.
        let config_string = r#"fips = yes
foreground = quiet
socket = l:SO_REUSEADDR=yes
socket = a:SO_BINDTODEVICE=lo
debug = debug
output = /var/log/amazon/efs/fs-12341234.home.ec2-user.efs.21036.efs-proxy.log
pid = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid
port = 8081
initial_partition_ip = 127.0.0.1:2049

[efs]
accept = 127.0.0.1:21036
connect = fs-12341234.efs.us-east-1.amazonaws.com:2049
sslVersion = TLSv1.2
renegotiation = no
TIMEOUTbusy = 20
TIMEOUTclose = 0
TIMEOUTidle = 70
delay = yes
verify = 2
CAfile = /etc/amazon/efs/efs-utils.crt
cert = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem
key = /etc/amazon/efs/privateKey.pem
checkHost = fs-12341234.efs.us-east-1.amazonaws.com
"#;

        let result_config = ProxyConfig::from_str(config_string).unwrap();
        let expected_proxy_config = ProxyConfig {
            fips: true,
            pid_file_path: String::from(
                "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid",
            ),
            debug: LevelFilter::Debug.to_string().to_ascii_lowercase(),
            output: Some(String::from(
                "/var/log/amazon/efs/fs-12341234.home.ec2-user.efs.21036.efs-proxy.log",
            )),
            nested_config: EfsConfig {
                listen_addr: String::from("127.0.0.1:21036"),
                mount_target_addr: String::from("fs-12341234.efs.us-east-1.amazonaws.com:2049"),
                ca_file: String::from("/etc/amazon/efs/efs-utils.crt"),
                client_cert_pem_file: String::from(
                    "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem",
                ),
                client_private_key_pem_file: String::from("/etc/amazon/efs/privateKey.pem"),
                expected_server_hostname_tls: String::from(
                    "fs-12341234.efs.us-east-1.amazonaws.com",
                ),
            },
        };

        assert_eq!(result_config, expected_proxy_config);
    }

    #[test]
    fn test_parse_config_fips_disabled() {
        // Same config without `debug`/`output`: defaults must kick in and
        // fips must parse as false.
        let config_string = r#"fips = no
foreground = quiet
socket = l:SO_REUSEADDR=yes
socket = a:SO_BINDTODEVICE=lo
pid = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid
port = 8081
initial_partition_ip = 127.0.0.1:2049

[efs]
accept = 127.0.0.1:21036
connect = fs-12341234.efs.us-east-1.amazonaws.com:2049
sslVersion = TLSv1.2
renegotiation = no
TIMEOUTbusy = 20
TIMEOUTclose = 0
TIMEOUTidle = 70
delay = yes
verify = 2
CAfile = /etc/amazon/efs/efs-utils.crt
cert = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem
key = /etc/amazon/efs/privateKey.pem
checkHost = fs-12341234.efs.us-east-1.amazonaws.com
"#;

        let result_config = ProxyConfig::from_str(config_string).unwrap();
        let expected_proxy_config = ProxyConfig {
            fips: false,
            pid_file_path: String::from(
                "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid",
            ),
            debug: DEFAULT_LOG_LEVEL.to_string(),
            output: None,
            nested_config: EfsConfig {
                listen_addr: String::from("127.0.0.1:21036"),
                mount_target_addr: String::from("fs-12341234.efs.us-east-1.amazonaws.com:2049"),
                ca_file: String::from("/etc/amazon/efs/efs-utils.crt"),
                client_cert_pem_file: String::from(
                    "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem",
                ),
                client_private_key_pem_file: String::from("/etc/amazon/efs/privateKey.pem"),
                expected_server_hostname_tls: String::from(
                    "fs-12341234.efs.us-east-1.amazonaws.com",
                ),
            },
        };

        assert_eq!(result_config, expected_proxy_config);
    }
}