From ddc2bd0684e8baee11907923fccd64d8e19ea83a Mon Sep 17 00:00:00 2001 From: Sean Zatz Date: Mon, 8 Apr 2024 20:51:00 +0000 Subject: [PATCH 01/51] Update Readme markdown for crossaccount prerequisites --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a92f96ed..88691852 100644 --- a/README.md +++ b/README.md @@ -273,7 +273,7 @@ Given a client instance in Account A/VPC A and an EFS instance in Account B/VPC - Create an EFS Mount Target in each of the Availability Zones from the above step in VPC B if they do not exist already. - Attach a VPC Security Group to each of the EFS Mount Targets which allow inbound NFS access from VPC A’s CIDR block. - Route 53 Setup: - - For a mount target A in , create a Route 53 Hosted Zone for the domain ..efs..amazonaws.com. + - For a mount target A in \, create a Route 53 Hosted Zone for the domain \.\.efs.\.amazonaws.com. - Then, add an A record in the Hosted Zone which resolves to mount target A's IP Address. Leave the subdomain blank. From 0c5b52d3f6bccbe27a1b6cf1b3089e964141308e Mon Sep 17 00:00:00 2001 From: Ryan Stankiewicz Date: Fri, 12 Apr 2024 14:02:22 +0000 Subject: [PATCH 02/51] Release efs-utils v2.0.0 This commit replaces stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. 
--- .circleci/config.yml | 113 +- Makefile | 5 +- README.md | 40 +- amazon-efs-utils.spec | 38 +- build-deb.sh | 8 +- config.ini | 2 +- config.toml | 12 + dist/amazon-efs-utils.control | 2 +- dist/efs-utils.crt | 12 - man/mount.efs.8 | 20 +- src/mount_efs/__init__.py | 358 ++-- src/proxy/Cargo.toml | 38 + src/proxy/build.rs | 5 + src/proxy/src/config_parser.rs | 211 +++ src/proxy/src/connections.rs | 710 ++++++++ src/proxy/src/controller.rs | 1614 +++++++++++++++++ src/proxy/src/efs_prot.x | 57 + src/proxy/src/efs_rpc.rs | 318 ++++ src/proxy/src/error.rs | 41 + src/proxy/src/lib.rs | 4 + src/proxy/src/logger.rs | 65 + src/proxy/src/main.rs | 138 ++ src/proxy/src/proxy.rs | 525 ++++++ src/proxy/src/proxy_identifier.rs | 54 + src/proxy/src/rpc.rs | 242 +++ src/proxy/src/shutdown.rs | 85 + src/proxy/src/status_reporter.rs | 110 ++ src/proxy/src/tls.rs | 230 +++ src/watchdog/__init__.py | 152 +- .../test_add_stunnel_ca_options.py | 12 +- ...otstrap_tls.py => test_bootstrap_proxy.py} | 252 ++- .../test_get_nfs_mount_options.py | 83 +- test/mount_efs_test/test_main.py | 33 +- test/mount_efs_test/test_mount_nfs.py | 41 +- test/mount_efs_test/test_mount_with_proxy.py | 221 +++ .../test_write_stunnel_config_file.py | 215 ++- .../test_write_tls_tunnel_state_file.py | 12 +- .../test_send_signal_to_stunnel_processes.py | 2 +- test/watchdog_test/test_start_tls_tunnel.py | 74 +- 39 files changed, 5793 insertions(+), 361 deletions(-) create mode 100644 config.toml create mode 100644 src/proxy/Cargo.toml create mode 100644 src/proxy/build.rs create mode 100644 src/proxy/src/config_parser.rs create mode 100644 src/proxy/src/connections.rs create mode 100644 src/proxy/src/controller.rs create mode 100644 src/proxy/src/efs_prot.x create mode 100644 src/proxy/src/efs_rpc.rs create mode 100644 src/proxy/src/error.rs create mode 100644 src/proxy/src/lib.rs create mode 100644 src/proxy/src/logger.rs create mode 100644 src/proxy/src/main.rs create mode 100644 src/proxy/src/proxy.rs create 
mode 100644 src/proxy/src/proxy_identifier.rs create mode 100644 src/proxy/src/rpc.rs create mode 100644 src/proxy/src/shutdown.rs create mode 100644 src/proxy/src/status_reporter.rs create mode 100644 src/proxy/src/tls.rs rename test/mount_efs_test/{test_bootstrap_tls.py => test_bootstrap_proxy.py} (51%) create mode 100644 test/mount_efs_test/test_mount_with_proxy.py diff --git a/.circleci/config.yml b/.circleci/config.yml index c59a7922..1d7fda93 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -37,10 +37,20 @@ commands: name: Repo update command: | apt-get update + - run: + name: Install curl + command: | + apt-get -y install curl + - run: + name: Install latest Rust + command: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + . "$HOME/.cargo/env" - run: name: Install dependencies command: | - apt-get -y install binutils git + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata + apt-get -y install binutils git rustc cargo pkg-config libssl-dev - run: name: Add local build repo as safe git directory command: | @@ -49,6 +59,9 @@ commands: - run: name: Build DEB command: | + . "$HOME/.cargo/env" + rustc --version + cargo --version ./build-deb.sh - run: name: Install package @@ -64,7 +77,7 @@ commands: - run: name: Install dependencies command: | - yum -y install rpm-build make systemd + yum -y install rpm-build make systemd rust cargo openssl-devel - run: name: Build RPM command: | @@ -81,6 +94,40 @@ commands: name: Check changelog command: | rpm -q --changelog amazon-efs-utils + build-rpm-rustup: + steps: + - run: + name: Install dependencies + command: | + yum install -y curl + - run: + name: Install latest Rust + command: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + - checkout + - run: + name: Install dependencies + command: | + yum -y install rpm-build make systemd rust cargo openssl-devel + - run: + name: Build RPM + command: | + . 
"$HOME/.cargo/env" + rustc --version + make rpm + - run: + name: Install package + command: | + yum -y install build/amazon-efs-utils*rpm + - run: + name: Check installed successfully + command: | + mount.efs --version + - run: + name: Check changelog + command: | + rpm -q --changelog amazon-efs-utils + build-suse-rpm: steps: - checkout @@ -88,14 +135,24 @@ commands: name: Refresh source command: | zypper refresh + - run: + name: Install curl + command: | + zypper install -y curl + - run: + name: Install latest Rust + command: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y - run: name: Install dependencies command: | zypper install -y --force-resolution rpm-build - zypper install -y make systemd + zypper install -y make systemd rust cargo openssl-devel - run: name: Build RPM command: | + . "$HOME/.cargo/env" + rustc --version make rpm - run: name: Install package @@ -116,14 +173,6 @@ commands: command: | sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - build-debian-eol-repo: - steps: - - run: - name: change repo url to archive.debian.org and remove updates repo for EOL versions - command: | - sed -i 's/deb.debian.org/archive.debian.org/g' /etc/apt/sources.list - sed -i 's/security.debian.org/archive.debian.org/g' /etc/apt/sources.list - sed -i '/stretch-updates/d' /etc/apt/sources.list jobs: test: parameters: @@ -152,7 +201,7 @@ jobs: image: << parameters.image >> steps: - build-rpm - build-suse-rpm-package: + build-rpm-package-rustup: parameters: image: type: string @@ -160,8 +209,8 @@ jobs: name: linux image: << parameters.image >> steps: - - build-suse-rpm - build-centos-rpm-package: + - build-rpm-rustup + build-suse-rpm-package: parameters: image: type: string @@ -169,9 +218,8 @@ jobs: name: linux image: << parameters.image >> steps: - - build-centos-repo - - build-rpm - build-debian-eol-rpm-package: + - 
build-suse-rpm + build-centos-rpm-package: parameters: image: type: string @@ -179,8 +227,8 @@ jobs: name: linux image: << parameters.image >> steps: - - build-debian-eol-repo - - build-deb + - build-centos-repo + - build-rpm-rustup workflows: workflow: jobs: @@ -217,21 +265,12 @@ workflows: - build-deb-package: name: ubuntu22 image: ubuntu:22.04 - - build-debian-eol-rpm-package: - name: debian9 - image: debian:stretch - - build-deb-package: - name: debian10 - image: debian:buster - build-deb-package: name: debian11 image: debian:bullseye - build-centos-rpm-package: name: centos-latest image: centos:latest - - build-rpm-package: - name: centos7 - image: centos:centos7 - build-centos-rpm-package: name: centos8 image: centos:centos8 @@ -244,31 +283,25 @@ workflows: - build-rpm-package: name: amazon-linux-2 image: amazonlinux:2 - - build-rpm-package: - name: amazon-linux - image: amazonlinux:1 - build-rpm-package: name: fedora-latest image: fedora:latest - - build-rpm-package: - name: fedora28 - image: fedora:28 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora29 image: fedora:29 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora30 image: fedora:30 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora31 image: fedora:31 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora32 image: fedora:32 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora33 image: fedora:33 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora34 image: fedora:34 - build-rpm-package: diff --git a/Makefile b/Makefile index fd48bfc1..3cc4e47f 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,7 @@ PACKAGE_NAME = amazon-efs-utils SOURCE_TARBALL = $(PACKAGE_NAME).tar.gz SPECFILE = $(PACKAGE_NAME).spec BUILD_DIR = build/rpmbuild +PROXY_VERSION = 2.0.0 export PYTHONPATH := $(shell pwd)/src .PHONY: clean @@ -31,6 +32,7 @@ tarball: clean mkdir -p $(PACKAGE_NAME)/src cp -rp src/mount_efs $(PACKAGE_NAME)/src cp -rp src/watchdog 
$(PACKAGE_NAME)/src + cp -rp src/proxy $(PACKAGE_NAME)/src mkdir -p ${PACKAGE_NAME}/man cp -rp man/mount.efs.8 ${PACKAGE_NAME}/man @@ -45,7 +47,8 @@ rpm-only: mkdir -p $(BUILD_DIR)/{SPECS,COORD_SOURCES,DATA_SOURCES,BUILD,RPMS,SOURCES,SRPMS} cp $(SPECFILE) $(BUILD_DIR)/SPECS cp $(SOURCE_TARBALL) $(BUILD_DIR)/SOURCES - rpmbuild -ba --define "_topdir `pwd`/$(BUILD_DIR)" $(BUILD_DIR)/SPECS/$(SPECFILE) + cp config.toml $(BUILD_DIR)/SOURCES + rpmbuild -ba --define "_topdir `pwd`/$(BUILD_DIR)" --define "include_vendor_tarball false" $(BUILD_DIR)/SPECS/$(SPECFILE) cp $(BUILD_DIR)/RPMS/*/*rpm build .PHONY: rpm diff --git a/README.md b/README.md index 88691852..dac59fae 100644 --- a/README.md +++ b/README.md @@ -8,21 +8,17 @@ The `efs-utils` package has been verified against the following Linux distributi | Distribution | Package Type | `init` System | |----------------------| ----- | --------- | -| Amazon Linux 2017.09 | `rpm` | `upstart` | | Amazon Linux 2 | `rpm` | `systemd` | | Amazon Linux 2023 | `rpm` | `systemd` | -| CentOS 7 | `rpm` | `systemd` | | CentOS 8 | `rpm` | `systemd` | | RHEL 7 | `rpm` | `systemd` | | RHEL 8 | `rpm` | `systemd` | | RHEL 9 | `rpm` | `systemd` | -| Fedora 28 | `rpm` | `systemd` | | Fedora 29 | `rpm` | `systemd` | | Fedora 30 | `rpm` | `systemd` | | Fedora 31 | `rpm` | `systemd` | | Fedora 32 | `rpm` | `systemd` | -| Debian 9 | `deb` | `systemd` | -| Debian 10 | `deb` | `systemd` | +| Debian 11 | `deb` | `systemd` | | Ubuntu 16.04 | `deb` | `systemd` | | Ubuntu 18.04 | `deb` | `systemd` | | Ubuntu 20.04 | `deb` | `systemd` | @@ -55,6 +51,7 @@ The `efs-utils` package has been verified against the following MacOS distributi - [MacOS](#macos) - [amazon-efs-mount-watchdog](#amazon-efs-mount-watchdog) - [Troubleshooting](#troubleshooting) + - [Upgrading to efs-utils v2.0.0](#upgrading-from-efs-utils-v1-to-v2) - [Upgrading stunnel for RHEL/CentOS](#upgrading-stunnel-for-rhelcentos) - [Upgrading stunnel for SLES12](#upgrading-stunnel-for-sles12) - 
[Upgrading stunnel for MacOS](#upgrading-stunnel-for-macos) @@ -81,9 +78,11 @@ The `efs-utils` package has been verified against the following MacOS distributi ## Prerequisites * `nfs-utils` (RHEL/CentOS/Amazon Linux/Fedora) or `nfs-common` (Debian/Ubuntu) -* OpenSSL 1.0.2+ +* OpenSSL-devel 1.0.2+ * Python 3.4+ * `stunnel` 4.56+ +- `rust` 1.68+ +- `cargo` ## Optional @@ -93,7 +92,7 @@ The `efs-utils` package has been verified against the following MacOS distributi ### On Amazon Linux distributions -For those using Amazon Linux or Amazon Linux 2, the easiest way to install `efs-utils` is from Amazon's repositories: +For those using Amazon Linux, the easiest way to install `efs-utils` is from Amazon's repositories: ```bash $ sudo yum -y install amazon-efs-utils @@ -121,7 +120,7 @@ Other distributions require building the package from source and installing it. If the distribution is not OpenSUSE or SLES ```bash -$ sudo yum -y install git rpm-build make +$ sudo yum -y install git rpm-build make rust cargo openssl-devel $ git clone https://github.com/aws/efs-utils $ cd efs-utils $ make rpm @@ -132,7 +131,7 @@ Otherwise ```bash $ sudo zypper refresh -$ sudo zypper install -y git rpm-build make +$ sudo zypper install -y git rpm-build make rust cargo openssl-devel $ git clone https://github.com/aws/efs-utils $ cd efs-utils $ make rpm @@ -152,13 +151,20 @@ sudo zypper refresh ```bash $ sudo apt-get update -$ sudo apt-get -y install git binutils +$ sudo apt-get -y install git binutils rustc cargo pkg-config libssl-dev $ git clone https://github.com/aws/efs-utils $ cd efs-utils $ ./build-deb.sh $ sudo apt-get -y install ./build/amazon-efs-utils*deb ``` +If your Debian distribution doesn't provide a rust or cargo package, or your distribution provides versions +that are older than 1.68, then you can install rust and cargo through rustup: +```bash +$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +. 
"$HOME/.cargo/env" +``` + ### On MacOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura distribution For EC2 Mac instances running macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, you can install amazon-efs-utils from the @@ -194,8 +200,10 @@ $ make test ## Usage ### mount.efs +`efs-utils` includes a mount helper utility, `mount.efs`, that simplifies and improves the performance of EFS file system mounts. -`efs-utils` includes a mount helper utility to simplify mounting and using EFS file systems. +`mount.efs` launches a proxy process that forwards NFS traffic from the kernel's NFS client to EFS. +This proxy is responsible for TLS encryption, and for providing improved throughput performance. To mount with the recommended default options, simply run: @@ -318,6 +326,16 @@ You can also enable stunnel debug logs with Make sure to perform the failed mount again after running the prior commands before pulling the logs. +## Upgrading from efs-utils v1 to v2 +Efs-utils v2.0.0 replaces stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. +Efs-proxy lays the foundation for upcoming feature launches at EFS. + +To utilize the improved performance benefits of efs-proxy, you must re-mount any existing mounts. + +Efs-proxy is not compatible with OCSP or Mac clients. In these cases, efs-utils will automatically revert back to using stunnel. + +If you are building efs-utils v2.0.0 from source, then you need Rust and Cargo >= 1.68. + ## Upgrading stunnel for RHEL/CentOS By default, when using the EFS mount helper with TLS, it enforces certificate hostname checking. The EFS mount helper uses the `stunnel` program for its TLS functionality. Please note that some versions of Linux do not include a version of `stunnel` that supports TLS features by default. When using such a Linux version, mounting an EFS file system using TLS will fail. 
diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index ff6eabdd..658ad89b 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -6,6 +6,8 @@ # the License. # +%bcond_without check + %if 0%{?amzn1} %global python_requires python36 %else @@ -34,8 +36,13 @@ %global efs_bindir /sbin %endif +%global proxy_name efs-proxy +%global proxy_version 2.0.0 + +%{?!include_vendor_tarball:%define include_vendor_tarball true} + Name : amazon-efs-utils -Version : 1.36.0 +Version : 2.0.0 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -43,8 +50,7 @@ Group : Amazon/Tools License : MIT URL : https://aws.amazon.com/efs - -BuildArch : noarch +BuildArchitectures: x86_64 aarch64 Requires : nfs-utils %if 0%{?amzn2} @@ -67,13 +73,32 @@ Requires(preun) : /sbin/service /sbin/chkconfig Requires(postun) : /sbin/service %endif -Source : %{name}.tar.gz +BuildRequires : cargo rust +BuildRequires: openssl-devel + +Source0 : %{name}.tar.gz +%if "%{include_vendor_tarball}" == "true" +Source1 : %{proxy_name}-%{proxy_version}-vendor.tar.xz +Source2 : config.toml +%endif %description This package provides utilities for simplifying the use of EFS file systems +%global debug_package %{nil} + %prep %setup -n %{name} +mkdir -p %{_builddir}/%{name}/src/proxy/.cargo +%if "%{include_vendor_tarball}" == "true" +cp %{SOURCE2} %{_builddir}/%{name}/src/proxy/.cargo/ +tar xf %{SOURCE1} +mv vendor %{_builddir}/%{name}/src/proxy/ +%endif + +%build +cd %{_builddir}/%{name}/src/proxy +cargo build --release --manifest-path %{_builddir}/%{name}/src/proxy/Cargo.toml %install mkdir -p %{buildroot}%{_sysconfdir}/amazon/efs @@ -95,6 +120,7 @@ install -p -m 444 %{_builddir}/%{name}/dist/efs-utils.crt %{buildroot}%{_sysconf install -p -m 755 %{_builddir}/%{name}/src/mount_efs/__init__.py %{buildroot}%{efs_bindir}/mount.efs install -p -m 755 %{_builddir}/%{name}/src/watchdog/__init__.py %{buildroot}%{_bindir}/amazon-efs-mount-watchdog install 
-p -m 644 %{_builddir}/%{name}/man/mount.efs.8 %{buildroot}%{_mandir}/man8 +install -p -m 755 %{_builddir}/%{name}/src/proxy/target/release/efs-proxy %{buildroot}%{efs_bindir}/efs-proxy %files %defattr(-,root,root,-) @@ -105,6 +131,7 @@ install -p -m 644 %{_builddir}/%{name}/man/mount.efs.8 %{buildroot}%{_mandir}/ma %endif %{_sysconfdir}/amazon/efs/efs-utils.crt %{efs_bindir}/mount.efs +%{efs_bindir}/efs-proxy %{_bindir}/amazon-efs-mount-watchdog /var/log/amazon %{_mandir}/man8/mount.efs.8.gz @@ -138,6 +165,9 @@ fi %clean %changelog +* Mon Apr 08 2024 Ryan Stankiewicz - 2.0.0 +- Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. + * Mon Mar 18 2024 Sean Zatz - 1.36.0 - Support new mount option: crossaccount, conduct cross account mounts via ip address. Use client AZ-ID to choose mount target. diff --git a/build-deb.sh b/build-deb.sh index f2e499d6..d97ab37a 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=1.36.0 +VERSION=2.0.0 RELEASE=1 DEB_SYSTEM_RELEASE_PATH=/etc/os-release @@ -28,12 +28,18 @@ mkdir -p ${BUILD_ROOT}/usr/bin mkdir -p ${BUILD_ROOT}/var/log/amazon/efs mkdir -p ${BUILD_ROOT}/usr/share/man/man8 +echo 'Building efs-proxy' +cd src/proxy +cargo build --release --manifest-path ${BASE_DIR}/src/proxy/Cargo.toml +cd ${BASE_DIR} + echo 'Copying application files' install -p -m 644 dist/amazon-efs-mount-watchdog.conf ${BUILD_ROOT}/etc/init install -p -m 644 dist/amazon-efs-mount-watchdog.service ${BUILD_ROOT}/etc/systemd/system install -p -m 444 dist/efs-utils.crt ${BUILD_ROOT}/etc/amazon/efs install -p -m 644 dist/efs-utils.conf ${BUILD_ROOT}/etc/amazon/efs install -p -m 755 src/mount_efs/__init__.py ${BUILD_ROOT}/sbin/mount.efs +install -p -m 755 src/proxy/target/release/efs-proxy ${BUILD_ROOT}/usr/bin/efs-proxy install -p -m 755 
src/watchdog/__init__.py ${BUILD_ROOT}/usr/bin/amazon-efs-mount-watchdog echo 'Copying install scripts' diff --git a/config.ini b/config.ini index 49ba7066..6944864d 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=1.36.0 +version=2.0.0 release=1 diff --git a/config.toml b/config.toml new file mode 100644 index 00000000..ba8862ec --- /dev/null +++ b/config.toml @@ -0,0 +1,12 @@ +[source] + +# Under the `source` table are a number of other tables whose keys are a +# name for the relevant source. For example this section defines a new +# source, called `my-vendor-source`, which comes from a directory +# located at `vendor` relative to the directory containing this `.cargo/config.toml` +# file +[source.my-vendor-source] +directory = "vendor" + +[source.crates-io] +replace-with = "my-vendor-source" \ No newline at end of file diff --git a/dist/amazon-efs-utils.control b/dist/amazon-efs-utils.control index d40419ca..d7d71c79 100644 --- a/dist/amazon-efs-utils.control +++ b/dist/amazon-efs-utils.control @@ -1,6 +1,6 @@ Package: amazon-efs-utils Architecture: all -Version: 1.36.0 +Version: 2.0.0 Section: utils Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux Priority: optional diff --git a/dist/efs-utils.crt b/dist/efs-utils.crt index fdcc5585..11caee70 100644 --- a/dist/efs-utils.crt +++ b/dist/efs-utils.crt @@ -1,12 +1,3 @@ -# -# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. -# -# Licensed under the MIT License. See the LICENSE accompanying this file -# for the specific language governing permissions and limitations under -# the License. 
-# - -# Amazon Root CA 1 -----BEGIN CERTIFICATE----- MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 @@ -28,7 +19,6 @@ o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU rqXRfboQnoZsG4q5WTP468SQvvG5 -----END CERTIFICATE----- -# Amazon Root CA 2 -----BEGIN CERTIFICATE----- MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6 @@ -61,7 +51,6 @@ n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE 4PsJYGw= -----END CERTIFICATE----- -# Amazon Root CA 3 -----BEGIN CERTIFICATE----- MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5 MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g @@ -75,7 +64,6 @@ BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM YyRIHN8wfdVoOw== -----END CERTIFICATE----- -# Amazon Root CA 4 -----BEGIN CERTIFICATE----- MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5 MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g diff --git a/man/mount.efs.8 b/man/mount.efs.8 index 3a18f30a..f962fd97 100644 --- a/man/mount.efs.8 +++ b/man/mount.efs.8 @@ -7,11 +7,14 @@ .SH "DESCRIPTION" .sp \fBmount\&.efs\fR is part of the \fBamazon\-efs\-utils\fR \ -package, which simplifies using EFS file systems\&. +package. It improves mount performance and simplifies using EFS file systems\&. .sp \fBmount\&.efs\fR is meant to be used through the \ \fBmount\fR(8) command for mounting EFS file systems\&. .sp +\fBmount\&.efs\fR launches a proxy process that forwards NFS traffic from the kernel's NFS client to EFS. \ +This proxy is responsible for TLS encryption, and for providing improved throughput performance. +.sp \fIfs-id-or-dns-name\fR has to be of one of the following \ two forms: .P @@ -77,8 +80,9 @@ this option is by default passed and the EFS file system is mounted over TLS\&. 
Mounts the EFS file system without TLS, applies for Mac distributions only\&. .TP \fBtlsport=\fR\fIn\fR -Configure the TLS relay to listen on the specified port\&. By default, the \ -tlsport is choosing randomly from port range defined in the config file located \ +Configures the proxy process to listen for connections from the NFS client on the specified port\&. This is applicable to both non-tls and tls mounts. + By default, the \ +tlsport is chosen randomly from port range defined in the config file located \ at \fI/etc/amazon/efs/efs\-utils\&.conf\&\fR. .TP \fBverify=\fR\fIn\fR @@ -88,7 +92,9 @@ more information, see \fBstunnel(8)\fR\&. \fBocsp / noocsp\fR Selects whether to perform OCSP validation on TLS certificates\&, \ overriding /etc/amazon/efs/efs-utils.conf. By default OCSP is disabled. \ -For more information, see \fBstunnel(8)\fR\&. +For more information, see \fBstunnel(8)\fR\&. \ +The ocsp mount option is incompatible with the efs-proxy process, and will revert efs-utils \ +to the legacy "stunnel" mode, which does not support improved per-client throughput performance. .TP \fBiam\fR Use the system's IAM identity to authenticate with EFS. The mount helper will try \ @@ -132,6 +138,12 @@ Use the port 2049 to bypass portmapper daemon on EC2 Mac instances running macOS .TP \fBmounttargetip\fR Mount the EFS file system to the specified mount target ip address\&. +.TP +\fBstunnel\fR +Forward NFS traffic from the local NFS client to EFS using stunnel instead of efs-proxy. +This will enable compatibility with the ocsp mount option, but will not +deliver the increased throughput performance provided by efs-proxy. \ +This option is enabled by default for Mac clients. 
.if n \{\ .RE .\} diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index b6a91462..c8b7566c 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "1.36.0" +VERSION = "2.0.0" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -222,9 +222,13 @@ DEFAULT_STUNNEL_VERIFY_LEVEL = 2 DEFAULT_STUNNEL_CAFILE = "/etc/amazon/efs/efs-utils.crt" +LEGACY_STUNNEL_MOUNT_OPTION = "stunnel" + NOT_BEFORE_MINS = 15 NOT_AFTER_HOURS = 3 +EFS_PROXY_TLS_OPTION = "--tls" + EFS_ONLY_OPTIONS = [ "accesspoint", "awscredsuri", @@ -244,6 +248,7 @@ "jwtpath", "fsap", "crossaccount", + LEGACY_STUNNEL_MOUNT_OPTION, ] UNSUPPORTED_OPTIONS = ["capath"] @@ -1039,6 +1044,11 @@ def get_resp_obj(request_resp, url, unsuccessful_resp): def parse_options(options): + """ + Parses a comma delineated string of key=value options (e.g. 'opt1,opt2=val'). + Returns a dictionary of key,value pairs, where value = None if + it was not provided. + """ opts = {} for o in options.split(","): if "=" in o: @@ -1172,7 +1182,8 @@ def serialize_stunnel_config(config, header=None): return lines -def add_stunnel_ca_options(efs_config, config, options, region): +# These options are used by both stunnel and efs-proxy for TLS mounts +def add_tunnel_ca_options(efs_config, config, options, region): if "cafile" in options: stunnel_cafile = options["cafile"] else: @@ -1257,6 +1268,11 @@ def _stunnel_bin(): return find_command_path("stunnel", installation_message) +def _efs_proxy_bin(): + error_message = "The efs-proxy binary is packaged with efs-utils. It was deleted or not installed correctly." 
+ return find_command_path("efs-proxy", error_message) + + def find_command_path(command, install_method): # If not running on macOS, use linux paths if not check_if_platform_is_mac(): @@ -1314,6 +1330,7 @@ def write_stunnel_config_file( log_dir=LOG_DIR, cert_details=None, fallback_ip_address=None, + efs_proxy_enabled=True, ): """ Serializes stunnel configuration to a file. Unfortunately this does not conform to Python's config file format, so we have to @@ -1326,12 +1343,13 @@ def write_stunnel_config_file( system_release_version = get_system_release_version() global_config = dict(STUNNEL_GLOBAL_CONFIG) - if is_stunnel_option_supported( + if not efs_proxy_enabled and is_stunnel_option_supported( stunnel_options, b"foreground", b"quiet", emit_warning_log=False ): # Do not log to stderr of subprocess in addition to the destinations specified with syslog and output. # Only support in stunnel version 5.25+. global_config["foreground"] = "quiet" + if any( release in system_release_version for release in SKIP_NO_SO_BINDTODEVICE_RELEASES @@ -1350,12 +1368,17 @@ def write_stunnel_config_file( CONFIG_SECTION, "stunnel_logs_file" ).replace("{fs_id}", fs_id) else: + proxy_log_file = ( + "%s.efs-proxy.log" if efs_proxy_enabled else "%s.stunnel.log" + ) global_config["output"] = os.path.join( - log_dir, "%s.stunnel.log" % mount_filename + log_dir, proxy_log_file % mount_filename ) + global_config["pid"] = os.path.join( state_file_dir, mount_filename + "+", "stunnel.pid" ) + if get_fips_config(config): global_config["fips"] = "yes" @@ -1367,9 +1390,11 @@ def write_stunnel_config_file( else: efs_config["connect"] = efs_config["connect"] % dns_name - efs_config["verify"] = verify_level - if verify_level > 0: - add_stunnel_ca_options(efs_config, config, options, region) + # Verify level is only valid for tls mounts + if (verify_level is not None) and tls_enabled(options): + efs_config["verify"] = verify_level + if verify_level > 0: + add_tunnel_ca_options(efs_config, config, 
options, region) if cert_details: efs_config["cert"] = cert_details["certificate"] @@ -1381,27 +1406,30 @@ def write_stunnel_config_file( % (CONFIG_FILE, "https://docs.aws.amazon.com/console/efs/troubleshooting-tls") ) - if get_boolean_config_item_value( - config, CONFIG_SECTION, "stunnel_check_cert_hostname", default_value=True - ): - if is_stunnel_option_supported(stunnel_options, b"checkHost"): - # Stunnel checkHost option checks if the specified DNS host name or wildcard matches any of the provider in peer - # certificate's CN fields, after introducing the AZ field in dns name, the host name in the stunnel config file - # is not valid, remove the az info there - efs_config["checkHost"] = dns_name[dns_name.index(fs_id) :] - else: - fatal_error(tls_controls_message % "stunnel_check_cert_hostname") + if tls_enabled(options): + # These config options are not applicable to non-tls mounts with efs-proxy + if get_boolean_config_item_value( + config, CONFIG_SECTION, "stunnel_check_cert_hostname", default_value=True + ): + if (not efs_proxy_enabled) and ( + not is_stunnel_option_supported(stunnel_options, b"checkHost") + ): + fatal_error(tls_controls_message % "stunnel_check_cert_hostname") + else: + efs_config["checkHost"] = dns_name[dns_name.index(fs_id) :] - # Only use the config setting if the override is not set - if ocsp_enabled: - if is_stunnel_option_supported(stunnel_options, b"OCSPaia"): - efs_config["OCSPaia"] = "yes" - else: - fatal_error(tls_controls_message % "stunnel_check_cert_validity") + # Only use the config setting if the override is not set + if not efs_proxy_enabled and ocsp_enabled: + if is_stunnel_option_supported(stunnel_options, b"OCSPaia"): + efs_config["OCSPaia"] = "yes" + else: + fatal_error(tls_controls_message % "stunnel_check_cert_validity") # If the stunnel libwrap option is supported, we disable the usage of /etc/hosts.allow and /etc/hosts.deny by # setting the option to no - if is_stunnel_option_supported(stunnel_options, b"libwrap"): 
+ if not efs_proxy_enabled and is_stunnel_option_supported( + stunnel_options, b"libwrap" + ): efs_config["libwrap"] = "no" stunnel_config = "\n".join( @@ -1420,7 +1448,7 @@ def write_stunnel_config_file( return stunnel_config_file -def write_tls_tunnel_state_file( +def write_tunnel_state_file( fs_id, mountpoint, tls_port, @@ -1433,6 +1461,8 @@ def write_tls_tunnel_state_file( """ Return the name of the temporary file containing TLS tunnel state, prefixed with a '~'. This file needs to be renamed to a non-temporary version following a successful mount. + + The "tunnel" here refers to efs-proxy, or stunnel. """ state_file = "~" + get_mount_specific_filename(fs_id, mountpoint, tls_port) @@ -1453,19 +1483,19 @@ def write_tls_tunnel_state_file( return state_file -def rewrite_tls_tunnel_state_file(state, state_file_dir, state_file): +def rewrite_tunnel_state_file(state, state_file_dir, state_file): with open(os.path.join(state_file_dir, state_file), "w") as f: json.dump(state, f) return state_file -def update_tls_tunnel_temp_state_file_with_tunnel_pid( +def update_tunnel_temp_state_file_with_tunnel_pid( temp_tls_state_file, state_file_dir, stunnel_pid ): with open(os.path.join(state_file_dir, temp_tls_state_file), "r") as f: state = json.load(f) state["pid"] = stunnel_pid - temp_tls_state_file = rewrite_tls_tunnel_state_file( + temp_tls_state_file = rewrite_tunnel_state_file( state, state_file_dir, temp_tls_state_file ) return temp_tls_state_file @@ -1476,9 +1506,9 @@ def test_tunnel_process(tunnel_proc, fs_id): if tunnel_proc.returncode is not None: _, err = tunnel_proc.communicate() fatal_error( - "Failed to initialize TLS tunnel for %s, please check mount.log for the failure reason." + "Failed to initialize tunnel for %s, please check mount.log for the failure reason." % fs_id, - 'Failed to start TLS tunnel (errno=%d), stderr="%s". If the stderr is lacking enough details, please ' + 'Failed to start tunnel (errno=%d), stderr="%s". 
If the stderr is lacking enough details, please ' "enable stunnel debug log in efs-utils config file and retry the mount to capture more info." % (tunnel_proc.returncode, err.strip()), ) @@ -1642,8 +1672,12 @@ def get_tls_port_from_sock(tls_port_sock): return tls_port_sock.getsockname()[1] +def tls_enabled(options): + return "tls" in options + + @contextmanager -def bootstrap_tls( +def bootstrap_proxy( config, init_system, dns_name, @@ -1652,85 +1686,115 @@ def bootstrap_tls( options, state_file_dir=STATE_FILE_DIR, fallback_ip_address=None, + efs_proxy_enabled=True, ): - tls_port_sock = choose_tls_port_and_get_bind_sock(config, options, state_file_dir) - tls_port = get_tls_port_from_sock(tls_port_sock) + """ + Generates a TLS private key and client-side certificate, a stunnel configuration file, and a state file + that is used to pass information to the Watchdog process. + + This function will spin up a stunnel or efs-proxy process, and pass it the stunnel configuration file. + The client-side certificate generated by this function contains IAM information that can be used by the EFS backend to enforce + file system policies. + + The state file passes information about the mount and the associated proxy process (whether that's stunnel or efs-proxy) to + the Watchdog daemon service. This allows Watchdog to monitor the proxy process's health. + + This function will yield a handle on the proxy process, whether it's efs-proxy or stunnel. + """ + + proxy_listen_sock = choose_tls_port_and_get_bind_sock( + config, options, state_file_dir + ) + proxy_listen_port = get_tls_port_from_sock(proxy_listen_sock) try: # override the tlsport option so that we can later override the port the NFS client uses to connect to stunnel. # if the user has specified tlsport=X at the command line this will just re-set tlsport to X. 
- options["tlsport"] = tls_port + options["tlsport"] = proxy_listen_port use_iam = "iam" in options ap_id = options.get("accesspoint") - cert_details = {} + cert_details = None security_credentials = None client_info = get_client_info(config) region = get_target_region(config) - if use_iam: - aws_creds_uri = options.get("awscredsuri") - role_arn = options.get("rolearn") - jwt_path = options.get("jwtpath") - if aws_creds_uri: - kwargs = {"aws_creds_uri": aws_creds_uri} - elif role_arn and jwt_path: - kwargs = {"role_arn": role_arn, "jwt_path": jwt_path} - else: - kwargs = {"awsprofile": get_aws_profile(options, use_iam)} - - security_credentials, credentials_source = get_aws_security_credentials( - config, use_iam, region, **kwargs - ) + if tls_enabled(options): + cert_details = {} + # IAM can only be used for tls mounts + if use_iam: + aws_creds_uri = options.get("awscredsuri") + role_arn = options.get("rolearn") + jwt_path = options.get("jwtpath") + if aws_creds_uri: + kwargs = {"aws_creds_uri": aws_creds_uri} + elif role_arn and jwt_path: + kwargs = {"role_arn": role_arn, "jwt_path": jwt_path} + else: + kwargs = {"awsprofile": get_aws_profile(options, use_iam)} - if credentials_source: - cert_details["awsCredentialsMethod"] = credentials_source - logging.debug( - "AWS credentials source used for IAM authentication: ", - credentials_source, + security_credentials, credentials_source = get_aws_security_credentials( + config, use_iam, region, **kwargs ) - if ap_id: - cert_details["accessPoint"] = ap_id + if credentials_source: + cert_details["awsCredentialsMethod"] = credentials_source + logging.debug( + "AWS credentials source used for IAM authentication: ", + credentials_source, + ) - # additional symbol appended to avoid naming collisions - cert_details["mountStateDir"] = ( - get_mount_specific_filename(fs_id, mountpoint, tls_port) + "+" - ) - # common name for certificate signing request is max 64 characters - cert_details["commonName"] = 
socket.gethostname()[0:64] - cert_details["region"] = region - cert_details["certificateCreationTime"] = create_certificate( - config, - cert_details["mountStateDir"], - cert_details["commonName"], - cert_details["region"], - fs_id, - security_credentials, - ap_id, - client_info, - base_path=state_file_dir, - ) - cert_details["certificate"] = os.path.join( - state_file_dir, cert_details["mountStateDir"], "certificate.pem" - ) - cert_details["privateKey"] = get_private_key_path() - cert_details["fsId"] = fs_id + # Access points must be mounted over TLS + if ap_id: + cert_details["accessPoint"] = ap_id + + # additional symbol appended to avoid naming collisions + cert_details["mountStateDir"] = ( + get_mount_specific_filename(fs_id, mountpoint, proxy_listen_port) + "+" + ) + # common name for certificate signing request is max 64 characters + cert_details["commonName"] = socket.gethostname()[0:64] + cert_details["region"] = region + cert_details["certificateCreationTime"] = create_certificate( + config, + cert_details["mountStateDir"], + cert_details["commonName"], + cert_details["region"], + fs_id, + security_credentials, + ap_id, + client_info, + base_path=state_file_dir, + ) + cert_details["certificate"] = os.path.join( + state_file_dir, cert_details["mountStateDir"], "certificate.pem" + ) + cert_details["privateKey"] = get_private_key_path() + cert_details["fsId"] = fs_id if not os.path.exists(state_file_dir): create_required_directory(config, state_file_dir) start_watchdog(init_system) - verify_level = int(options.get("verify", DEFAULT_STUNNEL_VERIFY_LEVEL)) + verify_level = ( + int(options.get("verify", DEFAULT_STUNNEL_VERIFY_LEVEL)) + if tls_enabled(options) + else None + ) + ocsp_enabled = is_ocsp_enabled(config, options) + if ocsp_enabled: + assert ( + not efs_proxy_enabled + ), "OCSP is not supported by efs-proxy, and efs-utils failed to revert to stunnel-mode." 
stunnel_config_file = write_stunnel_config_file( config, state_file_dir, fs_id, mountpoint, - tls_port, + proxy_listen_port, dns_name, verify_level, ocsp_enabled, @@ -1738,16 +1802,31 @@ def bootstrap_tls( region, cert_details=cert_details, fallback_ip_address=fallback_ip_address, - ) - tunnel_args = [_stunnel_bin(), stunnel_config_file] + efs_proxy_enabled=efs_proxy_enabled, + ) + if efs_proxy_enabled: + if "tls" in options: + tunnel_args = [ + _efs_proxy_bin(), + stunnel_config_file, + EFS_PROXY_TLS_OPTION, + ] + else: + tunnel_args = [ + _efs_proxy_bin(), + stunnel_config_file, + ] + else: + tunnel_args = [_stunnel_bin(), stunnel_config_file] + if "netns" in options: tunnel_args = ["nsenter", "--net=" + options["netns"]] + tunnel_args # This temp state file is acting like a tlsport lock file, which is why pid =- 1 - temp_tls_state_file = write_tls_tunnel_state_file( + temp_tls_state_file = write_tunnel_state_file( fs_id, mountpoint, - tls_port, + proxy_listen_port, -1, tunnel_args, [stunnel_config_file], @@ -1755,13 +1834,21 @@ def bootstrap_tls( cert_details=cert_details, ) finally: - # Always close the socket we created when choosing TLS port only until now to - # 1. avoid concurrent TLS mount port collision 2. enable stunnel process to bind the port - logging.debug("Closing socket used to choose TLS port %s.", tls_port) - tls_port_sock.close() + # When choosing a TLS port for efs-proxy/stunnel to listen on, we open the port to ensure it is free. + # However, we must free it again so efs-proxy/stunnel can bind to it. We make sure to only free it after + # we write the temporary state file, which acts like a tlsport lock file. This ensures we don't encounter + # any race conditions when choosing tls ports during concurrent mounts. 
+ logging.debug( + "Closing socket used to choose proxy listen port %s.", proxy_listen_port + ) + proxy_listen_sock.close() # launch the tunnel in a process group so if it has any child processes, they can be killed easily by the mount watchdog - logging.info('Starting TLS tunnel: "%s"', " ".join(tunnel_args)) + logging.info( + 'Starting %s: "%s"', + "efs-proxy" if efs_proxy_enabled else "stunnel", + " ".join(tunnel_args), + ) tunnel_proc = subprocess.Popen( tunnel_args, stdout=subprocess.DEVNULL, @@ -1769,9 +1856,13 @@ def bootstrap_tls( preexec_fn=os.setsid, close_fds=True, ) - logging.info("Started TLS tunnel, pid: %d", tunnel_proc.pid) + logging.info( + "Started %s, pid: %d", + "efs-proxy" if efs_proxy_enabled else "stunnel", + tunnel_proc.pid, + ) - update_tls_tunnel_temp_state_file_with_tunnel_pid( + update_tunnel_temp_state_file_with_tunnel_pid( temp_tls_state_file, state_file_dir, tunnel_proc.pid ) @@ -1784,6 +1875,8 @@ def bootstrap_tls( try: yield tunnel_proc finally: + # The caller of this function should use this function in the context of a `with` statement + # so that the state file is correctly renamed. os.rename( os.path.join(state_file_dir, temp_tls_state_file), os.path.join(state_file_dir, temp_tls_state_file[1:]), @@ -1813,7 +1906,17 @@ def check_if_nfsvers_is_compatible_with_macos(options): fatal_error("NFSv4.1 is not supported on MacOS, please switch to NFSv4.0") -def get_nfs_mount_options(options): +# Use stunnel instead of efs-proxy for tls mounts, +# and attach non-tls mounts directly to the mount target. 
+def legacy_stunnel_mode_enabled(options, config): + return ( + LEGACY_STUNNEL_MOUNT_OPTION in options + or check_if_platform_is_mac() + or is_ocsp_enabled(config, options) + ) + + +def get_nfs_mount_options(options, config): # If you change these options, update the man page as well at man/mount.efs.8 if "nfsvers" not in options and "vers" not in options: options["nfsvers"] = "4.1" if not check_if_platform_is_mac() else "4.0" @@ -1838,7 +1941,11 @@ def get_nfs_mount_options(options): if check_if_platform_is_mac(): options["mountport"] = "2049" - if "tls" in options: + if legacy_stunnel_mode_enabled(options, config): + # Non-tls mounts in stunnel mode should not re-map the port + if "tls" in options: + options["port"] = options["tlsport"] + else: options["port"] = options["tlsport"] def to_nfs_option(k, v): @@ -1854,12 +1961,15 @@ def to_nfs_option(k, v): def mount_nfs(config, dns_name, path, mountpoint, options, fallback_ip_address=None): - if "tls" in options: - mount_path = "127.0.0.1:%s" % path - elif fallback_ip_address: - mount_path = "%s:%s" % (fallback_ip_address, path) + if legacy_stunnel_mode_enabled(options, config): + if "tls" in options: + mount_path = "127.0.0.1:%s" % path + elif fallback_ip_address: + mount_path = "%s:%s" % (fallback_ip_address, path) + else: + mount_path = "%s:%s" % (dns_name, path) else: - mount_path = "%s:%s" % (dns_name, path) + mount_path = "127.0.0.1:%s" % path if not check_if_platform_is_mac(): command = [ @@ -1867,13 +1977,13 @@ def mount_nfs(config, dns_name, path, mountpoint, options, fallback_ip_address=N mount_path, mountpoint, "-o", - get_nfs_mount_options(options), + get_nfs_mount_options(options, config), ] else: command = [ "/sbin/mount_nfs", "-o", - get_nfs_mount_options(options), + get_nfs_mount_options(options, config), mount_path, mountpoint, ] @@ -2448,7 +2558,8 @@ def read_config(config_file=CONFIG_FILE): return p -def bootstrap_logging(config, log_dir=LOG_DIR): +# Retrieve and parse the logging level from the 
config file. +def get_log_level_from_config(config): raw_level = config.get(CONFIG_SECTION, "logging_level") levels = { "debug": logging.DEBUG, @@ -2465,6 +2576,26 @@ def bootstrap_logging(config, log_dir=LOG_DIR): level_error = True level = logging.INFO + return (level, raw_level, level_error) + + +# Convert the log level provided in the config into a log level string +# that is understandable by efs-proxy +def get_efs_proxy_log_level(config): + level, raw_level, level_error = get_log_level_from_config(config) + if level_error: + return "info" + + # Efs-proxy does not have a CRITICAL log level + if level == logging.CRITICAL: + return "error" + + return raw_level.lower() + + +def bootstrap_logging(config, log_dir=LOG_DIR): + level, raw_level, level_error = get_log_level_from_config(config) + max_bytes = config.getint(CONFIG_SECTION, "logging_max_bytes") file_count = config.getint(CONFIG_SECTION, "logging_file_count") @@ -3023,7 +3154,7 @@ def is_nfs_mount(mountpoint): return False -def mount_tls( +def mount_with_proxy( config, init_system, dns_name, @@ -3033,6 +3164,11 @@ def mount_tls( options, fallback_ip_address=None, ): + """ + This function is responsible for launching a efs-proxy process and attaching a NFS mount to that process + over the loopback interface. Efs-proxy is responsible for forwarding NFS operations to EFS. + When the legacy 'stunnel' mount option is used, this function will launch a stunnel process instead of efs-proxy. 
+ """ if os.path.ismount(mountpoint) and is_nfs_mount(mountpoint): sys.stdout.write( "%s is already mounted, please run 'mount' command to verify\n" % mountpoint @@ -3040,7 +3176,10 @@ def mount_tls( logging.warning("%s is already mounted, mount aborted" % mountpoint) return - with bootstrap_tls( + efs_proxy_enabled = not legacy_stunnel_mode_enabled(options, config) + logging.debug("mount_with_proxy: efs_proxy_enabled = %s", efs_proxy_enabled) + + with bootstrap_proxy( config, init_system, dns_name, @@ -3048,6 +3187,7 @@ def mount_tls( mountpoint, options, fallback_ip_address=fallback_ip_address, + efs_proxy_enabled=efs_proxy_enabled, ) as tunnel_proc: mount_completed = threading.Event() t = threading.Thread( @@ -3907,22 +4047,22 @@ def main(): if check_if_platform_is_mac() and "notls" not in options: options["tls"] = None - if "tls" in options: - mount_tls( + if "tls" not in options and legacy_stunnel_mode_enabled(options, config): + mount_nfs( config, - init_system, dns_name, path, - fs_id, mountpoint, options, fallback_ip_address=fallback_ip_address, ) else: - mount_nfs( + mount_with_proxy( config, + init_system, dns_name, path, + fs_id, mountpoint, options, fallback_ip_address=fallback_ip_address, diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml new file mode 100644 index 00000000..4bbf3042 --- /dev/null +++ b/src/proxy/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "efs-proxy" +edition = "2021" +build = "build.rs" +# The version of efs-proxy is tied to efs-utils. 
+version = "2.0.0" +publish = false + +[dependencies] +anyhow = "1.0.72" +async-trait = "0.1" +bytes = { version = "1.4.0" } +chrono = "0.4" +clap = { version = "=4.0.0", features = ["derive"] } +fern = "0.6" +futures = "0.3" +log = "0.4" +log4rs = { version = "0", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"]} +nix = { version = "0.26.2", features = ["signal"]} +onc-rpc = "0.2.3" +rand = "0.8.5" +s2n-tls = "0.0" +s2n-tls-tokio = "0.0" +s2n-tls-sys = "0.0" +serde = {version="1.0.175",features=["derive"]} +serde_ini = "0.2.0" +thiserror = "1.0.44" +tokio = { version = "1.29.0", features = ["full"] } +tokio-util = "0.7.8" +uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics"]} +xdr-codec = "0.4.4" + +[dev-dependencies] +test-case = "*" +tokio = { version = "1.29.0", features = ["test-util"] } + +[build-dependencies] +xdrgen = "0.4.4" \ No newline at end of file diff --git a/src/proxy/build.rs b/src/proxy/build.rs new file mode 100644 index 00000000..71e8d0da --- /dev/null +++ b/src/proxy/build.rs @@ -0,0 +1,5 @@ +extern crate xdrgen; + +fn main() { + xdrgen::compile("src/efs_prot.x").expect("xdrgen efs_prot.x failed"); +} diff --git a/src/proxy/src/config_parser.rs b/src/proxy/src/config_parser.rs new file mode 100644 index 00000000..0e26757b --- /dev/null +++ b/src/proxy/src/config_parser.rs @@ -0,0 +1,211 @@ +use log::LevelFilter; +use serde::{Deserialize, Serialize}; +use std::{error::Error, path::Path, str::FromStr}; + +const DEFAULT_LOG_LEVEL: LevelFilter = LevelFilter::Warn; + +fn default_log_level() -> String { + DEFAULT_LOG_LEVEL.to_string() +} + +fn deserialize_bool<'de, D>(deserializer: D) -> Result +where + D: serde::Deserializer<'de>, +{ + let s = String::deserialize(deserializer)?; + match s.to_lowercase().as_str() { + "true" | "yes" | "1" => Ok(true), + "false" | "no" | "0" => Ok(false), + _ => Err(serde::de::Error::custom(format!("Invalid value: {}", s))), + } +} + 
+#[derive(Default, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct ProxyConfig { + #[serde(alias = "fips", deserialize_with = "deserialize_bool")] + pub fips: bool, + + /// Logging level. Values should correspond to the log::LevelFilter enum. + #[serde(alias = "debug", default = "default_log_level")] + pub debug: String, + + /// Output path for log files. Logging is disabled if this value is not provided. + #[serde(alias = "output")] + pub output: Option, + + /// The proxy process is responsible for writing it's PID into this file so that the Watchdog + /// process can monitor it + #[serde(alias = "pid")] + pub pid_file_path: String, + + /// This nested structure is required for backwards compatibility + #[serde(alias = "efs")] + pub nested_config: EfsConfig, +} + +impl FromStr for ProxyConfig { + type Err = serde_ini::de::Error; + + fn from_str(s: &str) -> Result { + serde_ini::from_str(s) + } +} + +impl ProxyConfig { + pub fn from_path(config_path: &Path) -> Result> { + let config_string = std::fs::read_to_string(config_path)?; + let config = ProxyConfig::from_str(&config_string)?; + Ok(config) + } +} + +#[derive(Default, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct EfsConfig { + /// The mount target address - DNS name or IP address + #[serde(alias = "connect")] + pub mount_target_addr: String, + + /// Listen for and accept connections on the specified host:port + #[serde(alias = "accept")] + pub listen_addr: String, + + /// File path of the file that contains the client-side certificate and public key + #[serde(alias = "cert", default)] + pub client_cert_pem_file: String, + + /// File path of the file that contains the client private key + #[serde(alias = "key", default)] + pub client_private_key_pem_file: String, + + /// The hostname that is expected to be on the TLS certificate that the remote server presents + #[serde(alias = "checkHost", default)] + pub expected_server_hostname_tls: String, + + /// File path of the 
certificate authority file. + /// This is used to verify the EFS server-side TLS certificate. + #[serde(alias = "CAfile", default)] + pub ca_file: String, +} + +#[cfg(test)] +pub mod tests { + use super::*; + use std::{path::Path, string::String}; + + pub static TEST_CONFIG_PATH: &str = "tests/certs/test_config.ini"; + + pub fn get_test_config() -> ProxyConfig { + ProxyConfig::from_path(&Path::new(TEST_CONFIG_PATH)).expect("Could not parse test config.") + } + + #[test] + fn test_read_config_from_file() { + assert!(ProxyConfig::from_path(&Path::new(TEST_CONFIG_PATH)).is_ok()); + } + + #[test] + fn test_parse_config() { + let config_string = r#"fips = yes +foreground = quiet +socket = l:SO_REUSEADDR=yes +socket = a:SO_BINDTODEVICE=lo +debug = debug +output = /var/log/amazon/efs/fs-12341234.home.ec2-user.efs.21036.efs-proxy.log +pid = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid +port = 8081 +initial_partition_ip = 127.0.0.1:2049 + +[efs] +accept = 127.0.0.1:21036 +connect = fs-12341234.efs.us-east-1.amazonaws.com:2049 +sslVersion = TLSv1.2 +renegotiation = no +TIMEOUTbusy = 20 +TIMEOUTclose = 0 +TIMEOUTidle = 70 +delay = yes +verify = 2 +CAfile = /etc/amazon/efs/efs-utils.crt +cert = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem +key = /etc/amazon/efs/privateKey.pem +checkHost = fs-12341234.efs.us-east-1.amazonaws.com +"#; + + let result_config = ProxyConfig::from_str(&config_string).unwrap(); + let expected_proxy_config = ProxyConfig { + fips: true, + pid_file_path: String::from( + "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid", + ), + debug: LevelFilter::Debug.to_string().to_ascii_lowercase(), + output: Some(String::from( + "/var/log/amazon/efs/fs-12341234.home.ec2-user.efs.21036.efs-proxy.log", + )), + nested_config: EfsConfig { + listen_addr: String::from("127.0.0.1:21036"), + mount_target_addr: String::from("fs-12341234.efs.us-east-1.amazonaws.com:2049"), + ca_file: 
String::from("/etc/amazon/efs/efs-utils.crt"), + client_cert_pem_file: String::from( + "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem", + ), + client_private_key_pem_file: String::from("/etc/amazon/efs/privateKey.pem"), + expected_server_hostname_tls: String::from( + "fs-12341234.efs.us-east-1.amazonaws.com", + ), + }, + }; + + assert_eq!(result_config, expected_proxy_config); + } + + #[test] + fn test_parse_config_fips_disabled() { + let config_string = r#"fips = no +foreground = quiet +socket = l:SO_REUSEADDR=yes +socket = a:SO_BINDTODEVICE=lo +pid = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid +port = 8081 +initial_partition_ip = 127.0.0.1:2049 + +[efs] +accept = 127.0.0.1:21036 +connect = fs-12341234.efs.us-east-1.amazonaws.com:2049 +sslVersion = TLSv1.2 +renegotiation = no +TIMEOUTbusy = 20 +TIMEOUTclose = 0 +TIMEOUTidle = 70 +delay = yes +verify = 2 +CAfile = /etc/amazon/efs/efs-utils.crt +cert = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem +key = /etc/amazon/efs/privateKey.pem +checkHost = fs-12341234.efs.us-east-1.amazonaws.com +"#; + + let result_config = ProxyConfig::from_str(&config_string).unwrap(); + let expected_proxy_config = ProxyConfig { + fips: false, + pid_file_path: String::from( + "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid", + ), + debug: DEFAULT_LOG_LEVEL.to_string(), + output: None, + nested_config: EfsConfig { + listen_addr: String::from("127.0.0.1:21036"), + mount_target_addr: String::from("fs-12341234.efs.us-east-1.amazonaws.com:2049"), + ca_file: String::from("/etc/amazon/efs/efs-utils.crt"), + client_cert_pem_file: String::from( + "/var/run/efs/fs-12341234.home.ec2-user.efs.21036+/certificate.pem", + ), + client_private_key_pem_file: String::from("/etc/amazon/efs/privateKey.pem"), + expected_server_hostname_tls: String::from( + "fs-12341234.efs.us-east-1.amazonaws.com", + ), + }, + }; + + assert_eq!(result_config, expected_proxy_config); + } +} diff --git 
a/src/proxy/src/connections.rs b/src/proxy/src/connections.rs new file mode 100644 index 00000000..41b25a4f --- /dev/null +++ b/src/proxy/src/connections.rs @@ -0,0 +1,710 @@ +use crate::efs_prot::{BindClientResponse, BindResponse, ScaleUpConfig}; +use crate::efs_rpc::{self, PartitionId}; +use crate::error::{ConnectError, RpcError}; +use crate::proxy_identifier::ProxyIdentifier; +use crate::{ + controller::Event, shutdown::ShutdownHandle, tls::establish_tls_stream, tls::TlsConfig, +}; +use async_trait::async_trait; +use futures::future; +use log::{debug, info, warn}; +use s2n_tls_tokio::TlsStream; +use std::sync::Arc; +use std::{collections::HashMap, time::Duration}; +use tokio::task::JoinHandle; +use tokio::time::timeout; +use tokio::{ + io::AsyncWriteExt, + io::{AsyncRead, AsyncWrite}, + net::TcpStream, + sync::mpsc, +}; + +const CONCURRENT_ATTEMPT_COUNT: u32 = 3; + +pub const MAX_ATTEMPT_COUNT: u32 = 120; +const SINGLE_CONNECTION_TIMEOUT_SEC: u64 = 15; +pub const MULTIPLEX_CONNECTION_TIMEOUT_SEC: u64 = 15; + +pub trait ProxyStream: AsyncRead + AsyncWrite + Unpin + Send + 'static {} +impl ProxyStream for T {} + +#[async_trait] +pub trait PartitionFinder { + async fn establish_connection( + &self, + proxy_id: ProxyIdentifier, + ) -> Result<(S, Option, Option), ConnectError>; + + async fn spawn_establish_connection_task( + &self, + proxy_id: ProxyIdentifier, + ) -> JoinHandle), ConnectError>>; + + // Establish multiple connections to an EFS "Partition" to enable higher IO throughput. A + // `target` partition should be provided if the proxy owns an existing connection to EFS. When + // provided, the search will prefer to find a connection that maps to this `target` partition. + // This `target` does not represent a hard requirement, as connections mapping to a different + // partition can still be returned. 
+ // + async fn inner_establish_multiplex_connection( + &self, + proxy_id: ProxyIdentifier, + target: Option, + shutdown_handle: ShutdownHandle, + ) -> Result<(PartitionId, Vec, ScaleUpConfig), (ConnectError, Option)> { + let mut connect_futures = Vec::with_capacity(CONCURRENT_ATTEMPT_COUNT as usize); + for _ in 0..CONCURRENT_ATTEMPT_COUNT { + connect_futures.push(self.spawn_establish_connection_task(proxy_id).await); + } + + let mut connected_partitions: HashMap> = HashMap::new(); + + let mut failure_count = 0; + let mut attempt_count = CONCURRENT_ATTEMPT_COUNT; + + let overall_timeout = + tokio::time::sleep(Duration::from_secs(MULTIPLEX_CONNECTION_TIMEOUT_SEC)); + tokio::pin!(overall_timeout); + + loop { + tokio::select! { + (join_result, index, _) = future::select_all(connect_futures.iter_mut()) => { + let Ok(connection_result) = join_result else { + warn!("JoinError encountered during connection search."); + tokio::spawn(shutdown_connections(connected_partitions)); + return Err((ConnectError::MultiplexFailure, None)); + }; + + let (stream, bind_result) = match connection_result { + Ok(r) => r, + Err(ConnectError::IoError(e)) => { + debug!("Retryable ConnectError encountered during connection search. Error: {:?}", e); + failure_count += 1; + self.retry_multiplex_connection_attempt(proxy_id, &mut attempt_count, index, &mut connect_futures).await?; + continue; + }, + Err(e) => { + warn!("Non-retryable ConnectError encountered during connection search. Error: {}", e); + tokio::spawn(shutdown_connections(connected_partitions)); + return Err((ConnectError::MultiplexFailure, None)) + } + }; + + let response = match bind_result { + Ok(r) => r, + Err(RpcError::IoError(e)) => { + debug!("Retryable RpcError encountered during connection search. 
Error: {:?}", e); + failure_count += 1; + self.retry_multiplex_connection_attempt(proxy_id, &mut attempt_count, index, &mut connect_futures).await?; + continue; + }, + Err(e) => { + warn!("Non-retryable RpcError encountered during connection search. Error: {}", e); + tokio::spawn(shutdown_connections(connected_partitions)); + return Err((ConnectError::MultiplexFailure, None)) + } + }; + + let bind_response = response.bind_response; + let new_scale_up_config = response.scale_up_config; + debug!("Received {}", get_bind_response_string(&bind_response)); + match bind_response { + BindResponse::READY(id) => { + let partition_id = PartitionId { id: id.0 }; + + if Some(partition_id) == target { + debug!("Connection to target partition found. Attempt Count: {}, Failure Count: {}", attempt_count, failure_count); + } else { + debug!("Connection to non-target partition found. Attempt Count: {}, Failure Count: {}", attempt_count, failure_count); + } + + if let Some(mut streams) = connected_partitions.remove(&partition_id) { + streams.push(stream); + + let target_connection_count = if Some(partition_id) == target { + (new_scale_up_config.max_multiplexed_connections - 1) as usize + } else { + new_scale_up_config.max_multiplexed_connections as usize + }; + + if streams.len() >= target_connection_count { + tokio::spawn(shutdown_connections(connected_partitions)); + return Ok((partition_id, streams, new_scale_up_config)); + } else { + connected_partitions.insert(partition_id, streams); + } + } else { + connected_partitions.insert(partition_id, vec!(stream)); + } + }, + BindResponse::RETRY(_) | BindResponse::PREFERRED(_) => (), + BindResponse::RETRY_LATER(_) | BindResponse::ERROR(_) | BindResponse::default => { + tokio::spawn(shutdown_connections(connected_partitions)); + return Err((ConnectError::MultiplexFailure, Some(new_scale_up_config))) + }, + }; + + debug!("Continuing partition search. 
Attempt Count: {}, Failure Count: {}, Partitions Found: {}", attempt_count, failure_count, connected_partitions.len()); + self.retry_multiplex_connection_attempt(proxy_id, &mut attempt_count, index, &mut connect_futures).await?; + }, + _ = &mut overall_timeout => { + tokio::spawn(shutdown_connections(connected_partitions)); + return Err((ConnectError::Timeout, None)); + }, + _ = shutdown_handle.cancellation_token.cancelled() => { + tokio::spawn(shutdown_connections(connected_partitions)); + return Err((ConnectError::Cancelled, None)); + } + } + } + } + + async fn retry_multiplex_connection_attempt( + &self, + proxy_id: ProxyIdentifier, + attempt_count: &mut u32, + last_failed_index: usize, + connect_futures: &mut Vec< + JoinHandle), ConnectError>>, + >, + ) -> Result<(), (ConnectError, Option)> { + if *attempt_count > MAX_ATTEMPT_COUNT { + return Err((ConnectError::MaxAttemptsExceeded, None)); + } else { + connect_futures.swap_remove(last_failed_index); + connect_futures.push(self.spawn_establish_connection_task(proxy_id).await); + *attempt_count += 1; + Ok(()) + } + } + + // Increase the number of connections to the EFS Service. 
+ async fn scale_up_connection( + &self, + proxy_id: ProxyIdentifier, + partition_id: Option, + notification_queue: mpsc::Sender>, + shutdown_handle: ShutdownHandle, + ) { + let result = match self + .inner_establish_multiplex_connection(proxy_id, partition_id, shutdown_handle) + .await + { + Ok((id, proxy_streams, scale_up_config)) => { + notification_queue + .send(Event::ConnectionSuccess( + Some(id), + proxy_streams, + scale_up_config, + )) + .await + } + Err(e) => { + info!("Attempt to scale up failed: {}", e.0); + notification_queue.send(Event::ConnectionFail(e.1)).await + } + }; + result.unwrap_or_else(|_| warn!("Unable to notify event queue of established connections")); + } +} + +pub fn configure_stream(tcp_stream: TcpStream) -> TcpStream { + match tcp_stream.set_nodelay(true) { + Ok(_) => {} + Err(e) => warn!("Error setting TCP_NODELAY: {}", e), + } + tcp_stream +} + +// Allow for graceful closure of Tls connections +async fn shutdown_connections(connections: HashMap>) { + for streams in connections.into_values() { + for mut stream in streams.into_iter() { + tokio::spawn(async move { + if let Err(e) = stream.shutdown().await { + debug!("Failed to gracefully shutdown connection: {}", e); + } + }); + } + } +} + +// BindResponse in generated by xdrgen and does not implement the Debug or Display traits +pub fn get_bind_response_string(bind_response: &BindResponse) -> String { + match bind_response { + BindResponse::PREFERRED(_partition_id) => String::from("BindResponse::PREFERRED"), + BindResponse::READY(_partition_id) => String::from("BindResponse::READY"), + BindResponse::RETRY(m) => { + if m.is_empty() { + String::from("BindResponse::RETRY") + } else { + format!("BindResponse::RETRY. message: {m}") + } + } + BindResponse::RETRY_LATER(m) => { + if m.is_empty() { + String::from("BindResponse::RETRY_LATER") + } else { + format!("BindResponse::RETRY_LATER. 
message: {m}") + } + } + BindResponse::ERROR(m) => { + if m.is_empty() { + String::from("BindResponse::ERROR") + } else { + format!("BindResponse::ERROR. message: {m}") + } + } + BindResponse::default => String::from("BindResponse::default"), + } +} + +#[derive(Clone)] +pub struct PlainTextPartitionFinder { + pub mount_target_addr: String, +} + +impl PlainTextPartitionFinder { + async fn establish_plain_text_connection( + mount_target_addr: String, + proxy_id: ProxyIdentifier, + ) -> Result<(TcpStream, Result), ConnectError> { + timeout(Duration::from_secs(SINGLE_CONNECTION_TIMEOUT_SEC), async { + let mut tcp_stream = TcpStream::connect(mount_target_addr).await?; + let response = efs_rpc::bind_client_to_partition(proxy_id, &mut tcp_stream).await; + Ok((configure_stream(tcp_stream), response)) + }) + .await + .map_err(|_| ConnectError::Timeout)? + } +} + +#[async_trait] +impl PartitionFinder for PlainTextPartitionFinder { + async fn establish_connection( + &self, + proxy_id: ProxyIdentifier, + ) -> Result<(TcpStream, Option, Option), ConnectError> { + let (s, bind_result) = + Self::establish_plain_text_connection(self.mount_target_addr.clone(), proxy_id).await?; + match bind_result { + Ok(response) => { + debug!( + "EFS RPC call succeeded while establishing initial connection. Response: {}", + get_bind_response_string(&response.bind_response) + ); + let partition_id = match &response.bind_response { + BindResponse::READY(id) => Some(PartitionId { id: id.0 }), + _ => None, + }; + Ok((s, partition_id, Some(response.scale_up_config))) + } + Err(e) => { + warn!("EFS RPC call errored while establishing initial connection. 
Error {e}",); + let tcp_stream = TcpStream::connect(self.mount_target_addr.clone()).await?; + return Ok((configure_stream(tcp_stream), None, None)); + } + } + } + + async fn spawn_establish_connection_task( + &self, + proxy_id: ProxyIdentifier, + ) -> JoinHandle), ConnectError>> { + let addr = self.mount_target_addr.clone(); + tokio::spawn(Self::establish_plain_text_connection(addr, proxy_id)) + } +} + +pub struct TlsPartitionFinder { + tls_config: Arc>, +} + +impl TlsPartitionFinder { + pub fn new(tls_config: Arc>) -> Self { + TlsPartitionFinder { tls_config } + } + + async fn establish_tls_connection( + tls_config: TlsConfig, + proxy_id: ProxyIdentifier, + ) -> Result<(TlsStream, Result), ConnectError> { + timeout(Duration::from_secs(SINGLE_CONNECTION_TIMEOUT_SEC), async { + let mut tls_stream = establish_tls_stream(tls_config).await?; + let response = efs_rpc::bind_client_to_partition(proxy_id, &mut tls_stream).await; + Ok((tls_stream, response)) + }) + .await + .map_err(|_| ConnectError::Timeout)? + } +} + +#[async_trait] +impl PartitionFinder> for TlsPartitionFinder { + async fn establish_connection( + &self, + proxy_id: ProxyIdentifier, + ) -> Result< + ( + TlsStream, + Option, + Option, + ), + ConnectError, + > { + let tls_config_copy = self.tls_config.lock().await.clone(); + let (s, bind_result) = Self::establish_tls_connection(tls_config_copy, proxy_id).await?; + let (bind_response, scale_up_config) = match bind_result { + Ok(response) => { + warn!( + "EFS RPC call succeeded while establishing initial connection. Response: {}", + get_bind_response_string(&response.bind_response) + ); + (response.bind_response, Some(response.scale_up_config)) + } + Err(e) => { + warn!("EFS RPC call errored while establishing initial connection. 
Error {e}",); + let tls_stream = establish_tls_stream(self.tls_config.lock().await.clone()).await?; + return Ok((tls_stream, None, None)); + } + }; + + match bind_response { + BindResponse::READY(id) => Ok((s, Some(PartitionId { id: id.0 }), scale_up_config)), + _ => Ok((s, None, scale_up_config)), + } + } + + async fn spawn_establish_connection_task( + &self, + proxy_id: ProxyIdentifier, + ) -> JoinHandle< + Result<(TlsStream, Result), ConnectError>, + > { + let tls_config_copy = self.tls_config.lock().await.clone(); + tokio::spawn(Self::establish_tls_connection(tls_config_copy, proxy_id)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config_parser::tests::get_test_config; + use crate::connections::PartitionFinder; + use crate::controller::tests::{find_available_port, ServiceAction, TestService}; + use crate::controller::DEFAULT_SCALE_UP_CONFIG; + use crate::ProxyConfig; + use nix::sys::signal::kill; + use nix::sys::signal::Signal; + use std::path::Path; + use std::str::FromStr; + use tokio::signal; + use tokio::sync::Mutex; + use tokio_util::sync::CancellationToken; + use uuid::Uuid; + + const PROXY_ID: ProxyIdentifier = ProxyIdentifier { + uuid: Uuid::from_u128(1 as u128), + incarnation: 0, + }; + + struct MultiplexTest { + service: TestService, + partition_finder: TlsPartitionFinder, + initial_partition_id: PartitionId, + } + + impl MultiplexTest { + async fn new() -> Self { + let service = TestService::new(true).await; + MultiplexTest::new_with_service(service).await + } + + async fn new_with_service(service: TestService) -> Self { + let mut tls_config = TlsConfig::new_from_config(&get_test_config()) + .await + .expect("Failed to acquire TlsConfig."); + tls_config.remote_addr = format!("127.0.0.1:{}", service.listen_port); + + let partition_finder = TlsPartitionFinder::new(Arc::new(Mutex::new(tls_config))); + + let (_s, id, _) = partition_finder + .establish_connection(PROXY_ID.clone()) + .await + .expect("Failed to connect to server"); + + 
let Some(initial_partition_id) = id else { + panic!("Partition Id not found for initial connection.") + }; + + MultiplexTest { + service, + partition_finder: partition_finder, + initial_partition_id, + } + } + } + + #[tokio::test] + async fn test_establish_multiplex_same_partition_found() { + let test = MultiplexTest::new().await; + + let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); + + let (new_connnection_id, connections, _) = test + .partition_finder + .inner_establish_multiplex_connection( + PROXY_ID.clone(), + Some(test.initial_partition_id.clone()), + shutdown_handle, + ) + .await + .expect("Could not establish a multiplex connection"); + + assert_eq!(test.initial_partition_id, new_connnection_id); + assert_eq!( + DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections - 1, + connections.len() as i32 + ); + + test.service.shutdown().await; + } + + #[tokio::test] + async fn test_establish_multiplex_new_partition_found() { + let test = MultiplexTest::new().await; + + let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); + + test.service + .post_action(ServiceAction::StopPartitionAcceptor( + test.initial_partition_id.clone(), + )) + .await; + + let (new_connnection_id, connections, _) = test + .partition_finder + .inner_establish_multiplex_connection( + PROXY_ID.clone(), + Some(test.initial_partition_id.clone()), + shutdown_handle, + ) + .await + .expect("Could not establish a multiplex connection"); + + assert_eq!( + DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections, + connections.len() as i32 + ); + assert_ne!(test.initial_partition_id, new_connnection_id); + + test.service.shutdown().await; + } + + #[tokio::test] + async fn test_establish_multiplex_no_target() { + let test = MultiplexTest::new().await; + + let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); + + let (new_connnection_id, connections, _) = test + .partition_finder + 
.inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle) + .await + .expect("Could not establish a multiplex connection"); + + assert_eq!( + DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections, + connections.len() as i32 + ); + assert_ne!(test.initial_partition_id, new_connnection_id); + + test.service.shutdown().await; + } + + #[tokio::test] + async fn test_establish_connection_timeout() { + let (_listener, port) = find_available_port().await; + + let error = tokio::spawn(async move { + let partition_finder = PlainTextPartitionFinder { + mount_target_addr: format!("127.0.0.1:{}", port.clone()), + }; + partition_finder + .establish_connection(PROXY_ID.clone()) + .await + }) + .await + .expect("join err"); + + assert!(matches!(error, Err(ConnectError::Timeout))); + } + + #[tokio::test] + async fn test_establish_multiplex_timeout() { + let (_listener, port) = find_available_port().await; + + let error = tokio::spawn(async move { + let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); + + let partition_finder = PlainTextPartitionFinder { + mount_target_addr: format!("127.0.0.1:{}", port.clone()), + }; + partition_finder + .inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle) + .await + }) + .await + .expect("join err"); + + assert!(matches!(error, Err((ConnectError::Timeout, None)))); + } + + #[tokio::test] + async fn test_establish_multiplex_shutdown() { + let (_listener, port) = find_available_port().await; + + let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); + + let shutdown_handle_clone = shutdown_handle.clone(); + let task = tokio::spawn(async move { + let partition_finder = PlainTextPartitionFinder { + mount_target_addr: format!("127.0.0.1:{}", port.clone()), + }; + partition_finder + .inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle_clone) + .await + }); + + shutdown_handle.exit(None).await; + let error = 
task.await.expect("Unexpected join error"); + + assert!(matches!(error, Err((ConnectError::Cancelled, None)))); + } + + #[tokio::test] + async fn test_scale_up_max_attempts() { + // Create a service in which the all calls of bind_client_to_partition will return a + // different value. Our "TestService" returns these PartitionIds in a round robin fashion, + // and this service will have more PartitionId than MAX_ATTEMPT_COUNT + let service = + TestService::new_with_partition_count((MAX_ATTEMPT_COUNT + 2) as usize, true).await; + + let test = MultiplexTest::new_with_service(service).await; + + let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); + + let error = test + .partition_finder + .inner_establish_multiplex_connection( + PROXY_ID.clone(), + Some(test.initial_partition_id.clone()), + shutdown_handle.clone(), + ) + .await; + + assert!(matches!( + error, + Err((ConnectError::MaxAttemptsExceeded, None)) + )); + } + + enum BrokenPartitionFinderType { + _ConnectIoError, + _RpcIoError, + RpcNonIoError, + } + + struct BrokenPartitionFinder { + finder_type: BrokenPartitionFinderType, + } + + impl BrokenPartitionFinder { + fn new(finder_type: BrokenPartitionFinderType) -> Self { + Self { finder_type } + } + } + + #[async_trait] + impl PartitionFinder for BrokenPartitionFinder { + async fn establish_connection( + &self, + _proxy_id: ProxyIdentifier, + ) -> Result<(TcpStream, Option, Option), ConnectError> { + unimplemented!() + } + + async fn spawn_establish_connection_task( + &self, + _proxy_id: ProxyIdentifier, + ) -> JoinHandle), ConnectError>> + { + let (_listener, port) = find_available_port().await; + let tcp_stream = TcpStream::connect(("127.0.0.1", port)) + .await + .expect("Could not establish TCP stream."); + let error = match self.finder_type { + BrokenPartitionFinderType::_ConnectIoError => Err(ConnectError::IoError( + tokio::io::ErrorKind::BrokenPipe.into(), + )), + BrokenPartitionFinderType::_RpcIoError => Ok(( + tcp_stream, + 
Err(RpcError::IoError(tokio::io::ErrorKind::BrokenPipe.into())), + )), + BrokenPartitionFinderType::RpcNonIoError => { + Ok((tcp_stream, Err(RpcError::GarbageArgs))) + } + }; + tokio::spawn(async { error }) + } + } + + #[tokio::test] + async fn test_scale_up_rpc_error() { + let partition_finder = BrokenPartitionFinder::new(BrokenPartitionFinderType::RpcNonIoError); + + let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); + let error = partition_finder + .inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle.clone()) + .await; + + assert!(matches!(error, Err((ConnectError::MultiplexFailure, None)))); + } + + #[tokio::test] + async fn test_reload_certificate() { + let (tx, rx) = tokio::sync::oneshot::channel(); + let mut sigs_hangup_listener = + signal::unix::signal(signal::unix::SignalKind::hangup()).unwrap(); + let config_file_path = Path::new("tests/certs/test_config.ini"); + let config_contents = std::fs::read_to_string(&config_file_path).unwrap(); + let proxy_config = ProxyConfig::from_str(&config_contents).unwrap(); + let mut tls_config = TlsConfig::new_from_config(&proxy_config).await.unwrap(); + tls_config.client_cert = vec![1, 2]; + let old_cert = tls_config.client_cert.clone(); + let tls_config_ptr = Arc::new(Mutex::new(tls_config)); + let cloned_tls_config_ptr = Arc::clone(&tls_config_ptr); + tokio::spawn(async move { + loop { + // Check if the SIGHUP signal is received + if (sigs_hangup_listener.recv().await).is_some() { + //Reloading the TLS configuration + let mut locked_config = cloned_tls_config_ptr.lock().await; + *locked_config = crate::get_tls_config(&proxy_config).await.unwrap(); + tx.send(()).unwrap(); + break; + } + } + }); + let tls_partition_finder = TlsPartitionFinder { + tls_config: tls_config_ptr.clone(), + }; + let _ = kill(nix::unistd::Pid::this(), Signal::SIGHUP); + rx.await.unwrap(); + assert_ne!( + old_cert, + tls_partition_finder.tls_config.lock().await.client_cert + ); + } +} diff 
--git a/src/proxy/src/controller.rs b/src/proxy/src/controller.rs new file mode 100644 index 00000000..46406e5e --- /dev/null +++ b/src/proxy/src/controller.rs @@ -0,0 +1,1614 @@ +use crate::connections::configure_stream; +use crate::efs_prot::ScaleUpConfig; +use crate::efs_rpc::PartitionId; +use crate::shutdown::ShutdownReason; +use crate::status_reporter::{self, StatusReporter}; +use crate::{ + connections::{PartitionFinder, ProxyStream}, + proxy::{PerformanceStats, Proxy}, + proxy_identifier::ProxyIdentifier, + shutdown::ShutdownHandle, +}; +use log::{debug, error, info, warn}; +use std::{sync::Arc, time::Duration}; +use tokio::{net::TcpListener, sync::mpsc, time::Instant}; +use tokio_util::sync::CancellationToken; + +pub const DEFAULT_SCALE_UP_BACKOFF: Duration = Duration::from_secs(300); + +pub const DEFAULT_SCALE_UP_CONFIG: ScaleUpConfig = ScaleUpConfig { + max_multiplexed_connections: 5, + scale_up_bytes_per_sec_threshold: 300 * 1024 * 1024, + scale_up_threshold_breached_duration_sec: 1, +}; + +#[derive(Debug)] +pub enum Event { + ProxyUpdate(PerformanceStats), + ConnectionSuccess(Option, Vec, ScaleUpConfig), + ConnectionFail(Option), +} + +enum EventResult { + Restart((Option, Vec, Option)), + Ok, +} + +#[derive(Clone, Debug, PartialEq)] +pub enum ConnectionSearchState { + SearchingAdditional(Option), + Stop(Instant), + Idle, +} + +struct IncarnationState { + pub proxy_id: ProxyIdentifier, + pub last_proxy_update: Option<(Instant, PerformanceStats)>, + pub partition_id: Option, + connection_state: ConnectionSearchState, + pub num_connections: u16, + events_tx: mpsc::Sender>, +} + +impl IncarnationState { + fn new( + proxy_id: ProxyIdentifier, + partition_id: Option, + events_tx: mpsc::Sender>, + num_connections: u16, + ) -> Self { + Self { + proxy_id, + last_proxy_update: None, + partition_id, + connection_state: ConnectionSearchState::Idle, + num_connections, + events_tx, + } + } +} + +pub struct Controller { + listener: TcpListener, + partition_finder: 
Arc + Sync + Send>, + proxy_id: ProxyIdentifier, + scale_up_attempt_count: u64, + restart_count: u64, + scale_up_config: ScaleUpConfig, + status_reporter: StatusReporter, +} + +impl Controller { + pub async fn new( + listen_addr: &str, + partition_finder: Arc + Sync + Send + 'static>, + status_reporter: StatusReporter, + ) -> Self { + let Ok(listener) = TcpListener::bind(listen_addr).await else { + panic!("Failed to bind {}", listen_addr); + }; + + Self { + listener, + partition_finder, + proxy_id: ProxyIdentifier::new(), + scale_up_attempt_count: 0, + restart_count: 0, + scale_up_config: DEFAULT_SCALE_UP_CONFIG, + status_reporter, + } + } + + pub async fn run(mut self, token: CancellationToken) -> Option { + let mut ready_connections = None; + loop { + info!("Starting new incarnation of proxy"); + let nfs_client = match self.listener.accept().await { + Ok((client, socket_addr)) => { + self.proxy_id.increment(); + info!( + "Accepted new connection {:?}, {:?} ", + socket_addr, self.proxy_id + ); + configure_stream(client) + } + Err(e) => { + error!("Failed to establish connection to NFS client. {e}"); + continue; + } + }; + + let peek_result = nfs_client.peek(&mut [0; 1]).await; + if let Ok(0) = peek_result { + // efs-utils performs a test in which it checks if a connection to the proxy port + // can be established. This connection is never used and is immediately closed. + // When this behavior is detected, this loops should be restarted so that another + // connection to the port can be established + debug!("Connection to nfs client was closed before any data was sent to the proxy. This is expected. Restarting controller"); + continue; + } else if let Err(e) = peek_result { + error!("Failed to check if data was sent by the NFS client. 
{}", e); + return Some(ShutdownReason::UnexpectedError); + } + + let (events_tx, mut events_rx) = mpsc::channel(1024); + let (shutdown, mut waiter) = ShutdownHandle::new(token.child_token()); + + let (partition_id, partition_servers, scale_up_config) = match ready_connections { + Some(connections) => { + ready_connections = None; + connections + } + None => { + match self + .partition_finder + .establish_connection(self.proxy_id) + .await + { + Ok((s, partition_id, scale_up_config)) => { + (partition_id, vec![s], scale_up_config) + } + Err(e) => { + warn!("Failed to establish an initial connection to EFS. Error: {e}",); + continue; + } + } + } + }; + + match partition_id { + Some(id) => debug!("Established initial connection with PartitionId: {id:?}"), + None => debug!("Established initial connection without a PartitionId"), + } + + self.scale_up_config = scale_up_config.unwrap_or(self.scale_up_config); + debug!("ScaleUpConfig: {:#?}", self.scale_up_config); + + let mut state = IncarnationState::new( + self.proxy_id, + partition_id, + events_tx.clone(), + partition_servers.len() as u16, + ); + + let mut proxy = Proxy::new(nfs_client, partition_servers, events_tx, shutdown.clone()); + + loop { + let mut err = Ok(()); + tokio::select! 
{ + _ = self.status_reporter.await_report_request() => { + let report = status_reporter::Report { + proxy_id: state.proxy_id, + partition_id: state.partition_id, + connection_state: state.connection_state.clone(), + num_connections: state.num_connections as usize, + last_proxy_update: state.last_proxy_update, + scale_up_attempt_count: self.scale_up_attempt_count, + restart_count: self.restart_count + }; + self.status_reporter.publish_status(report).await; + } + event = events_rx.recv() => { + if let Some(next_event) = event { + match self.handle_event(next_event, &mut proxy, &mut state, shutdown.clone()).await { + Ok(EventResult::Restart(connections)) => { + debug!("Restarting proxy to use multiple connections"); + ready_connections = Some(connections); + shutdown.exit(Some(ShutdownReason::NeedsRestart)).await; + break; + }, + Ok(EventResult::Ok) => continue, + Err(e) => err = Err(e), + }; + + } else { + err = Err("All senders have closed"); + } + } + _ = shutdown.cancellation_token.cancelled() => { + debug!("Controller exiting due to child exit"); + break; + } + _ = self.listener.accept() => { + warn!("Unexpected connection, ignoring") + } + } + if err.is_err() { + info!("Starting proxy restart due to {}", err.unwrap_err()); + break; + } + } + + if let Some(count) = self.restart_count.checked_add(1) { + self.restart_count = count; + } + + // Ensure that connection(s) to EFS is closed. If we can't successfully stop the proxy, + // then exit from this process and allow the watchdog to restart the efs-proxy program. + // + if let Err(e) = proxy.shutdown().await { + error!("Proxy shutdown failed. 
{}", e); + return Some(ShutdownReason::UnexpectedError); + }; + + let shutdown_reason = waiter.recv().await; + match shutdown_reason { + Some(ShutdownReason::NeedsRestart) => { + debug!("Proxy restarting with ShutdownReason::NeedsRestart") + } + Some(ShutdownReason::Unmount) => { + debug!("Proxy restarting with ShutdownReason::Unmount") + } + reason => return reason, + } + } + } + + fn should_scale_up(&self, state: &mut IncarnationState, stats: PerformanceStats) -> bool { + if let ConnectionSearchState::Stop(last_failure) = state.connection_state { + if Instant::now().duration_since(last_failure) > DEFAULT_SCALE_UP_BACKOFF { + state.connection_state = ConnectionSearchState::Idle; + } + } + + state.num_connections == 1 + && state.connection_state == ConnectionSearchState::Idle + && stats.get_total_throughput() + >= self.scale_up_config.scale_up_bytes_per_sec_threshold as u64 + } + + async fn handle_event( + &mut self, + event: Event, + proxy: &mut Proxy, + state: &mut IncarnationState, + shutdown_handle: ShutdownHandle, + ) -> Result, &str> { + match event { + Event::ProxyUpdate(stats) => { + info!("Proxy performance: {:?}", stats); + + if self.should_scale_up(state, stats) { + info!("Searching for a new connection"); + if let Some(count) = self.scale_up_attempt_count.checked_add(1) { + self.scale_up_attempt_count = count; + } + + state.connection_state = + ConnectionSearchState::SearchingAdditional(state.partition_id); + self.partition_finder + .scale_up_connection( + state.proxy_id, + state.partition_id, + state.events_tx.clone(), + shutdown_handle, + ) + .await; + } + } + Event::ConnectionSuccess(id, streams, scale_up_config) => { + info!("Established new TCP connection to {:?}", id); + if state.partition_id == id { + assert_eq!( + (self.scale_up_config.max_multiplexed_connections - 1) as usize, + streams.len() + ); + for stream in streams { + proxy.add_connection(stream).await; + } + } else { + assert_eq!( + self.scale_up_config.max_multiplexed_connections as 
usize, + streams.len() + ); + assert!(id.is_some()); + assert_ne!(state.partition_id, id); + + return Ok(EventResult::Restart((id, streams, Some(scale_up_config)))); + } + state.num_connections = self.scale_up_config.max_multiplexed_connections as u16; + state.connection_state = ConnectionSearchState::Idle; + self.scale_up_config = scale_up_config; + } + Event::ConnectionFail(scale_up_config) => { + state.connection_state = ConnectionSearchState::Stop(Instant::now()); + self.scale_up_config = scale_up_config.unwrap_or(self.scale_up_config); + info!("Connection failed"); + } + } + debug!("ScaleUpConfig: {:#?}", self.scale_up_config); + Ok(EventResult::Ok) + } +} + +#[cfg(test)] +pub mod tests { + use crate::config_parser::tests::get_test_config; + use crate::connections::PlainTextPartitionFinder; + use crate::connections::ProxyStream; + use crate::connections::MULTIPLEX_CONNECTION_TIMEOUT_SEC; + use crate::controller::ConnectionSearchState; + use crate::controller::DEFAULT_SCALE_UP_BACKOFF; + use crate::efs_prot; + use crate::efs_prot::BindResponse; + use crate::efs_prot::ScaleUpConfig; + use crate::efs_rpc; + use crate::efs_rpc::PartitionId; + use crate::proxy; + use crate::proxy_identifier::ProxyIdentifier; + use crate::proxy_identifier::INITIAL_INCARNATION; + use crate::rpc; + use crate::rpc::RPC_HEADER_SIZE; + use crate::shutdown::ShutdownReason; + use crate::status_reporter; + use crate::status_reporter::Report; + use crate::status_reporter::StatusRequester; + use crate::tls::tests::get_server_config; + use crate::tls::TlsConfig; + use crate::{connections::TlsPartitionFinder, controller::Controller}; + + use bytes::BytesMut; + use log::debug; + use onc_rpc::RpcMessage; + use rand::Rng; + use std::collections::HashMap; + use std::collections::HashSet; + use std::io::ErrorKind; + use std::sync::atomic::AtomicU32; + use std::time::Duration; + use std::{self, io::Error, sync::Arc}; + use test_case::test_case; + use tokio::time::error::Elapsed; + use 
tokio::time::timeout; + use tokio::{ + io::AsyncWriteExt, + net::{TcpListener, TcpStream}, + sync::oneshot, + sync::Mutex, + task::JoinHandle, + }; + use tokio_util::sync::CancellationToken; + + use super::DEFAULT_SCALE_UP_CONFIG; + + #[derive(Copy, Clone, Debug, PartialEq)] + pub enum ServiceAction { + // Server will reject the next incoming TCP connection. Further attempts will succeed. + // + RejectNextNewConnectionRequest, + + // The server will close the next connection that receives a request from the proxy. + // + CloseOnNextRequest, + + // The server will close a random connection without waiting for any incoming request. + // + CloseRandomConnection, + + // This service will restart accepting connections to the given PartitionId + // + _RestartPartitionAcceptor(PartitionId), + + // This service will not accept connections to the given PartitionId + // + StopPartitionAcceptor(PartitionId), + + // This service will close the connection if a bind_client_to_partition request is received + // + CloseOnNextBindClientToPartitionRequest, + + // The service will send BindResponse::RETRY_LATER on subsequent bind_client_to_partition requests + // + DisableScaleUp, + + // The service will allow re-enabling scale up after the DisableScaleUp action is posted. 
+ // + EnableScaleUp, + + // The service will respond with BindResponse::RETRY on the next n bind_client_to_partition requests + SendRetries(u32), + } + + const PARTITION_COUNT: usize = 3; + + pub struct TestService { + pub listen_port: u16, + posted_action: Arc>>, + shutdown_tx: oneshot::Sender<()>, + join_handle: JoinHandle<()>, + pub partition_ids: Vec, + pub stopped_partitions: Arc>>, + pub request_counter: Arc>>>>, + } + + impl TestService { + const ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC: i32 = 0; + const NEVER_SCALE_UP_THRESHOLD_BYTES_PER_SEC: i32 = i32::MAX; + + pub async fn new(tls: bool) -> Self { + TestService::new_with_partition_count(PARTITION_COUNT, tls).await + } + + pub async fn new_with_partition_count(count: usize, tls: bool) -> Self { + TestService::new_with_partition_count_and_scale_up_config( + count, + super::DEFAULT_SCALE_UP_CONFIG, + tls, + ) + .await + } + + pub async fn new_with_throughput_scale_up_threshold(threshold: i32, tls: bool) -> Self { + let mut config = super::DEFAULT_SCALE_UP_CONFIG.clone(); + config.scale_up_bytes_per_sec_threshold = threshold; + TestService::new_with_partition_count_and_scale_up_config(PARTITION_COUNT, config, tls) + .await + } + + pub async fn new_with_partition_count_and_scale_up_threshold( + count: usize, + threshold: i32, + tls: bool, + ) -> Self { + let mut config = super::DEFAULT_SCALE_UP_CONFIG.clone(); + config.scale_up_bytes_per_sec_threshold = threshold; + TestService::new_with_partition_count_and_scale_up_config(count, config, tls).await + } + + pub async fn new_with_partition_count_and_scale_up_config( + count: usize, + scale_up_config: ScaleUpConfig, + tls: bool, + ) -> Self { + let (tcp_listener, listen_port) = find_available_port().await; + + let partition_ids = (0..count) + .map(|_| PartitionId { + id: efs_rpc::tests::generate_partition_id().0, + }) + .collect::>(); + + let stopped_partitions = Arc::new(Mutex::new(HashSet::new())); + + let mut counter = HashMap::new(); + for id in 
partition_ids.iter() { + counter.insert(id.clone(), Vec::new()); + } + let request_counter = Arc::new(Mutex::new(counter)); + + let posted_action = Arc::new(Mutex::new(Option::None)); + let (shutdown_tx, shutdown_rx) = oneshot::channel(); + + let service_handle = TestService::run( + tcp_listener, + scale_up_config, + partition_ids.clone(), + stopped_partitions.clone(), + request_counter.clone(), + posted_action.clone(), + tls, + shutdown_rx, + ); + + TestService { + listen_port, + posted_action, + shutdown_tx, + join_handle: service_handle, + partition_ids, + stopped_partitions, + request_counter, + } + } + + pub async fn post_action(&self, new_action: ServiceAction) { + match new_action { + ServiceAction::_RestartPartitionAcceptor(id) => { + let mut stopped = self.stopped_partitions.lock().await; + assert!(stopped.remove(&id), "Partition is not stopped"); + return; + } + ServiceAction::StopPartitionAcceptor(id) => { + let mut stopped = self.stopped_partitions.lock().await; + stopped.insert(id); + return; + } + ServiceAction::EnableScaleUp => { + TestService::check_and_consume_action( + &self.posted_action, + ServiceAction::DisableScaleUp, + ) + .await; + return; + } + _ => (), + }; + + let mut consumable_action = self.posted_action.lock().await; + if consumable_action.is_some() { + panic!("Previous action was not consumed"); + } + *consumable_action = Some(new_action); + } + + fn run( + listener: TcpListener, + scale_up_config: ScaleUpConfig, + partition_ids: Vec, + stopped_partitions: Arc>>, + request_counter: Arc>>>>, + posted_action: Arc>>, + tls: bool, + mut shutdown_rx: oneshot::Receiver<()>, + ) -> JoinHandle<()> { + tokio::spawn(async move { + let mut partition_idx = 0; + loop { + tokio::select! 
{ + socket = listener.accept() => { + let Ok((tcp_stream, _socket_addr)) = socket else { + panic!("Failed to establish connection to client"); + }; + + if tls { + let tls_acceptor = s2n_tls_tokio::TlsAcceptor::new(get_server_config().await.expect("Could not get config")); + let tls_stream = match tls_acceptor.accept(tcp_stream).await { + Ok(conn) => conn, + Err(e) => { + panic!("Failed to establish TLS connection: {}", e); + } + }; + Self::inner_run(tls_stream, scale_up_config, &mut partition_idx, &partition_ids, stopped_partitions.clone(), request_counter.clone(), posted_action.clone()).await; + } else { + Self::inner_run(tcp_stream, scale_up_config, &mut partition_idx, &partition_ids, stopped_partitions.clone(), request_counter.clone(), posted_action.clone()).await; + } + }, + _ = &mut shutdown_rx => { + break; + } + }; + } + }) + } + + async fn inner_run( + stream: S, + scale_up_config: ScaleUpConfig, + partition_idx: &mut usize, + partition_ids: &Vec, + stopped_partitions: Arc>>, + request_counter: Arc>>>>, + posted_action: Arc>>, + ) { + if TestService::check_and_consume_action( + &posted_action, + ServiceAction::RejectNextNewConnectionRequest, + ) + .await + || TestService::check_and_consume_action( + &posted_action, + ServiceAction::CloseRandomConnection, + ) + .await + { + debug!("RejectNextNewConnectionRequest processed"); + drop(stream); + } else { + let stopped = stopped_partitions.lock().await; + let mut next_id = None; + for i in 0..partition_ids.len() { + *partition_idx = (*partition_idx + i + 1) % partition_ids.len(); + if !stopped.contains(&partition_ids[*partition_idx]) { + next_id = Some(partition_ids[*partition_idx].clone()); + break; + } + } + let Some(id) = next_id else { + panic!("No available PartitionIds") + }; + + let request_count = Arc::new(AtomicU32::new(0)); + request_counter + .lock() + .await + .get_mut(&id) + .expect("Counter for partition not found") + .push(request_count.clone()); + + tokio::spawn(TestService::new_connection( + 
stream, + scale_up_config, + posted_action.clone(), + id, + request_count.clone(), + )); + } + } + + async fn check_and_consume_action( + posted_action: &Arc>>, + to_check: ServiceAction, + ) -> bool { + let mut action = posted_action.lock().await; + if *action == Some(to_check) { + *action = Option::None; + true + } else { + false + } + } + + async fn check_action( + posted_action: &Arc>>, + to_check: ServiceAction, + ) -> bool { + let action = posted_action.lock().await; + *action == Some(to_check) + } + + async fn new_connection( + mut stream: S, + scale_up_config: ScaleUpConfig, + posted_action: Arc>>, + partition_id: PartitionId, + request_count: Arc, + ) { + loop { + let Ok(message) = rpc::read_rpc_bytes(&mut stream).await else { + break; + }; + + request_count.fetch_add(1, std::sync::atomic::Ordering::AcqRel); + + if TestService::check_and_consume_action( + &posted_action, + ServiceAction::CloseOnNextRequest, + ) + .await + { + debug!("CloseOnNextRequest processed"); + break; + } + + let response = match TestService::parse_bind_client_to_partition_request(&message) { + Ok(rpc_message) => { + if TestService::check_and_consume_action( + &posted_action, + ServiceAction::CloseOnNextBindClientToPartitionRequest, + ) + .await + { + debug!("CloseOnNextBindClientToPartitionRequest processed"); + break; + } + + let mut bind_response = + BindResponse::READY(efs_prot::PartitionId(partition_id.id)); + + if TestService::check_action(&posted_action, ServiceAction::DisableScaleUp) + .await + { + bind_response = BindResponse::RETRY_LATER( + "Returning BindResponse::RETRY_LATER".into(), + ); + } + + let mut action = posted_action.lock().await; + if let Some(ServiceAction::SendRetries(count)) = *action { + bind_response = + BindResponse::RETRY("Returning BindResponse::RETRY".into()); + if count > 1 { + *action = Some(ServiceAction::SendRetries(count - 1)); + } else { + *action = None; + } + } + + efs_rpc::tests::create_bind_client_to_partition_response( + rpc_message.xid(), + 
bind_response, + scale_up_config, + ) + .expect("Could not create response") + } + Err(_) => { + // If the test server doesn't parse a `bind_client_to_partition` request, + // then echo request back to the client + message + } + }; + + stream + .write_all(&response) + .await + .expect("Could not write to stream"); + } + } + + fn parse_bind_client_to_partition_request( + request: &Vec, + ) -> Result, Box> { + let rpc_message = onc_rpc::RpcMessage::try_from(request.as_slice())?; + efs_rpc::tests::parse_bind_client_to_partition_request(&rpc_message)?; + Ok(rpc_message) + } + + pub async fn shutdown(self) { + drop(self.shutdown_tx); + self.join_handle.await.unwrap(); + } + } + + struct TestClient { + stream: TcpStream, + next_xid: u32, + } + + impl TestClient { + async fn new(proxy_port: u16) -> Self { + let stream = TcpStream::connect(("127.0.0.1", proxy_port)).await.unwrap(); + Self { + stream, + next_xid: 0, + } + } + + async fn send_message_with_size(&mut self, size: usize) -> Result<(), Error> { + self.next_xid += 1; + let (request, expected_data) = rpc::test::generate_msg_fragments(size, 1); + self.stream.write_all(&request).await?; + + let response = rpc::read_rpc_bytes(&mut self.stream).await?; + + let payload_result = + rpc::RpcBatch::parse_batch(&mut BytesMut::from(response.as_slice())) + .expect("No message found") + .expect("failed to parse"); + + let rpc = payload_result.rpcs.get(0).expect("No RPCs found"); + assert_eq!(expected_data, rpc.to_vec()[RPC_HEADER_SIZE..]); + Ok(()) + } + + async fn send_partial_message_with_size(&mut self, size: usize) -> Result<(), Error> { + self.next_xid += 1; + let (_, m1) = rpc::test::generate_msg_fragments(size, 1); + let mut rng = rand::thread_rng(); + self.stream + .write_all(&m1[0..rng.gen_range(1..size - 1)]) + .await?; + Ok(()) + } + } + + pub struct ProxyUnderTest { + listen_port: u16, + handle: JoinHandle>, + status_requester: StatusRequester, + scale_up_config: ScaleUpConfig, + } + + impl ProxyUnderTest { + pub 
async fn new(tls: bool, server_port: u16) -> Self { + let scale_up_config = DEFAULT_SCALE_UP_CONFIG; + let (tcp_listener, listen_port) = find_available_port().await; + + let (status_requester, status_reporter) = status_reporter::create_status_channel(); + + let handle = if tls { + let mut tls_config = TlsConfig::new_from_config(&get_test_config()) + .await + .expect("Failed to acquire TlsConfig."); + tls_config.remote_addr = format!("127.0.0.1:{}", server_port); + + let partition_finder = + Arc::new(TlsPartitionFinder::new(Arc::new(Mutex::new(tls_config)))); + + let controller = Controller { + listener: tcp_listener, + partition_finder, + proxy_id: ProxyIdentifier::new(), + scale_up_attempt_count: 0, + restart_count: 0, + scale_up_config: scale_up_config, + status_reporter, + }; + + let token = CancellationToken::new(); + tokio::spawn(controller.run(token)) + } else { + let partition_finder = Arc::new(PlainTextPartitionFinder { + mount_target_addr: format!("127.0.0.1:{}", server_port), + }); + + let controller = Controller { + listener: tcp_listener, + partition_finder, + proxy_id: ProxyIdentifier::new(), + scale_up_attempt_count: 0, + restart_count: 0, + scale_up_config: scale_up_config, + status_reporter, + }; + + let token = CancellationToken::new(); + tokio::spawn(controller.run(token)) + }; + + Self { + listen_port, + handle, + status_requester, + scale_up_config, + } + } + + pub async fn poll_scale_up(&mut self) -> Result<(), Elapsed> { + timeout(Duration::from_secs(5), async { + loop { + let num_connections = self.get_num_connections().await; + if num_connections == self.scale_up_config.max_multiplexed_connections as usize + { + break; + } else { + tokio::time::sleep(Duration::from_millis(500)).await; + } + } + }) + .await + } + + pub async fn get_report(&mut self) -> Report { + self.status_requester + ._request_status() + .await + .expect("Could not get report") + } + + pub async fn get_proxy_id(&mut self) -> ProxyIdentifier { + let report = 
self.get_report().await; + report.proxy_id + } + + async fn get_num_connections(&mut self) -> usize { + let report = self.get_report().await; + report.num_connections + } + } + + pub async fn find_available_port() -> (TcpListener, u16) { + for port in 10000..15000 { + match TcpListener::bind(("127.0.0.1", port)).await { + Ok(v) => { + return (v, port); + } + Err(_) => continue, + } + } + panic!("Failed to find port"); + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_basic(tls_enabled: bool) { + let service = TestService::new(tls_enabled).await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(10).await.unwrap(); + client.send_message_with_size(1024).await.unwrap(); + + let report = proxy.get_report().await; + assert!(report.partition_id.is_some()); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_success_after_connection_closed_on_bind_client_to_partition_request( + tls_enabled: bool, + ) { + let service = TestService::new(tls_enabled).await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut client = TestClient::new(proxy.listen_port).await; + + service + .post_action(ServiceAction::CloseOnNextBindClientToPartitionRequest) + .await; + + client.send_message_with_size(10).await.unwrap(); + client.send_message_with_size(1024).await.unwrap(); + + let report = proxy.get_report().await; + assert!(report.partition_id.is_none()); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_success_after_bind_client_to_partition_stop_response_on_initial_connection( + tls_enabled: bool, + ) { + let service = TestService::new(tls_enabled).await; + let mut proxy = 
ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut client = TestClient::new(proxy.listen_port).await; + + service.post_action(ServiceAction::DisableScaleUp).await; + + client.send_message_with_size(10).await.unwrap(); + client.send_message_with_size(1024).await.unwrap(); + + let report = proxy.get_report().await; + assert!(report.partition_id.is_none()); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_closed_connection(tls_enabled: bool) { + let service = TestService::new(tls_enabled).await; + let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(10).await.unwrap(); + service.post_action(ServiceAction::CloseOnNextRequest).await; + let result = client.send_message_with_size(10).await; + assert!(result.is_err()); + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_closed_connection_after_scale_up(tls_enabled: bool) { + // Use a single partition so that the same PartitionId is returned on each + // bind_client_to_partition request. This prevents a controller "reset", which simplifies + // testing that the proxy will retry scale up after the backoff time has elapsed. + // + let scale_up_threshold = 10; + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 1, + scale_up_threshold, + tls_enabled, + ) + .await; + + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(100).await.unwrap(); + + // Expect that scale up occurs + proxy.poll_scale_up().await.expect("Scale up did not occur"); + + // Close one proxy connection. The subsequent requests should fail. 
+ service.post_action(ServiceAction::CloseOnNextRequest).await; + client.send_message_with_size(100).await.unwrap_err(); + + // Wait some time for proxy to reset + tokio::time::sleep(Duration::from_secs(5)).await; + + for _ in 0..5 { + client.send_message_with_size(100).await.unwrap_err(); + } + + // Reconnecting with the client should result in successful requests + let mut new_client = TestClient::new(proxy.listen_port).await; + new_client.send_message_with_size(5).await.unwrap(); + + let num_connections = proxy.get_report().await.num_connections; + assert_eq!(1, num_connections); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_closed_connection_when_big_frame_sent(tls_enabled: bool) { + let service = TestService::new(tls_enabled).await; + let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut client = TestClient::new(proxy.listen_port).await; + let result = client.send_message_with_size(22222220).await; + assert!(result.is_err()); + let error = result.unwrap_err(); + assert!( + error.kind() == ErrorKind::BrokenPipe || error.kind() == ErrorKind::ConnectionReset + ); + let reason_opt = proxy.handle.await.unwrap(); + assert_eq!(reason_opt, Some(ShutdownReason::FrameSizeExceeded)); + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_message_too_small(tls_enabled: bool) { + let service = TestService::new(tls_enabled).await; + let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut client = TestClient::new(proxy.listen_port).await; + let _ = client.send_message_with_size(1).await; + let reason_opt = proxy.handle.await.unwrap(); + assert_eq!(reason_opt, Some(ShutdownReason::FrameSizeTooSmall)); + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_client_disconnects(tls_enabled: bool) { + let service = 
TestService::new(tls_enabled).await; + let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut initial_client = TestClient::new(proxy.listen_port).await; + let _ = initial_client.send_partial_message_with_size(1000).await; + // Drop has been implemented to simulate client disconnection + drop(initial_client); + + // After initial_client is disconnects, the proxy should still accept new connection + let mut client = TestClient::new(proxy.listen_port).await; + assert!(matches!( + client.send_partial_message_with_size(1000).await, + Ok(()) + )); + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_client_disconnects_without_send(tls_enabled: bool) { + let service = TestService::new(tls_enabled).await; + let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + // Drop this client to simulate a connection to the proxy port that immediately closes + let disconnecting_client = TestClient::new(proxy.listen_port).await; + drop(disconnecting_client); + + // After the connection to the disconnecting_client is dropped, the proxy should still accept new connection + let mut client = TestClient::new(proxy.listen_port).await; + assert!(matches!( + client.send_partial_message_with_size(1000).await, + Ok(()) + )); + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_handle_server_disconnect(tls_enabled: bool) { + let service = TestService::new(tls_enabled).await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + let mut client = TestClient::new(proxy.listen_port).await; + assert!(client.send_message_with_size(10).await.is_ok()); + + // Incarnation is incremented when connection with NFS client is established + assert_eq!( + INITIAL_INCARNATION + 1, + proxy.get_proxy_id().await.incarnation + ); + + service.post_action(ServiceAction::CloseOnNextRequest).await; + + 
assert!(client.send_message_with_size(10).await.is_err()); + + // Reconnect + client = TestClient::new(proxy.listen_port).await; + assert!(client.send_message_with_size(10).await.is_ok()); + + // Incarnation is incremented when connection with NFS client is reestablished + assert_eq!( + INITIAL_INCARNATION + 2, + proxy.get_proxy_id().await.incarnation + ); + + proxy.handle.abort(); + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_scale_up_same_partition(tls_enabled: bool) { + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 1, + TestService::ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC, + tls_enabled, + ) + .await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + // A request from the client will cause the proxy to establish an addition connection to the NFS server + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(10).await.unwrap(); + + proxy + .poll_scale_up() + .await + .expect("Timeout exceeded while awaiting scale up"); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_scale_up_periodic_workload(tls_enabled: bool) { + // Requests of 15 bytes every 100 milliseconds should result in 300 bytes of traffic (150 + // bytes sent, 150 bytes received) every second. This exceeds the scale_up_threshold of 299 + // bytes/s. 
+ let scale_up_threshold = 299; + let num_requests = 60; + let request_size = 30; + let request_interval_millis = 100; + + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 1, + scale_up_threshold, + tls_enabled, + ) + .await; + + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + let mut client = TestClient::new(proxy.listen_port).await; + for _ in 0..num_requests { + client.send_message_with_size(request_size).await.unwrap(); + tokio::time::sleep(Duration::from_millis(request_interval_millis)).await; + } + + proxy + .poll_scale_up() + .await + .expect("Timeout exceeded while awaiting scale up"); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_no_scale_up_periodic_workload(tls_enabled: bool) { + // Requests of 10 bytes every 100 milliseconds should result in 200 bytes of traffic (100 + // bytes sent, 100 bytes received) every second. This does not exceed the + // scale_up_threshold of 300 bytes/s. + // + let scale_up_threshold = 300; + let num_requests = 60; + let request_size = 10; + let request_interval_millis = 100; + + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 1, + scale_up_threshold, + tls_enabled, + ) + .await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + // Only requests proxied within the monitoring window will be considered when determining + // when to scale up. The following requests should not result in a scale up attempt. 
+ // + let mut client = TestClient::new(proxy.listen_port).await; + for _ in 0..num_requests { + client.send_message_with_size(request_size).await.unwrap(); + tokio::time::sleep(Duration::from_millis(request_interval_millis)).await; + } + + proxy + .poll_scale_up() + .await + .expect_err("Unexpected Scale Up"); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_scale_up_new_partition(tls_enabled: bool) { + let service = TestService::new_with_throughput_scale_up_threshold( + TestService::ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC, + tls_enabled, + ) + .await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + // A request from the client will cause the proxy to establish an addition connection to + // the NFS server + // + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(10).await.unwrap(); + + let report = proxy.get_report().await; + let initial_partition_id = report.partition_id.expect("No PartitionId"); + + service + .post_action(ServiceAction::StopPartitionAcceptor(initial_partition_id)) + .await; + + // After scale up, we need to wait for the controller to reset and to listen to a new + // connection from the client + // + tokio::time::sleep(Duration::from_secs(5)).await; + + let mut new_client = TestClient::new(proxy.listen_port).await; + new_client.send_message_with_size(10).await.unwrap(); + + proxy + .poll_scale_up() + .await + .expect("Timeout exceeded while awaiting scale up"); + + let connection_state = proxy.get_report().await.connection_state; + assert_eq!(ConnectionSearchState::Idle, connection_state); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_successful_scale_up_with_retries(tls_enabled: bool) { + let scale_up_threshold = 10; + let service = 
TestService::new_with_partition_count_and_scale_up_threshold( + 1, + scale_up_threshold, + tls_enabled, + ) + .await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + // A request from the client will cause the proxy to establish an addition connection to the NFS server + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(5).await.unwrap(); + + service + .post_action(ServiceAction::SendRetries(std::cmp::min( + 5, + crate::connections::MAX_ATTEMPT_COUNT - 5, + ))) + .await; + + client.send_message_with_size(100).await.unwrap(); + + proxy + .poll_scale_up() + .await + .expect("Timeout exceeded while awaiting scale up"); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_no_scale_up_threshold_not_exceed(tls_enabled: bool) { + let service = TestService::new_with_throughput_scale_up_threshold( + TestService::NEVER_SCALE_UP_THRESHOLD_BYTES_PER_SEC, + tls_enabled, + ) + .await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + // Requests from the client below the throughput threshold should not cause new connections + // to the NFS server to be established + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(10).await.unwrap(); + + proxy + .poll_scale_up() + .await + .expect_err("Unexpected scale up occured"); + + let connection_state = proxy.get_report().await.connection_state; + assert_eq!(ConnectionSearchState::Idle, connection_state); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_no_scale_up_if_already_scaled_up(tls_enabled: bool) { + let scale_up_threshold = 10; + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 5, + scale_up_threshold, + tls_enabled, + ) + .await; + + let mut proxy = 
ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + // Requests from the client below the throughput threshold should not cause scale up + let mut client = TestClient::new(proxy.listen_port).await; + client + .send_message_with_size((scale_up_threshold - 1) as usize) + .await + .unwrap(); + + // Stop initial partition so that the proxy resets after scale up + let initial_report = proxy.get_report().await; + let initial_partition_id = initial_report.partition_id.expect("No PartitionId"); + assert_eq!(0, initial_report.scale_up_attempt_count); + assert_eq!(0, initial_report.restart_count); + + service + .post_action(ServiceAction::StopPartitionAcceptor(initial_partition_id)) + .await; + + // This requests should cause scale up to be attempted + client + .send_message_with_size((scale_up_threshold + 10) as usize) + .await + .unwrap(); + + tokio::time::sleep(Duration::from_secs(5)).await; + let mut client = TestClient::new(proxy.listen_port).await; + client + .send_message_with_size((scale_up_threshold - 1) as usize) + .await + .unwrap(); + + proxy + .poll_scale_up() + .await + .expect("Timeout exceeded while awaiting scale up"); + + let second_report = proxy.get_report().await; + assert_eq!(ConnectionSearchState::Idle, second_report.connection_state); + assert_eq!( + DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections as usize, + second_report.num_connections + ); + assert_eq!(1, second_report.scale_up_attempt_count); + assert_eq!(1, second_report.restart_count); + + // Additional requests from the client should not cause additional scale up attempts + for _ in 0..5 { + client + .send_message_with_size((scale_up_threshold + 10) as usize) + .await + .unwrap(); + tokio::time::sleep(Duration::from_secs(1)).await; + } + + let third_report = proxy.get_report().await; + assert_eq!(ConnectionSearchState::Idle, third_report.connection_state); + assert_eq!( + DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections as usize, + third_report.num_connections + ); + 
assert_eq!(1, third_report.scale_up_attempt_count); + assert_eq!(1, third_report.restart_count); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_scale_up_failed_too_many_retries(tls_enabled: bool) { + // Use a single partition so that the same PartitionId is returned on each + // bind_client_to_partition request. This prevents a controller "reset", which simplifies + // testing that the proxy will retry scale up after the backoff time has elapsed. + // + let scale_up_threshold = 10; + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 1, + scale_up_threshold, + tls_enabled, + ) + .await; + + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + let mut client = TestClient::new(proxy.listen_port).await; + + // Send an initial request in which the bind_client_to_partition request succeeds, and the + // main controller loop starts, but scale up is not requested + // + client + .send_message_with_size((scale_up_threshold - 1) as usize) + .await + .unwrap(); + + // Update the server to return BindResponse::RETRY until scale up attempt fails + service + .post_action(ServiceAction::SendRetries( + crate::connections::MAX_ATTEMPT_COUNT + 1, + )) + .await; + + // This request will cause the proxy to attempt scale up, in which bind_client_to_partition + // requests will fail + // + client.send_message_with_size(100).await.unwrap(); + + // Wait for scale up to fail + tokio::time::sleep(Duration::from_secs(5)).await; + + // Expect that scale up does not occur + proxy + .poll_scale_up() + .await + .expect_err("Unexpected scale up occured"); + + let report = proxy.get_report().await; + assert!(matches!( + report.connection_state, + ConnectionSearchState::Stop(_) + )); + + // Advance time and assert that scale up occurs after backoff duration elapsed + tokio::time::pause(); + tokio::time::advance( + DEFAULT_SCALE_UP_BACKOFF + 
Duration::from_secs(MULTIPLEX_CONNECTION_TIMEOUT_SEC), + ) + .await; + tokio::time::resume(); + + service.post_action(ServiceAction::EnableScaleUp).await; + client.send_message_with_size(100).await.unwrap(); + + proxy.poll_scale_up().await.expect("Scale up failed"); + + let connection_state = proxy.get_report().await.connection_state; + assert_eq!(ConnectionSearchState::Idle, connection_state); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_scale_up_failed_retry_later(tls_enabled: bool) { + // Use a single partition so that the same PartitionId is returned on each + // bind_client_to_partition request. This prevents a controller "reset", which simplifies + // testing that the proxy will retry scale up after the backoff time has elapsed. + // + let scale_up_threshold = 10; + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 1, + scale_up_threshold, + tls_enabled, + ) + .await; + + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + let mut client = TestClient::new(proxy.listen_port).await; + + // Send an initial request in which the bind_client_to_partition request succeeds, and the + // main controller loop starts, but scale up is not requested + // + client + .send_message_with_size((scale_up_threshold - 1) as usize) + .await + .unwrap(); + + // Update the server to return BindResponse::RETRY_LATER on the next bind_client_to_partition rpc + // request + // + service.post_action(ServiceAction::DisableScaleUp).await; + + // This request will cause the proxy to attempt scale up, in which bind_client_to_partition + // requests will fail + // + client + .send_message_with_size((scale_up_threshold) as usize) + .await + .unwrap(); + + // Expect that scale up does not occur + proxy + .poll_scale_up() + .await + .expect_err("Unexpected scale up occured"); + + let report = proxy.get_report().await; + assert!(matches!( + 
report.connection_state, + ConnectionSearchState::Stop(_) + )); + + // Advance time and assert that scale up occurs after backoff duration elapsed + tokio::time::pause(); + tokio::time::advance( + DEFAULT_SCALE_UP_BACKOFF + Duration::from_secs(MULTIPLEX_CONNECTION_TIMEOUT_SEC), + ) + .await; + tokio::time::resume(); + + service.post_action(ServiceAction::EnableScaleUp).await; + client + .send_message_with_size( + (scale_up_threshold * proxy::REPORT_INTERVAL_SECS as i32) as usize, + ) + .await + .unwrap(); + + proxy.poll_scale_up().await.expect("Scale up failed"); + + let connection_state = proxy.get_report().await.connection_state; + assert_eq!(ConnectionSearchState::Idle, connection_state); + + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[test_case(false; "tls disabled")] + #[tokio::test] + async fn test_scale_up_connection_usage(tls_enabled: bool) { + // Prevent controller reset after scale up by using existing partition + let service = TestService::new_with_partition_count_and_scale_up_threshold( + 1, + TestService::ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC, + tls_enabled, + ) + .await; + + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(10).await.unwrap(); + + proxy + .poll_scale_up() + .await + .expect("Timeout exceeded while awaiting scale up"); + + let request_to_send_per_connection = 10; + for _ in + 0..(request_to_send_per_connection * proxy.scale_up_config.max_multiplexed_connections) + { + client.send_message_with_size(10).await.unwrap(); + } + + // Check that requests are routed over multiple connections + let partition_id = proxy + .get_report() + .await + .partition_id + .expect("Missing PartitionId"); + + let request_counter = service.request_counter.lock().await; + let counts = request_counter + .get(&partition_id) + .expect("Missing request counts"); + + assert!(counts.len() >= 
proxy.scale_up_config.max_multiplexed_connections as usize); + for count in counts { + let operation_count = count.load(std::sync::atomic::Ordering::Acquire); + // Unused connections to a partition can be established during connection search. For + // this connections, the operation count will be 1 + // + assert!( + operation_count >= request_to_send_per_connection as u32 || operation_count == 1 + ); + } + + drop(request_counter); + service.shutdown().await; + } + + #[test_case(true; "tls enabled")] + #[tokio::test] + async fn test_efs_utils_port_test(tls_enabled: bool) { + let service = TestService::new(tls_enabled).await; + let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; + let mut port_health_check = TestClient::new(proxy.listen_port).await; + // Mimic efs-utils's port test which checks whether efs-proxy is alive. + let _ = port_health_check.stream.shutdown().await.unwrap(); + let mut client = TestClient::new(proxy.listen_port).await; + client.send_message_with_size(10).await.unwrap(); + client.send_message_with_size(1024).await.unwrap(); + + let report = proxy.get_report().await; + assert!(report.partition_id.is_some()); + + service.shutdown().await; + } +} diff --git a/src/proxy/src/efs_prot.x b/src/proxy/src/efs_prot.x new file mode 100644 index 00000000..d0faeb4f --- /dev/null +++ b/src/proxy/src/efs_prot.x @@ -0,0 +1,57 @@ +/* +* EFS program V1 +*/ + +const PROXY_ID_LENGTH = 16; +const PROXY_INCARNATION_LENGTH = 8; +const PARTITION_ID_LENGTH = 64; + +enum OperationType { + OP_BIND_CLIENT_TO_PARTITION = 1 +}; + +typedef opaque PartitionId[PARTITION_ID_LENGTH]; + +struct ProxyIdentifier { + opaque identifier; + opaque incarnation; +}; + +struct ScaleUpConfig { + int max_multiplexed_connections; + int scale_up_bytes_per_sec_threshold; + int scale_up_threshold_breached_duration_sec; +}; + +enum BindResponseType { + RETRY = 0, + RETRY_LATER = 1, + PREFERRED = 2, + READY = 3, + ERROR = 4 +}; + +union BindResponse switch 
(BindResponseType type) { + case PREFERRED: + case READY: + PartitionId partition_id; + case RETRY: + case RETRY_LATER: + String stop_msg; + case ERROR: + String error_msg; + default: + void; +}; + +struct BindClientResponse { + BindResponse bind_response; + ScaleUpConfig scale_up_config; +}; + +union OperationResponse switch (OperationType operation_type) { + case OP_BIND_CLIENT_TO_PARTITION: + BindClientResponse response; + default: + void; +}; diff --git a/src/proxy/src/efs_rpc.rs b/src/proxy/src/efs_rpc.rs new file mode 100644 index 00000000..5e464734 --- /dev/null +++ b/src/proxy/src/efs_rpc.rs @@ -0,0 +1,318 @@ +use std::io::Cursor; +use tokio::io::AsyncWriteExt; + +use crate::connections::ProxyStream; +use crate::efs_prot; +use crate::efs_prot::BindClientResponse; +use crate::efs_prot::OperationType; +use crate::error::RpcError; +use crate::proxy_identifier::ProxyIdentifier; +use crate::rpc; + +const PROGRAM_NUMBER: u32 = 100200; +const PROGRAM_VERSION: u32 = 1; + +#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] +pub struct PartitionId { + pub id: [u8; 64], +} + +pub async fn bind_client_to_partition( + proxy_id: ProxyIdentifier, + stream: &mut dyn ProxyStream, +) -> Result { + let request = create_bind_client_to_partition_request(&proxy_id)?; + stream.write_all(&request).await?; + stream.flush().await?; + + let response_bytes = rpc::read_rpc_bytes(stream).await?; + let response = onc_rpc::RpcMessage::try_from(response_bytes.as_slice())?; + + parse_bind_client_to_partition_response(&response) +} + +pub fn create_bind_client_to_partition_request( + proxy_id: &ProxyIdentifier, +) -> Result, RpcError> { + let payload = efs_prot::ProxyIdentifier { + identifier: proxy_id.uuid.as_bytes().to_vec(), + incarnation: proxy_id.incarnation.to_be_bytes().to_vec(), + }; + let mut payload_buf = Vec::new(); + xdr_codec::pack(&payload, &mut payload_buf)?; + + let call_body = onc_rpc::CallBody::new( + PROGRAM_NUMBER, + PROGRAM_VERSION, + 
OperationType::OP_BIND_CLIENT_TO_PARTITION as u32, + onc_rpc::auth::AuthFlavor::AuthNone::>(None), + onc_rpc::auth::AuthFlavor::AuthNone::>(None), + payload_buf, + ); + + let xid = rand::random::(); + onc_rpc::RpcMessage::new(xid, onc_rpc::MessageType::Call(call_body)) + .serialise() + .map_err(|e| e.into()) +} + +pub fn parse_bind_client_to_partition_response( + response: &onc_rpc::RpcMessage<&[u8], &[u8]>, +) -> Result { + let Some(reply_body) = response.reply_body() else { + Err(RpcError::MalformedResponse)? + }; + + let accepted_status = match reply_body { + onc_rpc::ReplyBody::Accepted(reply) => reply.status(), + onc_rpc::ReplyBody::Denied(_m) => Err(RpcError::Denied)?, + }; + + let payload = match accepted_status { + onc_rpc::AcceptedStatus::Success(p) => p, + onc_rpc::AcceptedStatus::GarbageArgs => Err(RpcError::GarbageArgs)?, + onc_rpc::AcceptedStatus::ProgramUnavailable => Err(RpcError::ProgramUnavailable)?, + onc_rpc::AcceptedStatus::ProgramMismatch { low, high } => Err(RpcError::ProgramMismatch { + low: *low, + high: *high, + })?, + onc_rpc::AcceptedStatus::ProcedureUnavailable => Err(RpcError::ProcedureUnavailable)?, + onc_rpc::AcceptedStatus::SystemError => Err(RpcError::SystemError)?, + }; + + xdr_codec::unpack::<_, BindClientResponse>(&mut Cursor::new(payload)).map_err(|e| e.into()) +} + +#[cfg(test)] +pub mod tests { + use super::*; + use crate::controller::tests::TestService; + use crate::controller::DEFAULT_SCALE_UP_CONFIG; + use crate::efs_prot::BindResponse; + use crate::efs_prot::ScaleUpConfig; + use crate::tls::tests::get_client_config; + use onc_rpc::{AuthError, RejectedReply}; + use rand::RngCore; + use s2n_tls_tokio::TlsConnector; + use tokio::net::TcpStream; + + const XID: u32 = 1; + + pub fn parse_bind_client_to_partition_request( + request: &onc_rpc::RpcMessage<&[u8], &[u8]>, + ) -> Result { + let call_body = request.call_body().expect("not a call rpc"); + + if PROGRAM_NUMBER != call_body.program() || PROGRAM_VERSION != 
call_body.program_version() { + return Err(RpcError::GarbageArgs); + } + + let mut payload = Cursor::new(call_body.payload()); + let raw_proxy_id = xdr_codec::unpack::<_, efs_prot::ProxyIdentifier>(&mut payload)?; + + Ok(ProxyIdentifier { + uuid: uuid::Builder::from_bytes( + raw_proxy_id + .identifier + .try_into() + .expect("Failed not convert vec to sized array"), + ) + .into_uuid(), + incarnation: i64::from_be_bytes( + raw_proxy_id + .incarnation + .try_into() + .expect("Failed to convert vec to sized array"), + ), + }) + } + + pub fn create_bind_client_to_partition_response( + xid: u32, + bind_response: BindResponse, + scale_up_config: ScaleUpConfig, + ) -> Result, RpcError> { + let mut payload_buf = Vec::new(); + + let response = BindClientResponse { + bind_response: bind_response, + scale_up_config: scale_up_config, + }; + xdr_codec::pack(&response, &mut payload_buf)?; + + create_bind_client_to_partition_response_from_accepted_status( + xid, + onc_rpc::AcceptedStatus::Success(payload_buf), + ) + } + + pub fn create_bind_client_to_partition_response_from_accepted_status( + xid: u32, + accepted_status: onc_rpc::AcceptedStatus>, + ) -> Result, RpcError> { + let reply_body = onc_rpc::ReplyBody::Accepted(onc_rpc::AcceptedReply::new( + onc_rpc::auth::AuthFlavor::AuthNone::>(None), + accepted_status, + )); + + onc_rpc::RpcMessage::new(xid, onc_rpc::MessageType::Reply(reply_body)) + .serialise() + .map_err(|e| e.into()) + } + + fn generate_parse_bind_client_to_partition_response_result( + accepted_status: onc_rpc::AcceptedStatus>, + ) -> Result { + let response = + create_bind_client_to_partition_response_from_accepted_status(XID, accepted_status)?; + let deserialized = onc_rpc::RpcMessage::try_from(response.as_slice())?; + parse_bind_client_to_partition_response(&deserialized) + } + + pub fn generate_partition_id() -> efs_prot::PartitionId { + let mut bytes = [0u8; efs_prot::PARTITION_ID_LENGTH as usize]; + rand::thread_rng().fill_bytes(&mut bytes); + 
efs_prot::PartitionId(bytes) + } + + #[tokio::test] + async fn test_bind_client_to_partition() { + let server = TestService::new(true).await; + let tcp_stream = TcpStream::connect(("127.0.0.1", server.listen_port)) + .await + .expect("Could not connect to test server."); + + let connector = + TlsConnector::new(get_client_config().await.expect("Failed to read config")); + let mut tls_stream = connector + .connect("localhost", tcp_stream) + .await + .expect("Failed to establish TLS Connection"); + + let response = bind_client_to_partition(ProxyIdentifier::new(), &mut tls_stream) + .await + .expect("bind_client_to_partition request failed"); + + let partition_id = match response.bind_response { + BindResponse::READY(id) => PartitionId { id: id.0 }, + _ => panic!(), + }; + + assert_eq!( + server + .partition_ids + .get(1) + .expect("Service has no partition IDs"), + &partition_id + ); + server.shutdown().await; + } + + #[test] + fn test_request_serde() -> Result<(), RpcError> { + let proxy_id = ProxyIdentifier::new(); + let request = create_bind_client_to_partition_request(&proxy_id)?; + + let deserialized = onc_rpc::RpcMessage::try_from(request.as_slice())?; + let deserialized_proxy_id = parse_bind_client_to_partition_request(&deserialized)?; + + assert_eq!(proxy_id.uuid, deserialized_proxy_id.uuid); + assert_eq!(proxy_id.incarnation, deserialized_proxy_id.incarnation); + Ok(()) + } + + #[test] + fn test_response_serde() -> Result<(), RpcError> { + let partition_id = generate_partition_id(); + let partition_id_copy = efs_prot::PartitionId(partition_id.0.clone()); + + let response = create_bind_client_to_partition_response( + XID, + BindResponse::READY(partition_id_copy), + DEFAULT_SCALE_UP_CONFIG, + )?; + + let deserialized = onc_rpc::RpcMessage::try_from(response.as_slice())?; + let deserialized_response = parse_bind_client_to_partition_response(&deserialized)?; + + assert!( + matches!(deserialized_response.bind_response, BindResponse::READY(id) if id.0 == 
partition_id.0) + ); + Ok(()) + } + + #[test] + fn test_parse_bind_client_to_partition_response_missing_reply() -> Result<(), RpcError> { + // Create a call message, which will error when parsed as a response + let malformed_response = create_bind_client_to_partition_request(&ProxyIdentifier::new())?; + let deserialized = onc_rpc::RpcMessage::try_from(malformed_response.as_slice())?; + + let result = parse_bind_client_to_partition_response(&deserialized); + assert!(matches!(result, Err(RpcError::MalformedResponse))); + Ok(()) + } + + #[test] + fn test_parse_bind_client_to_partition_response_denied() -> Result<(), RpcError> { + let reply_body = + onc_rpc::ReplyBody::Denied(RejectedReply::AuthError(AuthError::BadCredentials)); + let rpc_message = onc_rpc::RpcMessage::new(XID, onc_rpc::MessageType::Reply(reply_body)); + + let result = parse_bind_client_to_partition_response(&rpc_message); + assert!(matches!(result, Err(RpcError::Denied))); + Ok(()) + } + + #[test] + fn test_parse_bind_client_to_partition_response_garbage_args() -> Result<(), RpcError> { + let parse_result = generate_parse_bind_client_to_partition_response_result( + onc_rpc::AcceptedStatus::GarbageArgs, + ); + assert!(matches!(parse_result, Err(RpcError::GarbageArgs))); + Ok(()) + } + + #[test] + fn test_parse_bind_client_to_partition_response_program_unavailable() -> Result<(), RpcError> { + let parse_result = generate_parse_bind_client_to_partition_response_result( + onc_rpc::AcceptedStatus::ProgramUnavailable, + ); + assert!(matches!(parse_result, Err(RpcError::ProgramUnavailable))); + Ok(()) + } + + #[test] + fn test_parse_bind_client_to_partition_response_program_mismatch() -> Result<(), RpcError> { + let program_version_low = 10; + let program_version_high = 100; + let parse_result = generate_parse_bind_client_to_partition_response_result( + onc_rpc::AcceptedStatus::ProgramMismatch { + low: program_version_low, + high: program_version_high, + }, + ); + assert!(matches!( + parse_result, + 
Err(RpcError::ProgramMismatch { low: l, high: h }) if program_version_low == l && program_version_high == h)); + Ok(()) + } + + #[test] + fn test_parse_bind_client_to_partition_response_procedure_unavailable() -> Result<(), RpcError> + { + let parse_result = generate_parse_bind_client_to_partition_response_result( + onc_rpc::AcceptedStatus::ProcedureUnavailable, + ); + assert!(matches!(parse_result, Err(RpcError::ProcedureUnavailable))); + Ok(()) + } + + #[test] + fn test_parse_bind_client_to_partition_response_system_error() -> Result<(), RpcError> { + let parse_result = generate_parse_bind_client_to_partition_response_result( + onc_rpc::AcceptedStatus::SystemError, + ); + assert!(matches!(parse_result, Err(RpcError::SystemError))); + Ok(()) + } +} diff --git a/src/proxy/src/error.rs b/src/proxy/src/error.rs new file mode 100644 index 00000000..f6eee0d4 --- /dev/null +++ b/src/proxy/src/error.rs @@ -0,0 +1,41 @@ +use thiserror::Error as ThisError; + +#[derive(Debug, ThisError)] +pub enum ConnectError { + #[error("Connect attempt cancelled")] + Cancelled, + #[error("{0}")] + IoError(#[from] tokio::io::Error), + #[error("Connect attempt failed - Maximum attempt count exceeded")] + MaxAttemptsExceeded, + #[error("Attempt to acquire additional connections to EFS failed.")] + MultiplexFailure, + #[error(transparent)] + Tls(#[from] s2n_tls::error::Error), + #[error("Connect attempt failed - Timeout")] + Timeout, +} + +#[derive(Debug, ThisError)] +pub enum RpcError { + #[error("not a rpc response")] + MalformedResponse, + #[error("rpc reply_stat: MSG_DENIED")] + Denied, + #[error("rpc accept_stat: GARBAGE_ARGS")] + GarbageArgs, + #[error("rpc accept_stat: PROG_UNAVAIL")] + ProgramUnavailable, + #[error("rpc accept_stat: PROG_MISMATCH low: {} high: {}", .low, .high)] + ProgramMismatch { low: u32, high: u32 }, + #[error("rpc accept_stat: PROC_UNAVAIL")] + ProcedureUnavailable, + #[error("rpc accept_stat: SystemError")] + SystemError, + #[error(transparent)] + 
IoError(#[from] tokio::io::Error), + #[error(transparent)] + XdrCodecError(#[from] xdr_codec::Error), + #[error(transparent)] + OncRpc(#[from] onc_rpc::Error), +} diff --git a/src/proxy/src/lib.rs b/src/proxy/src/lib.rs new file mode 100644 index 00000000..008bcdf6 --- /dev/null +++ b/src/proxy/src/lib.rs @@ -0,0 +1,4 @@ +//! efs-proxy: proxies NFS traffic between a local NFS client and Amazon EFS. +//! +//! Connections to EFS may optionally be encrypted with TLS (see the `tls` module). +#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] diff --git a/src/proxy/src/logger.rs b/src/proxy/src/logger.rs new file mode 100644 index 00000000..1d153fe9 --- /dev/null +++ b/src/proxy/src/logger.rs @@ -0,0 +1,65 @@ +use log::LevelFilter; +use log4rs::{ + append::{ + console::{ConsoleAppender, Target}, + rolling_file::{ + policy::compound::{ + roll::fixed_window::FixedWindowRoller, trigger::size::SizeTrigger, CompoundPolicy, + }, + RollingFileAppender, + }, + }, + config::{Appender, Config, Root}, + encode::pattern::PatternEncoder, + filter::threshold::ThresholdFilter, +}; +use std::{path::Path, str::FromStr}; + +use crate::config_parser::ProxyConfig; + +const LOG_FILE_MAX_BYTES: u64 = 1048576; +const LOG_FILE_COUNT: u32 = 10; + +pub fn init(config: &ProxyConfig) { + let log_file_path_string = config + .output + .clone() + .expect("config value `output` is not set"); + let log_file_path = Path::new(&log_file_path_string); + let level_filter = + LevelFilter::from_str(&config.debug).expect("config value for `debug` is invalid"); + + let stderr = ConsoleAppender::builder().target(Target::Stderr).build(); + + let trigger = SizeTrigger::new(LOG_FILE_MAX_BYTES); + let mut pattern = log_file_path_string.clone(); + pattern.push_str(".{}"); + let roller = FixedWindowRoller::builder() + .build(&pattern, LOG_FILE_COUNT) + .expect("Unable to create roller"); + let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller)); + + let log_file = RollingFileAppender::builder() + .encoder(Box::new(PatternEncoder::new( + 
"{d(%Y-%m-%dT%H:%M:%S%.3fZ)(utc)} {l} {M} {m}{n}", + ))) + .build(log_file_path, Box::new(policy)) + .expect("Unable to create log file"); + + let config = Config::builder() + .appender(Appender::builder().build("logfile", Box::new(log_file))) + .appender( + Appender::builder() + .filter(Box::new(ThresholdFilter::new(LevelFilter::Error))) + .build("stderr", Box::new(stderr)), + ) + .build( + Root::builder() + .appender("logfile") + .appender("stderr") + .build(level_filter), + ) + .expect("Invalid logger config"); + + let _ = log4rs::init_config(config).expect("Unable to initialize logger"); +} diff --git a/src/proxy/src/main.rs b/src/proxy/src/main.rs new file mode 100644 index 00000000..acc82e15 --- /dev/null +++ b/src/proxy/src/main.rs @@ -0,0 +1,138 @@ +use crate::config_parser::ProxyConfig; +use crate::connections::{PlainTextPartitionFinder, TlsPartitionFinder}; +use crate::tls::TlsConfig; +use clap::Parser; +use controller::Controller; +use log::{debug, error, info}; +use std::path::Path; +use std::sync::Arc; +use tokio::signal; +use tokio::sync::Mutex; +use tokio_util::sync::CancellationToken; + +mod config_parser; +mod connections; +mod controller; +mod efs_rpc; +mod error; +mod logger; +mod proxy; +mod proxy_identifier; +mod rpc; +mod shutdown; +mod status_reporter; +mod tls; + +#[allow(clippy::all)] +#[allow(deprecated)] +#[allow(invalid_value)] +#[allow(non_camel_case_types)] +#[allow(unused_assignments)] +mod efs_prot { + include!(concat!(env!("OUT_DIR"), "/efs_prot_xdr.rs")); +} + +#[tokio::main] +async fn main() { + let args = Args::parse(); + + let proxy_config = match ProxyConfig::from_path(Path::new(&args.proxy_config_path)) { + Ok(config) => config, + Err(e) => panic!("Failed to read configuration. 
{}", e), + }; + + if let Some(_log_file_path) = &proxy_config.output { + logger::init(&proxy_config) + } + + info!("Running with configuration: {:?}", proxy_config); + + // This "status reporter" is currently only used in tests + let (_status_requester, status_reporter) = status_reporter::create_status_channel(); + + let sigterm_cancellation_token = CancellationToken::new(); + let mut sigterm_listener = match signal::unix::signal(signal::unix::SignalKind::terminate()) { + Ok(listener) => listener, + Err(e) => panic!("Failed to create SIGTERM listener. {}", e), + }; + + let controller_handle = if args.tls { + let tls_config = match get_tls_config(&proxy_config).await { + Ok(config) => Arc::new(Mutex::new(config)), + Err(e) => panic!("Failed to obtain TLS config:{}", e), + }; + + run_sighup_handler(proxy_config.clone(), tls_config.clone()); + + let controller = Controller::new( + &proxy_config.nested_config.listen_addr, + Arc::new(TlsPartitionFinder::new(tls_config)), + status_reporter, + ) + .await; + tokio::spawn(controller.run(sigterm_cancellation_token.clone())) + } else { + let controller = Controller::new( + &proxy_config.nested_config.listen_addr, + Arc::new(PlainTextPartitionFinder { + mount_target_addr: proxy_config.nested_config.mount_target_addr.clone(), + }), + status_reporter, + ) + .await; + tokio::spawn(controller.run(sigterm_cancellation_token.clone())) + }; + + tokio::select! { + shutdown_reason = controller_handle => error!("Shutting down. 
{:?}", shutdown_reason), + _ = sigterm_listener.recv() => { + info!("Received SIGTERM"); + sigterm_cancellation_token.cancel(); + }, + } +} + +async fn get_tls_config(proxy_config: &ProxyConfig) -> Result { + let tls_config = TlsConfig::new( + proxy_config.fips, + Path::new(&proxy_config.nested_config.ca_file), + Path::new(&proxy_config.nested_config.client_cert_pem_file), + Path::new(&proxy_config.nested_config.client_private_key_pem_file), + &proxy_config.nested_config.mount_target_addr, + &proxy_config.nested_config.expected_server_hostname_tls, + ) + .await; + let tls_config = tls_config?; + Ok(tls_config) +} + +fn run_sighup_handler(proxy_config: ProxyConfig, tls_config: Arc>) { + tokio::spawn(async move { + let mut sighup_listener = match signal::unix::signal(signal::unix::SignalKind::hangup()) { + Ok(listener) => listener, + Err(e) => panic!("Failed to create SIGHUP listener. {}", e), + }; + + loop { + sighup_listener + .recv() + .await + .expect("SIGHUP listener stream is closed"); + + debug!("Received SIGHUP"); + let mut locked_config = tls_config.lock().await; + match get_tls_config(&proxy_config).await { + Ok(config) => *locked_config = config, + Err(e) => panic!("Failed to acquire TLS config. 
{}", e), + } + } + }); +} + +#[derive(Parser, Debug, Clone)] +pub struct Args { + pub proxy_config_path: String, + + #[arg(long, default_value_t = false)] + pub tls: bool, +} diff --git a/src/proxy/src/proxy.rs b/src/proxy/src/proxy.rs new file mode 100644 index 00000000..d686e144 --- /dev/null +++ b/src/proxy/src/proxy.rs @@ -0,0 +1,525 @@ +use std::{ + error::Error, + marker::PhantomData, + sync::{atomic::AtomicU64, Arc}, + time::{Duration, Instant}, +}; + +use bytes::BytesMut; +use log::{debug, error, info, trace}; +use tokio::{ + io::{split, AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf}, + net::{ + tcp::{OwnedReadHalf, OwnedWriteHalf}, + TcpStream, + }, + sync::{ + mpsc::{self}, + Mutex, + }, + task::JoinHandle, +}; +use tokio_util::sync::CancellationToken; + +use crate::rpc::{RpcFragmentParseError, RPC_MAX_SIZE}; +use crate::{ + connections::ProxyStream, + controller::Event, + rpc::RpcBatch, + shutdown::{ShutdownHandle, ShutdownReason}, +}; + +pub const REPORT_INTERVAL_SECS: u64 = 3; + +#[derive(Copy, Clone, Debug)] +pub struct PerformanceStats { + _num_connections: usize, + pub read_bytes: u64, + pub write_bytes: u64, + pub time_delta: Duration, +} + +impl PerformanceStats { + pub fn new( + num_connections: usize, + read_bytes: u64, + write_bytes: u64, + time_delta: Duration, + ) -> Self { + PerformanceStats { + _num_connections: num_connections, + read_bytes, + write_bytes, + time_delta, + } + } + + // Return total throughput in bytes per second + pub fn get_total_throughput(&self) -> u64 { + let time_delta_seconds = self.time_delta.as_secs(); + if time_delta_seconds == 0 { + 0 + } else { + let total_bytes = self.read_bytes + self.write_bytes; + total_bytes / time_delta_seconds + } + } +} +pub struct Proxy { + partition_to_nfs_cli_queue: mpsc::Sender, + partition_senders: Arc>>>, + shutdown: ShutdownHandle, + proxy_task_handle: JoinHandle<()>, + phantom: PhantomData, +} + +impl Proxy { + const SHUTDOWN_TIMEOUT: u64 = 15; + + pub fn new( + nfs_client: 
TcpStream, + partition_servers: Vec, + notification_queue: mpsc::Sender>, + shutdown: ShutdownHandle, + ) -> Self { + let (tx, rx) = mpsc::channel(64); + + let senders = partition_servers + .into_iter() + .map(|stream| Proxy::create_connection(stream, tx.clone(), shutdown.clone())) + .collect::>>(); + + let partition_senders = Arc::new(Mutex::new(senders)); + + let proxy = ProxyTask::new( + nfs_client, + notification_queue, + partition_senders.clone(), + rx, + shutdown.clone(), + ); + let proxy_task_handle = tokio::spawn(proxy.run()); + Self { + partition_to_nfs_cli_queue: tx, + partition_senders, + shutdown, + proxy_task_handle, + phantom: PhantomData, + } + } + + pub async fn add_connection(&self, stream: S) { + let conn = Proxy::create_connection( + stream, + self.partition_to_nfs_cli_queue.clone(), + self.shutdown.clone(), + ); + let mut f = self.partition_senders.lock().await; + f.push(conn); + } + + fn create_connection( + stream: S, + proxy: mpsc::Sender, + shutdown: ShutdownHandle, + ) -> mpsc::Sender { + let (tx, rx) = mpsc::channel(64); + tokio::spawn(ConnectionTask::new(stream, rx, proxy).run(shutdown)); + tx + } + + pub async fn shutdown(self) -> Result<(), Box> { + self.shutdown.cancellation_token.cancel(); + match tokio::time::timeout( + Duration::from_secs(Self::SHUTDOWN_TIMEOUT), + self.proxy_task_handle, + ) + .await? 
+ { + Ok(()) => Ok(()), + Err(join_err) => Err(join_err.into()), + } + } +} + +const BUFFER_SIZE: usize = RPC_MAX_SIZE; + +struct ProxyTask { + nfs_client: TcpStream, + notification_queue: mpsc::Sender>, + partition_senders: Arc>>>, + response_queue: mpsc::Receiver, + shutdown: ShutdownHandle, +} + +enum ConnectionMessage { + Response(RpcBatch), +} + +impl ProxyTask { + pub fn new( + nfs_client: TcpStream, + notification_queue: mpsc::Sender>, + partition_senders: Arc>>>, + response_queue: mpsc::Receiver, + shutdown: ShutdownHandle, + ) -> Self { + Self { + nfs_client, + notification_queue, + partition_senders, + response_queue, + shutdown, + } + } + + async fn run(self) { + // Runs Proxy between NFS Client and the EFS Service. + // + // This function returns when it is cancelled by the `ShutdownHandle`, or if an error + // causes the `ProxyTask`'s `reader`, `writer`, or `reporter` task to return. In any of + // these cases, the `tokio::select!` block will cancel all of the tasks run by this object. + // + // An unused `mspc::Sender` is passed to each task spawned, so that we can await task + // shutdown with `mspc::Receiver::recv`. See https://tokio.rs/tokio/topics/shutdown. + + trace!("Starting proxy task"); + + let (shutdown_sender, mut shutdown_receiver) = mpsc::channel::(1); + + let write_byte_count = Arc::new(AtomicU64::new(0)); + let read_byte_count = Arc::new(AtomicU64::new(0)); + + let (read_half, write_half) = self.nfs_client.into_split(); + + let reader = Self::run_reader( + read_half, + read_byte_count.clone(), + self.partition_senders.clone(), + self.shutdown.clone(), + shutdown_sender.clone(), + ); + let shutdown = self.shutdown.clone(); + tokio::spawn(async move { + tokio::select! 
{ + _ = reader => trace!("Proxy reader stopped"), + _ = shutdown.cancellation_token.cancelled() => trace!("Proxy reader stopped by ShutdownHandle"), + } + }); + + let writer = Self::run_writer( + write_half, + write_byte_count.clone(), + self.response_queue, + self.shutdown.clone(), + shutdown_sender.clone(), + ); + let shutdown = self.shutdown.clone(); + tokio::spawn(async move { + tokio::select! { + _ = writer => trace!("Proxy writer stopped"), + _ = shutdown.cancellation_token.cancelled() => trace!("Proxy writer stopped by ShutdownHandle"), + } + }); + + let reporter = Self::run_reporter( + read_byte_count, + write_byte_count, + self.partition_senders.clone(), + self.notification_queue.clone(), + shutdown_sender.clone(), + ); + let shutdown = self.shutdown.clone(); + tokio::spawn(async move { + tokio::select! { + _ = reporter => trace!("Proxy reporter stopped"), + _ = shutdown.cancellation_token.cancelled() => trace!("Proxy reporter stopped by ShutdownHandle"), + } + }); + + drop(shutdown_sender); + shutdown_receiver.recv().await; + } + + // NFS client to Proxy + async fn run_reader( + mut read_half: OwnedReadHalf, + read_count: Arc, + partition_senders: Arc>>>, + shutdown: ShutdownHandle, + _shutdown_sender: mpsc::Sender, + ) { + trace!("Starting proxy reader"); + let mut buffer = BytesMut::with_capacity(BUFFER_SIZE); + let reason; + let mut next_conn = 0; + + loop { + match read_half.read_buf(&mut buffer).await { + Ok(n_read) => { + if n_read == 0 { + reason = Some(ShutdownReason::Unmount); + break; + } else { + read_count.fetch_add(n_read as u64, std::sync::atomic::Ordering::AcqRel); + } + } + Err(e) => { + info!("Error reading from NFS client {:?}", e); + reason = Some(ShutdownReason::Unmount); + break; + } + } + + match RpcBatch::parse_batch(&mut buffer) { + Ok(Some(batch)) => { + let f = partition_senders.lock().await; + let r = f[next_conn].send(batch).await; + next_conn = (next_conn + 1) % f.len(); + if let Err(e) = r { + debug!("Error sending RPC batch 
to connection task {:?}", e); + reason = Some(ShutdownReason::UnexpectedError); + break; + }; + } + Err(RpcFragmentParseError::InvalidSizeTooSmall) => { + drop(read_half); + error!("NFS Client Error: invalid RPC size - size too small"); + reason = Some(ShutdownReason::FrameSizeTooSmall); + break; + } + Err(RpcFragmentParseError::SizeLimitExceeded) => { + drop(read_half); + error!("NFS Client Error: invalid RPC size - size limit exceeded"); + reason = Some(ShutdownReason::FrameSizeExceeded); + break; + } + Ok(None) | Err(RpcFragmentParseError::Incomplete) => (), + } + + if buffer.capacity() == 0 { + buffer.reserve(BUFFER_SIZE) + } + } + trace!("cli_to_server exiting!"); + shutdown.exit(reason).await; + } + + // Proxy to NFS Client + async fn run_writer( + mut write_half: OwnedWriteHalf, + write_count: Arc, + mut response_queue: mpsc::Receiver, + shutdown: ShutdownHandle, + _shutdown_sender: mpsc::Sender, + ) { + trace!("Starting proxy writer"); + + let mut reason = None; + loop { + match response_queue.recv().await { + Some(ConnectionMessage::Response(batch)) => { + let mut total_written = 0; + + for b in &batch.rpcs { + match write_half.write_all(b).await { + Ok(_) => total_written += b.len(), + Err(e) => { + debug!("Error writing to nfs_client. 
{:?}", e); + reason = Some(ShutdownReason::Unmount); + break; + } + }; + } + + write_count + .fetch_add(total_written as u64, std::sync::atomic::Ordering::AcqRel); + } + None => { + info!("Exiting server_to_cli"); + break; + } + } + } + shutdown.exit(reason).await; + } + + async fn run_reporter( + read_count: Arc, + write_count: Arc, + partition_senders: Arc>>>, + notification_queue: mpsc::Sender>, + _shutdown_sender: mpsc::Sender, + ) { + trace!("Starting reporter task"); + + let mut last = Instant::now(); + loop { + tokio::time::sleep(Duration::from_secs(REPORT_INTERVAL_SECS)).await; + + let num_connections; + { + let t = partition_senders.lock().await; + num_connections = t.len(); + drop(t); + } + + let now = Instant::now(); + let delta = now - last; + last = now; + let read = read_count.swap(0, std::sync::atomic::Ordering::AcqRel); + let write = write_count.swap(0, std::sync::atomic::Ordering::AcqRel); + let result = notification_queue + .send(Event::ProxyUpdate(PerformanceStats::new( + num_connections, + read, + write, + delta, + ))) + .await; + if result.is_err() { + break; + } + } + } +} + +struct ConnectionTask { + stream: S, + proxy_receiver: mpsc::Receiver, + proxy_sender: mpsc::Sender, +} + +impl ConnectionTask { + fn new( + stream: S, + proxy_receiver: mpsc::Receiver, + proxy_sender: mpsc::Sender, + ) -> Self { + Self { + stream, + proxy_receiver, + proxy_sender, + } + } + + async fn run(self, shutdown_handle: ShutdownHandle) { + let (r, w) = split(self.stream); + + let shutdown = shutdown_handle.clone(); + + // This CancellationToken facilitates graceful TLS connection closures by ensuring that + // that the ReadHalf is dropped only after the WriteHalf.shutdown() has returned + let connection_cancellation_token = CancellationToken::new(); + + let writer = Self::run_writer( + w, + self.proxy_receiver, + shutdown_handle.clone(), + connection_cancellation_token.clone(), + ); + tokio::spawn(async move { + tokio::select! 
{ + _ = shutdown.cancellation_token.cancelled() => trace!("Cancelled"), + _ = writer => {}, + } + }); + + let reader = Self::run_reader(r, self.proxy_sender, shutdown_handle.clone()); + tokio::spawn(async move { + tokio::select! { + _ = connection_cancellation_token.cancelled() => trace!("Cancelled"), + _ = reader => {}, + } + }); + } + + // EFS to Proxy + async fn run_reader( + mut server_read_half: ReadHalf, + sender: mpsc::Sender, + shutdown: ShutdownHandle, + ) { + let reason; + let mut buffer = BytesMut::with_capacity(BUFFER_SIZE); + loop { + match server_read_half.read_buf(&mut buffer).await { + Ok(n_read) => { + if n_read == 0 { + reason = Option::Some(ShutdownReason::NeedsRestart); + break; + } + } + Err(e) => { + debug!("Error reading from server: {:?}", e); + reason = Option::Some(ShutdownReason::NeedsRestart); + break; + } + }; + + match RpcBatch::parse_batch(&mut buffer) { + Ok(Some(batch)) => { + if let Err(e) = sender.send(ConnectionMessage::Response(batch)).await { + debug!("Error sending result back: {:?}", e); + reason = Some(ShutdownReason::UnexpectedError); + break; + } + } + Err(RpcFragmentParseError::InvalidSizeTooSmall) => { + drop(server_read_half); + error!("Server Error: invalid RPC size - size too small"); + reason = Some(ShutdownReason::UnexpectedError); + break; + } + Err(RpcFragmentParseError::SizeLimitExceeded) => { + drop(server_read_half); + error!("Server Error: invalid RPC size - size limit exceeded"); + reason = Some(ShutdownReason::UnexpectedError); + break; + } + Ok(None) | Err(RpcFragmentParseError::Incomplete) => (), + } + + if buffer.capacity() == 0 { + buffer.reserve(BUFFER_SIZE) + } + } + shutdown.exit(reason).await; + } + + // Proxy to EFS + async fn run_writer( + mut server_write_half: WriteHalf, + mut receiver: mpsc::Receiver, + shutdown: ShutdownHandle, + connection_cancellation_token: CancellationToken, + ) { + let mut reason = Option::None; + loop { + let Some(batch) = receiver.recv().await else { + debug!("sender 
dropped"); + break; + }; + + for b in &batch.rpcs { + match server_write_half.write_all(b).await { + Ok(_) => (), + Err(e) => { + debug!("Error writing to server: {:?}", e); + reason = Option::Some(ShutdownReason::NeedsRestart); + break; + } + }; + } + } + + tokio::spawn(async move { + match server_write_half.shutdown().await { + Ok(_) => (), + Err(e) => debug!("Failed to gracefully shutdown connection: {}", e), + }; + connection_cancellation_token.cancel(); + }); + shutdown.exit(reason).await; + } +} diff --git a/src/proxy/src/proxy_identifier.rs b/src/proxy/src/proxy_identifier.rs new file mode 100644 index 00000000..e8e08e06 --- /dev/null +++ b/src/proxy/src/proxy_identifier.rs @@ -0,0 +1,54 @@ +use uuid::Uuid; + +pub const INITIAL_INCARNATION: i64 = 0; + +#[derive(Eq, PartialEq, Clone, Copy, Debug)] +pub struct ProxyIdentifier { + pub uuid: Uuid, + pub incarnation: i64, +} + +impl ProxyIdentifier { + pub fn new() -> Self { + ProxyIdentifier { + uuid: Uuid::new_v4(), + incarnation: INITIAL_INCARNATION, + } + } + + pub fn increment(&mut self) { + if self.incarnation == i64::MAX { + self.incarnation = 0; + return; + } + self.incarnation += 1; + } +} + +#[cfg(test)] +mod tests { + use super::ProxyIdentifier; + use super::INITIAL_INCARNATION; + + #[test] + fn test_increment() { + let mut proxy_id = ProxyIdentifier::new(); + let proxy_id_original = proxy_id.clone(); + for i in 0..5 { + assert_eq!(i, proxy_id.incarnation); + proxy_id.increment(); + } + assert_eq!(proxy_id_original.uuid, proxy_id.uuid); + assert_eq!(INITIAL_INCARNATION, proxy_id_original.incarnation); + } + + #[test] + fn test_wrap_around() { + let mut proxy_id = ProxyIdentifier::new(); + let proxy_id_original = proxy_id.clone(); + proxy_id.incarnation = i64::MAX; + proxy_id.increment(); + assert_eq!(proxy_id_original.uuid, proxy_id.uuid); + assert_eq!(INITIAL_INCARNATION, proxy_id.incarnation); + } +} diff --git a/src/proxy/src/rpc.rs b/src/proxy/src/rpc.rs new file mode 100644 index 
00000000..f167839d --- /dev/null +++ b/src/proxy/src/rpc.rs @@ -0,0 +1,242 @@ +use std::io::Cursor; + +use bytes::{Buf, Bytes, BytesMut}; +use tokio::io::AsyncReadExt; + +use crate::connections::ProxyStream; + +// Each element is an RPC call. +pub struct RpcBatch { + pub rpcs: Vec, +} + +#[derive(Debug, PartialEq)] +pub enum RpcFragmentParseError { + InvalidSizeTooSmall, + SizeLimitExceeded, + Incomplete, +} + +pub const RPC_LAST_FRAG: u32 = 0x80000000; +pub const RPC_SIZE_MASK: u32 = 0x7FFFFFFF; +pub const RPC_HEADER_SIZE: usize = 4; + +/* The sunrpc server implementation in linux has a maximum payload of 1MB + 1 page + * (see include/linux/sunrpc/svc.h#RPCSVC_MAXPAYLOAD and sv_max_mesg). + */ +pub const RPC_MAX_SIZE: usize = 1024 * 1024 + 4 * 1024; +pub const RPC_MIN_SIZE: usize = 2; + +impl RpcBatch { + pub fn parse_batch(buffer: &mut BytesMut) -> Result, RpcFragmentParseError> { + let mut batch = RpcBatch { rpcs: Vec::new() }; + + loop { + match Self::check_rpc_message(Cursor::new(&buffer[..])) { + Ok(len) => { + let rpc_message = buffer.split_to(len); + batch.rpcs.push(rpc_message.freeze()); + } + Err(RpcFragmentParseError::Incomplete) => break, + Err(e) => return Err(e), + } + } + + if batch.rpcs.is_empty() { + Ok(None) + } else { + Ok(Some(batch)) + } + } + + pub fn check_rpc_message(mut src: Cursor<&[u8]>) -> Result { + loop { + if src.remaining() < RPC_HEADER_SIZE { + return Err(RpcFragmentParseError::Incomplete); + } + + let fragment_header = src.get_u32(); + let fragment_size = (fragment_header & RPC_SIZE_MASK) as usize; + let is_last_fragment = (fragment_header & RPC_LAST_FRAG) != 0; + + if fragment_size <= RPC_MIN_SIZE { + return Err(RpcFragmentParseError::InvalidSizeTooSmall); + } + + if fragment_size >= RPC_MAX_SIZE { + return Err(RpcFragmentParseError::SizeLimitExceeded); + } + + if src.remaining() < fragment_size { + return Err(RpcFragmentParseError::Incomplete); + } + + src.advance(fragment_size); + + if is_last_fragment { + return 
Ok(src.position() as usize); + } + } + } +} + +pub async fn read_rpc_bytes(stream: &mut dyn ProxyStream) -> Result, tokio::io::Error> { + let mut header = [0; RPC_HEADER_SIZE]; + stream.read_exact(&mut header).await?; + + // NOTE: onc-rpc crate does not support fragmentation out of the box. Add 4 to include the header. + let len = (RPC_SIZE_MASK & extract_u32_from_bytes(&header)) + RPC_HEADER_SIZE as u32; + + let mut payload = vec![0; len as usize]; + payload[0..RPC_HEADER_SIZE].clone_from_slice(&header); + + stream.read_exact(&mut payload[RPC_HEADER_SIZE..]).await?; + + Ok(payload) +} + +fn extract_u32_from_bytes(header: &[u8]) -> u32 { + u32::from_be_bytes([header[0], header[1], header[2], header[3]]) +} + +#[cfg(test)] +pub mod test { + use crate::rpc::RPC_MAX_SIZE; + + use super::{RpcBatch, RpcFragmentParseError, RPC_HEADER_SIZE, RPC_LAST_FRAG}; + use bytes::{BufMut, BytesMut}; + use rand::Rng; + + // Generates message fragments for tests + // + // This function generates a set of message fragments from random data. The fragments are constructed + // in a way that they can be later assembled into the full long message data + // function. + // + // # Arguments + // * `size` - The total size of the message. + // * `num_fragments` - The number of fragments to generate. 
+ // + pub fn generate_msg_fragments(size: usize, num_fragments: usize) -> (bytes::BytesMut, Vec) { + let mut rng = rand::thread_rng(); + let data: Vec = (0..size).map(|_| rng.gen()).collect(); + + let fragment_data_size = data.len() / num_fragments; + + let mut data_buffer = bytes::BytesMut::new(); + for i in 0..num_fragments { + let start_idx = i * fragment_data_size; + let end_idx = std::cmp::min(size, start_idx + fragment_data_size); + let fragment_data = &data[start_idx..end_idx]; + + let mut header = (end_idx - start_idx) as u32; + if end_idx == size { + header |= 1 << 31; + } + + data_buffer.extend_from_slice(&header.to_be_bytes()); + data_buffer.extend_from_slice(fragment_data); + } + assert_eq!(data_buffer.len(), (num_fragments * 4) + data.len()); + + (data_buffer, data) + } + + #[test] + fn multiple_messages() { + let mut b = BytesMut::with_capacity(8); + b.put_u32(RPC_LAST_FRAG | 4); + b.put_u32(42); + b.put_u32(RPC_LAST_FRAG | 4); + + let batch = RpcBatch::parse_batch(&mut b); + let batch = batch.unwrap().unwrap(); + assert_eq!(batch.rpcs[0].len(), 8); + assert_eq!(batch.rpcs.len(), 1); + + b.put_u32(43); + let batch = RpcBatch::parse_batch(&mut b); + let batch = batch.unwrap().unwrap(); + assert_eq!(batch.rpcs[0].len(), 8); + assert_eq!(batch.rpcs.len(), 1); + + let batch = RpcBatch::parse_batch(&mut b); + assert!(matches!(batch, Ok(None))); + } + + #[test] + fn test_invalid_rpc_small_fragment() { + let num_fragments = 1; + let (mut input_buffer, _) = generate_msg_fragments(1, num_fragments); + let result = RpcBatch::parse_batch(&mut input_buffer); + assert!(matches!( + result, + Err(RpcFragmentParseError::InvalidSizeTooSmall) + )); + } + + #[test] + fn test_invalid_rpc_big_fragment() { + let num_fragments = 1; + let (mut input_buffer, _) = generate_msg_fragments(RPC_MAX_SIZE + 1, num_fragments); + let result = RpcBatch::parse_batch(&mut input_buffer); + assert!(matches!( + result, + Err(RpcFragmentParseError::SizeLimitExceeded) + )); + } + + #[test] + 
fn test_parse_batch_single_message() { + // Create an input buffer with multiple RPC fragments + let num_fragments = 3; + let message_size = 12; + let (mut input_buffer, _) = generate_msg_fragments(message_size, num_fragments); + let mut rpc_batch = RpcBatch::parse_batch(&mut input_buffer) + .expect("parse batch failed") + .expect("no rpc messages found"); + + assert_eq!(1, rpc_batch.rpcs.len()); + let rpc_message = rpc_batch.rpcs.pop().expect("No RPC messages"); + + let expected_message_size = num_fragments * RPC_HEADER_SIZE + message_size; + assert_eq!(expected_message_size, rpc_message.len()); + } + + #[test] + fn test_parse_batch_multiple_message() { + // Create an input buffer with multiple RPC messages + let num_fragments_1 = 3; + let message_size_1 = 12; + let (mut input_buffer, _) = generate_msg_fragments(message_size_1, num_fragments_1); + + let num_fragments_2 = 6; + let message_size_2 = 24; + let (input_buffer_2, _) = generate_msg_fragments(message_size_2, num_fragments_2); + + let num_fragments_3 = 1; + let message_size_3 = 50; + let (input_buffer_3, _) = generate_msg_fragments(message_size_3, num_fragments_3); + + input_buffer.extend_from_slice(&input_buffer_2); + input_buffer.extend_from_slice(&input_buffer_3); + + let mut rpc_batch = RpcBatch::parse_batch(&mut input_buffer) + .expect("parse batch failed") + .expect("no rpc messages found"); + + assert_eq!(3, rpc_batch.rpcs.len()); + + let rpc_message_3 = rpc_batch.rpcs.pop().expect("No RPC messages"); + let expected_message_size_3 = num_fragments_3 * RPC_HEADER_SIZE + message_size_3; + assert_eq!(expected_message_size_3, rpc_message_3.len()); + + let rpc_message_2 = rpc_batch.rpcs.pop().expect("No RPC messages"); + let expected_message_size_2 = num_fragments_2 * RPC_HEADER_SIZE + message_size_2; + assert_eq!(expected_message_size_2, rpc_message_2.len()); + + let rpc_message_1 = rpc_batch.rpcs.pop().expect("No RPC messages"); + let expected_message_size_1 = num_fragments_1 * RPC_HEADER_SIZE + 
message_size_1; + assert_eq!(expected_message_size_1, rpc_message_1.len()); + } +} diff --git a/src/proxy/src/shutdown.rs b/src/proxy/src/shutdown.rs new file mode 100644 index 00000000..5c8f148a --- /dev/null +++ b/src/proxy/src/shutdown.rs @@ -0,0 +1,85 @@ +use log::debug; +use tokio::sync::mpsc::{self, Receiver, Sender}; +use tokio_util::sync::CancellationToken; + +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum ShutdownReason { + NeedsRestart, + UnexpectedError, + Unmount, + FrameSizeExceeded, + FrameSizeTooSmall, +} + +#[derive(Clone)] +pub struct ShutdownHandle { + pub cancellation_token: CancellationToken, + notifier: Sender, +} + +impl ShutdownHandle { + pub fn new(cancellation_token: CancellationToken) -> (Self, Receiver) { + let (notifier, r) = mpsc::channel(1024); + let h = Self { + cancellation_token, + notifier, + }; + (h, r) + } + + pub async fn exit(self, reason: Option) { + debug!("Exiting: {:?}", reason); + self.cancellation_token.cancel(); + if let Some(reason) = reason { + let _ = self.notifier.send(reason).await; + } + } +} + +#[cfg(test)] +mod test { + use log::info; + use std::time::Duration; + + use tokio::sync::mpsc; + use tokio_util::sync::CancellationToken; + + use super::ShutdownHandle; + + #[tokio::test] + async fn test() { + let (t, mut r) = mpsc::channel(1); + let token = CancellationToken::new(); + + let s1 = ShutdownHandle { + cancellation_token: token.clone(), + notifier: t.clone(), + }; + let s2 = ShutdownHandle { + cancellation_token: token.clone(), + notifier: t.clone(), + }; + + tokio::spawn(run_task(s1, false)); + tokio::spawn(run_task(s2, true)); + drop(t); + + let _ = r.recv().await; + info!("Done"); + } + + async fn run_task(shutdown: ShutdownHandle, to_cancel: bool) { + let f = async { + if to_cancel { + shutdown.cancellation_token.clone().cancel() + } else { + tokio::time::sleep(Duration::from_secs(10)).await; + } + }; + tokio::select! 
{ + _ = shutdown.cancellation_token.cancelled() => {}, + _ = f => {} + } + info!("Task exiting"); + } +} diff --git a/src/proxy/src/status_reporter.rs b/src/proxy/src/status_reporter.rs new file mode 100644 index 00000000..ac9f6a9c --- /dev/null +++ b/src/proxy/src/status_reporter.rs @@ -0,0 +1,110 @@ +use crate::controller::ConnectionSearchState; +use crate::efs_rpc::PartitionId; +use crate::{proxy::PerformanceStats, proxy_identifier::ProxyIdentifier}; +use anyhow::{Error, Result}; +use tokio::sync::mpsc::{self, Receiver, Sender}; +use tokio::time::Instant; + +pub struct Report { + pub proxy_id: ProxyIdentifier, + pub partition_id: Option, + pub connection_state: ConnectionSearchState, + pub num_connections: usize, + pub last_proxy_update: Option<(Instant, PerformanceStats)>, + pub scale_up_attempt_count: u64, + pub restart_count: u64, +} + +type Request = (); +type Response = Report; + +pub struct StatusReporter { + pub sender: Sender, + pub receiver: Receiver, +} + +impl StatusReporter { + pub async fn await_report_request(&mut self) -> Result<()> { + self.receiver + .recv() + .await + .ok_or_else(|| Error::msg("Request channel closed"))?; + Ok(()) + } + + // Note: This should only be called when a message is received by the receiver. 
+ pub async fn publish_status(&mut self, report: Report) { + match self.sender.send(report).await { + Ok(_) => (), + Err(e) => panic!("StatusReporter could not send report {}", e), + } + } +} + +pub struct StatusRequester { + _sender: Sender, + _receiver: Receiver, +} + +impl StatusRequester { + pub async fn _request_status(&mut self) -> Result { + self._sender.send(()).await?; + self._receiver + .recv() + .await + .ok_or_else(|| Error::msg("Response channel closed")) + } +} + +pub fn create_status_channel() -> (StatusRequester, StatusReporter) { + let (call_sender, call_receiver) = mpsc::channel::(1); + let (reply_sender, reply_receiver) = mpsc::channel::(1); + + let status_requester = StatusRequester { + _sender: call_sender, + _receiver: reply_receiver, + }; + + let status_reporter = StatusReporter { + sender: reply_sender, + receiver: call_receiver, + }; + + (status_requester, status_reporter) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_basic() -> Result<()> { + let proxy_id = ProxyIdentifier::new(); + + let (mut status_requester, mut status_reporter) = create_status_channel(); + tokio::spawn(async move { + status_reporter + .await_report_request() + .await + .expect("Request channel closed"); + let report = Report { + proxy_id: proxy_id.clone(), + partition_id: None, + connection_state: ConnectionSearchState::Idle, + num_connections: 1, + last_proxy_update: None, + scale_up_attempt_count: 0, + restart_count: 0, + }; + status_reporter.publish_status(report).await + }); + + let r = status_requester._request_status().await?; + assert_eq!(proxy_id, r.proxy_id); + assert!(matches!(r.partition_id, None)); + assert_eq!(r.connection_state, ConnectionSearchState::Idle); + assert!(matches!(r.last_proxy_update, None)); + assert_eq!(1, r.num_connections); + Ok(()) + } +} diff --git a/src/proxy/src/tls.rs b/src/proxy/src/tls.rs new file mode 100644 index 00000000..6a6f062e --- /dev/null +++ b/src/proxy/src/tls.rs @@ -0,0 +1,230 @@ 
+use anyhow::{Context, Result}; +use log::*; +use nix::NixPath; +use s2n_tls::enums::ClientAuthType::Optional; +use s2n_tls::security::Policy; +use s2n_tls::{config::Config, security::DEFAULT_TLS13}; +use s2n_tls_tokio::TlsConnector; +use s2n_tls_tokio::TlsStream; +use std::path::Path; +use tokio::net::TcpStream; + +use crate::error::ConnectError; + +pub const FIPS_COMPLIANT_POLICY_VERSION: &str = "20230317"; +pub struct InsecureAcceptAllCertificatesHandler; +impl s2n_tls::callbacks::VerifyHostNameCallback for InsecureAcceptAllCertificatesHandler { + fn verify_host_name(&self, _host_name: &str) -> bool { + true + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct TlsConfig { + pub fips_enabled: bool, + + /// Contents of the certificate authority file. E.g. /etc/amazon/efs/efs-utils.crt + pub ca_file_contents: Vec, + + /// The client-side certificate and public key + pub client_cert: Vec, + + /// The client private key + pub client_private_key: Vec, + + /// The remote address to establish the TLS connection with + pub remote_addr: String, + + /// The hostname that is expected to be on the remote server's TLS certificate + pub server_domain: String, +} + +// s2n-tls errors if there are comments in the certificate files. This function removes comments if +// they are present. +async fn read_file_with_comments_removed(path: &Path) -> Result> { + let file = tokio::fs::File::open(path).await?; + let reader = tokio::io::BufReader::new(file); + let mut lines = tokio::io::AsyncBufReadExt::lines(reader); + + let mut output = Vec::new(); + while let Ok(Some(line)) = lines.next_line().await { + if !line.starts_with("# ") { + if !output.is_empty() { + output.push(b'\n'); + } + + output.extend_from_slice(line.as_bytes()); + } + } + Ok(output) +} + +impl TlsConfig { + /// Create an instance of TlsConfig. + /// + /// This will return an error if the files could not be read or the remote address could not be resolved. 
+ /// + /// # Arguments + /// * `ca_file` - File path of the certificate authority file. E.g. /etc/amazon/efs/efs-utils.crt + /// * `client_cert_pem_file` - File path of the file that contains the client-side certificate and public key + /// * `client_private_key_pem_file` - File path of the file that contains the client private key + /// * `remote_addr` - The remote address to establish the TLS connection with + /// * `server_domain` - The hostname that is expected to be on the certificate that the remote server presents + /// + pub async fn new( + fips_enabled: bool, + ca_file: &Path, + client_cert_pem_file: &Path, + client_private_key_pem_file: &Path, + remote_addr: &str, + server_domain: &str, + ) -> Result { + let mut ca_file_contents: Vec = Vec::new(); + if !ca_file.is_empty() { + ca_file_contents = read_file_with_comments_removed(ca_file).await.context( + String::from("Error in TlsConfig::new. Unable to the CA File. Make sure it does not have any comments (lines that start with #)."))?; + } + let client_cert = read_file_with_comments_removed(client_cert_pem_file) + .await + .context(String::from( + "Error in TlsConfig::new. Unable to read the client certificate file.", + ))?; + let client_private_key = read_file_with_comments_removed(client_private_key_pem_file) + .await + .context(String::from( + "Error in TlsConfig::new. 
Unable to read private key file.", + ))?; + let server_domain = server_domain.to_string(); + let remote_addr = remote_addr.to_string(); + + Ok(TlsConfig { + fips_enabled, + ca_file_contents, + client_cert, + client_private_key, + remote_addr, + server_domain, + }) + } + + #[cfg(test)] + pub async fn new_from_config(config: &crate::ProxyConfig) -> Result { + let efs_config = &config.nested_config; + + let ca_file = Path::new(&efs_config.ca_file); + let ca_cert_pem = Path::new(&efs_config.client_cert_pem_file); + let private_key_pem = Path::new(&efs_config.client_private_key_pem_file); + if !ca_file.exists() || !ca_cert_pem.exists() || !private_key_pem.exists() { + let error_msg = "One or more required files for TLS config are missing"; + return Err(anyhow::Error::msg(error_msg)); + } + TlsConfig::new( + config.fips, + &ca_file, + &ca_cert_pem, + &private_key_pem, + efs_config.mount_target_addr.as_str(), + efs_config.expected_server_hostname_tls.as_str(), + ) + .await + } +} + +/// Establishes a TLS stream using the configuration and remote address specified in tls_config +pub async fn establish_tls_stream( + tls_config: TlsConfig, +) -> Result, ConnectError> { + let config = create_config_builder(&tls_config).build()?; + + let tls_connector = TlsConnector::new(config); + + let tcp_stream = TcpStream::connect(tls_config.remote_addr).await?; + + let tls_stream = tls_connector + .connect(&tls_config.server_domain, tcp_stream) + .await?; + + debug!("{:#?}", tls_stream); + Ok(tls_stream) +} + +fn create_config_builder(tls_config: &TlsConfig) -> s2n_tls::config::Builder { + let mut config = Config::builder(); + + let policy = if tls_config.fips_enabled { + Policy::from_version(FIPS_COMPLIANT_POLICY_VERSION).expect("Invalid policy") + } else { + DEFAULT_TLS13 + }; + config + .set_security_policy(&policy) + .expect("Error in create_tls_connector. Failed to set security policy."); + config + .set_client_auth_type(Optional) + .expect("Error in create_tls_connector. 
Failed to set client auth type."); + config + .load_pem(&tls_config.client_cert, &tls_config.client_private_key) + .expect( + "Error in create_tls_connector. Failed to load the client certificate and private key.", + ); + + // If the customer is using the verify=0 mount option, we want to disable cert verification. + if !tls_config.ca_file_contents.is_empty() { + config + .trust_pem(&tls_config.ca_file_contents) + .expect("Error in create_tls_connector. Failed to add the CA file to the trust store."); + } else { + unsafe { + config + .disable_x509_verification() + .expect("Error disabling x509 verification"); + }; + } + + // If stunnel_check_cert_hostname = false in efs-utils config, then we don't verify the hostname + if tls_config.server_domain.is_empty() { + config + .set_verify_host_callback(InsecureAcceptAllCertificatesHandler) + .expect("Unable to disable host name verification"); + } + + config +} + +#[cfg(test)] +pub mod tests { + + use crate::config_parser::tests::get_test_config; + + use super::*; + + pub async fn get_client_config() -> Result { + let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; + let builder = create_config_builder(&tls_config); + + let config = builder.build()?; + Ok(config) + } + + pub async fn get_server_config() -> Result { + let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; + let mut builder = create_config_builder(&tls_config); + + // Accept all client certificates + builder.set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?; + + let config = builder.build()?; + Ok(config) + } + + #[tokio::test] + async fn test_remove_comments() { + let comment_file = Path::new("tests/certs/cert_with_comments.pem"); + let decommented_output = read_file_with_comments_removed(comment_file).await; + + let expected = tokio::fs::read(&Path::new("tests/certs/cert.pem")) + .await + .expect("Could not read certificate file"); + assert_eq!(expected.len(), decommented_output.unwrap().len()); + } +} 
diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 0e9c439c..41d49f1d 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "1.36.0" +VERSION = "2.0.0" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" @@ -180,6 +180,9 @@ SYSTEM_RELEASE_PATH = "/etc/system-release" OS_RELEASE_PATH = "/etc/os-release" STUNNEL_INSTALLATION_MESSAGE = "Please install it following the instructions at: https://docs.aws.amazon.com/efs/latest/ug/using-amazon-efs-utils.html#upgrading-stunnel" +EFS_PROXY_INSTALLATION_MESSAGE = "Please install it by reinstalling amazon-efs-utils" + +EFS_PROXY_BIN = "efs-proxy" def fatal_error(user_message, log_message=None): @@ -798,9 +801,10 @@ def get_pid_in_state_dir(state_file, state_file_dir): def is_mount_stunnel_proc_running(state_pid, state_file, state_file_dir): """ - Check whether a given stunnel process id in state file is running for the mount. To avoid we incorrectly checking - processes running by other applications and send signal further, the stunnel process in state file is counted as - running iff: + Check whether the PID in the state file corresponds to a running efs-proxy/stunnel process. + Although this code was originally written to check if stunnel is running, we've modified + it to support the efs-proxy process as well. + The proxy or stunnel process is counted as running iff: 1. The pid in state file is not None. 2. The process running with the pid is a stunnel process. This is validated through process command name. 3. The process can be reached via os.kill(pid, 0). 
@@ -818,9 +822,11 @@ def is_mount_stunnel_proc_running(state_pid, state_file, state_file_dir): return False process_name = check_process_name(state_pid) - if not process_name or "stunnel" not in str(process_name): + if not process_name or ( + "efs-proxy" not in str(process_name) and "stunnel" not in str(process_name) + ): logging.debug( - "Process running on %s is not a stunnel process, full command: %s.", + "Process running on %s is not an efs-proxy or stunnel process, full command: %s.", state_pid, str(process_name) if process_name else "", ) @@ -828,7 +834,7 @@ def is_mount_stunnel_proc_running(state_pid, state_file, state_file_dir): if not is_pid_running(state_pid): logging.debug( - "Stunnel process with pid %s is not running anymore for %s.", + "Stunnel or efs-proxy process with pid %s is not running anymore for %s.", state_pid, state_file, ) @@ -942,11 +948,39 @@ def update_stunnel_command_for_ecs_amazon_linux_2( return command +def command_uses_efs_proxy(command): + """ + Accepts a list of strings which represents the command that was used + to start or efs-proxy. If the command contains efs-proxy, return True. + + Since we control the filepath in which the efs-proxy executable is stored, we + know that we will not run into situations where a directory on the filepath is named + efs-proxy but the executable command is something else, like stunnel. + """ + for i in range(len(command)): + if EFS_PROXY_BIN in command[i]: + return True + + return False + + def start_tls_tunnel(child_procs, state, state_file_dir, state_file): - # launch the tunnel in a process group so if it has any child processes, they can be killed easily + """ + Reads the command from the state file, and uses it to start a subprocess. + This is the command that efs-utils used to spin up the efs-proxy or stunnel process. + + We launch the stunnel and efs-proxy process in a process group so that child processes can be easily killed. 
+ :param child_procs: list that contains efs-proxy / stunnel processes that the Watchdog instance has spawned + :param state: the state corresponding to a given mount - the proxy process associated with this mount will be started + :param state_file_dir: the directory where mount state files are stored + :param state_file: this function may rewrite the command used to start up the proxy or stunnel process, and thus needs a handle on the state file to update it. + :return: the pid of the proxy or stunnel process that was spawned + """ command = state["cmd"] logging.info('Starting TLS tunnel: "%s"', " ".join(command)) + efs_proxy_enabled = command_uses_efs_proxy(command) + command = update_stunnel_command_for_ecs_amazon_linux_2( command, state, state_file_dir, state_file ) @@ -960,44 +994,57 @@ def start_tls_tunnel(child_procs, state, state_file_dir, state_file): close_fds=True, ) except FileNotFoundError as e: - logging.warning("Watchdog failed to start stunnel due to %s", e) + if efs_proxy_enabled: + logging.warning("Watchdog failed to start efs-proxy due to %s", e) + else: + logging.warning("Watchdog failed to start stunnel due to %s", e) + + # https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/812 It is possible that the stunnel is not + # present anymore and replaced by stunnel5 on AL2, meanwhile watchdog is attempting to restart stunnel for + # mount using old efs-utils based on old state file generated during previous mount, which has stale command + # using stunnel bin. Update the state file if the stunnel does not exist anymore, and use stunnel5 on Al2. 
+ # + if get_system_release_version() in AMAZON_LINUX_2_RELEASE_VERSIONS: + for i in range(len(command)): + if "stunnel" in command[i] and "stunnel-config" not in command[i]: + command[i] = find_command_path( + "stunnel5", STUNNEL_INSTALLATION_MESSAGE + ) + break + + tunnel = subprocess.Popen( + command, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + preexec_fn=os.setsid, + close_fds=True, + ) - # https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/812 It is possible that the stunnel is not - # present anymore and replaced by stunnel5 on AL2, meanwhile watchdog is attempting to restart stunnel for - # mount using old efs-utils based on old state file generated during previous mount, which has stale command - # using stunnel bin. Update the state file if the stunnel does not exist anymore, and use stunnel5 on Al2. - # - if get_system_release_version() in AMAZON_LINUX_2_RELEASE_VERSIONS: - for i in range(len(command)): - if "stunnel" in command[i] and "stunnel-config" not in command[i]: - command[i] = find_command_path( - "stunnel5", STUNNEL_INSTALLATION_MESSAGE - ) - break - - tunnel = subprocess.Popen( - command, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - preexec_fn=os.setsid, - close_fds=True, - ) + state["cmd"] = command + logging.info( + "Rewriting %s with new stunnel cmd: %s for Amazon Linux 2 platform.", + state_file, + " ".join(state["cmd"]), + ) + rewrite_state_file(state, state_file_dir, state_file) - state["cmd"] = command - logging.info( - "Rewriting %s with new stunnel cmd: %s for Amazon Linux 2 platform.", - state_file, - " ".join(state["cmd"]), - ) - rewrite_state_file(state, state_file_dir, state_file) + # We may have used either stunnel or efs-proxy as the TLS tunnel. + # We want to make it clear in the logs which was used. 
+ tunnel_process_name = "stunnel" + if efs_proxy_enabled: + tunnel_process_name = "efs-proxy" if tunnel is None or not is_pid_running(tunnel.pid): fatal_error( - "Failed to initialize TLS tunnel for %s" % state_file, - "Failed to start TLS tunnel.", + "Failed to initialize %s for %s" % (tunnel_process_name, state_file), + "Failed to start %s." % tunnel_process_name, + ) + fatal_error( + "Failed to initialize %s for %s" % (tunnel_process_name, state_file), + "Failed to start %s." % tunnel_process_name, ) - logging.info("Started TLS tunnel, pid: %d", tunnel.pid) + logging.info("Started %s, pid: %d", tunnel_process_name, tunnel.pid) child_procs.append(tunnel) return tunnel.pid @@ -1148,18 +1195,6 @@ def check_efs_mounts( if is_mount_stunnel_proc_running( state.get("pid"), state_file, state_file_dir ): - # https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/616 We have seen EFS hanging issue caused - # by stuck stunnel (version: 4.56) process. Apart from checking whether stunnel is running or not, we - # need to check whether the stunnel connection established is healthy periodically. - # - # The way to check the stunnel health is by `df` the mountpoint, i.e. check the file system information, - # which will trigger a remote GETATTR on the root of the file system. Normally the command will finish - # in 10 milliseconds, thus if the command hang for certain period (defined as 30 sec as of now), the - # stunnel connection is likely to be unhealthy. Watchdog will kill the old stunnel process and restart - # a new one for the unhealthy mount. The health check will run every 5 min since mount. - # - # Both the command hang timeout and health check interval are configurable in efs-utils config file. 
- # check_stunnel_health( config, state, state_file_dir, state_file, child_procs, nfs_mounts ) @@ -1171,6 +1206,21 @@ def check_efs_mounts( def check_stunnel_health( config, state, state_file_dir, state_file, child_procs, nfs_mounts ): + """ + Check the health of efs-proxy, or stunnel (older versions of efs-utils), by executing `df` on the mountpoint. + + https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/616 We have seen EFS hanging issue caused + by stuck stunnel (version: 4.56) process. Apart from checking whether stunnel is running or not, we + need to check whether the stunnel connection established is healthy periodically. + + The way to check the stunnel health is by `df` the mountpoint, i.e. check the file system information, + which will trigger a remote GETATTR on the root of the file system. Normally the command will finish + in 10 milliseconds, thus if the command hang for certain period (defined as 30 sec as of now), the + stunnel connection is likely to be unhealthy. Watchdog will kill the old stunnel process and restart + a new one for the unhealthy mount. The health check will run every 5 min since mount. + + Both the command hang timeout and health check interval are configurable in efs-utils config file. 
+ """ if not get_boolean_config_item_value( config, CONFIG_SECTION, "stunnel_health_check_enabled", default_value=True ): diff --git a/test/mount_efs_test/test_add_stunnel_ca_options.py b/test/mount_efs_test/test_add_stunnel_ca_options.py index 5ac7ccd2..f528c439 100644 --- a/test/mount_efs_test/test_add_stunnel_ca_options.py +++ b/test/mount_efs_test/test_add_stunnel_ca_options.py @@ -41,7 +41,7 @@ def test_use_existing_cafile(tmpdir): options = {"cafile": str(_create_temp_file(tmpdir))} efs_config = {} - mount_efs.add_stunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) + mount_efs.add_tunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) assert options["cafile"] == efs_config.get("CAfile") assert "CApath" not in efs_config @@ -52,7 +52,7 @@ def test_use_missing_cafile(capsys): efs_config = {} with pytest.raises(SystemExit) as ex: - mount_efs.add_stunnel_ca_options( + mount_efs.add_tunnel_ca_options( efs_config, _get_config(), options, DEFAULT_REGION ) @@ -68,7 +68,7 @@ def test_stunnel_cafile_configuration_in_option(mocker): mocker.patch("os.path.exists", return_value=True) - mount_efs.add_stunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) + mount_efs.add_tunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) assert CAFILE == efs_config.get("CAfile") @@ -82,7 +82,7 @@ def test_stunnel_cafile_configuration_in_config(mocker): mocker.patch("os.path.exists", return_value=True) - mount_efs.add_stunnel_ca_options(efs_config, config, options, DEFAULT_REGION) + mount_efs.add_tunnel_ca_options(efs_config, config, options, DEFAULT_REGION) assert CAFILE == efs_config.get("CAfile") @@ -93,7 +93,7 @@ def test_stunnel_cafile_not_configured(mocker): mocker.patch("os.path.exists", return_value=True) - mount_efs.add_stunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) + mount_efs.add_tunnel_ca_options(efs_config, _get_config(), options, DEFAULT_REGION) assert mount_efs.DEFAULT_STUNNEL_CAFILE 
== efs_config.get("CAfile") @@ -110,6 +110,6 @@ def test_stunnel_cafile_configured_in_mount_region_section(mocker): mocker.patch("os.path.exists", return_value=True) - mount_efs.add_stunnel_ca_options(efs_config, config, options, ISOLATED_REGION) + mount_efs.add_tunnel_ca_options(efs_config, config, options, ISOLATED_REGION) assert ISOLATED_REGION_STUNNEL_CAFILE == efs_config.get("CAfile") diff --git a/test/mount_efs_test/test_bootstrap_tls.py b/test/mount_efs_test/test_bootstrap_proxy.py similarity index 51% rename from test/mount_efs_test/test_bootstrap_tls.py rename to test/mount_efs_test/test_bootstrap_proxy.py index e3060766..d56f3e53 100644 --- a/test/mount_efs_test/test_bootstrap_tls.py +++ b/test/mount_efs_test/test_bootstrap_proxy.py @@ -40,15 +40,17 @@ def setup_mocks(mocker): return_value=(DNS_NAME, None), ) mocker.patch("mount_efs.get_target_region", return_value=REGION) - mocker.patch("mount_efs.write_tls_tunnel_state_file", return_value="~mocktempfile") + mocker.patch("mount_efs.write_tunnel_state_file", return_value="~mocktempfile") mocker.patch("mount_efs.create_certificate") mocker.patch("os.rename") mocker.patch("os.kill") mocker.patch( - "mount_efs.update_tls_tunnel_temp_state_file_with_tunnel_pid", + "mount_efs.update_tunnel_temp_state_file_with_tunnel_pid", return_value="~mocktempfile", ) + mocker.patch("mount_efs.get_efs_proxy_log_level", return_value="info") + process_mock = MagicMock() process_mock.communicate.return_value = ( "stdout", @@ -74,10 +76,10 @@ def setup_mocks_without_popen(mocker): "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", return_value=(DNS_NAME, None), ) - mocker.patch("mount_efs.write_tls_tunnel_state_file", return_value="~mocktempfile") + mocker.patch("mount_efs.write_tunnel_state_file", return_value="~mocktempfile") mocker.patch("os.kill") mocker.patch( - "mount_efs.update_tls_tunnel_temp_state_file_with_tunnel_pid", + "mount_efs.update_tunnel_temp_state_file_with_tunnel_pid", 
return_value="~mocktempfile", ) @@ -87,12 +89,12 @@ def setup_mocks_without_popen(mocker): return write_config_mock -def test_bootstrap_tls_state_file_dir_exists(mocker, tmpdir): +def test_bootstrap_proxy_state_file_dir_exists(mocker, tmpdir): popen_mock, _ = setup_mocks(mocker) state_file_dir = str(tmpdir) - - mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") - with mount_efs.bootstrap_tls( + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") + with mount_efs.bootstrap_proxy( MOCK_CONFIG, INIT_SYSTEM, DNS_NAME, FS_ID, MOUNT_POINT, {}, state_file_dir ): pass @@ -100,11 +102,11 @@ def test_bootstrap_tls_state_file_dir_exists(mocker, tmpdir): args, _ = popen_mock.call_args args = args[0] - assert "/usr/bin/stunnel" in args + assert "/usr/bin/efs-proxy" in args assert EXPECTED_STUNNEL_CONFIG_FILE in args -def test_bootstrap_tls_state_file_nonexistent_dir(mocker, tmpdir): +def test_bootstrap_proxy_state_file_nonexistent_dir(mocker, tmpdir): popen_mock, _ = setup_mocks(mocker) state_file_dir = str(tmpdir.join(tempfile.mkdtemp()[1])) @@ -122,9 +124,10 @@ def config_get_side_effect(section, field): assert not os.path.exists(state_file_dir) - mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") mocker.patch("mount_efs.find_existing_mount_using_tls_port", return_value=None) - with mount_efs.bootstrap_tls( + with mount_efs.bootstrap_proxy( MOCK_CONFIG, INIT_SYSTEM, DNS_NAME, FS_ID, MOUNT_POINT, {}, state_file_dir ): pass @@ -132,13 +135,13 @@ def config_get_side_effect(section, field): assert os.path.exists(state_file_dir) -def test_bootstrap_tls_cert_created(mocker, tmpdir): +def test_bootstrap_proxy_cert_created_tls_mount(mocker, tmpdir): setup_mocks_without_popen(mocker) 
mocker.patch("mount_efs.get_mount_specific_filename", return_value=DNS_NAME) mocker.patch("mount_efs.get_target_region", return_value=REGION) state_file_dir = str(tmpdir) tls_dict = mount_efs.tls_paths_dictionary(DNS_NAME + "+", state_file_dir) - + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) pk_path = os.path.join(str(tmpdir), "privateKey.pem") mocker.patch("mount_efs.get_private_key_path", return_value=pk_path) @@ -147,6 +150,8 @@ def config_get_side_effect(section, field): return "0755" elif section == mount_efs.CONFIG_SECTION and field == "dns_name_format": return "{fs_id}.efs.{region}.amazonaws.com" + elif section == mount_efs.CONFIG_SECTION and field == "logging_level": + return "info" elif section == mount_efs.CLIENT_INFO_SECTION and field == "source": return CLIENT_SOURCE else: @@ -154,15 +159,15 @@ def config_get_side_effect(section, field): MOCK_CONFIG.get.side_effect = config_get_side_effect - mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") try: - with mount_efs.bootstrap_tls( + with mount_efs.bootstrap_proxy( MOCK_CONFIG, INIT_SYSTEM, DNS_NAME, FS_ID, MOUNT_POINT, - {"accesspoint": AP_ID}, + {"accesspoint": AP_ID, "tls": None}, state_file_dir, ): pass @@ -175,7 +180,53 @@ def config_get_side_effect(section, field): assert os.path.exists(pk_path) -def test_bootstrap_tls_non_default_port(mocker, tmpdir): +def test_bootstrap_proxy_cert_not_created_non_tls_mount(mocker, tmpdir): + setup_mocks_without_popen(mocker) + mocker.patch("mount_efs.get_mount_specific_filename", return_value=DNS_NAME) + mocker.patch("mount_efs.get_target_region", return_value=REGION) + state_file_dir = str(tmpdir) + tls_dict = mount_efs.tls_paths_dictionary(DNS_NAME + "+", state_file_dir) + + pk_path = os.path.join(str(tmpdir), "privateKey.pem") + mocker.patch("mount_efs.get_private_key_path", return_value=pk_path) + + def config_get_side_effect(section, field): + if 
section == mount_efs.CONFIG_SECTION and field == "state_file_dir_mode": + return "0755" + elif section == mount_efs.CONFIG_SECTION and field == "dns_name_format": + return "{fs_id}.efs.{region}.amazonaws.com" + elif section == mount_efs.CONFIG_SECTION and field == "logging_level": + return "info" + elif section == mount_efs.CLIENT_INFO_SECTION and field == "source": + return CLIENT_SOURCE + else: + raise ValueError("Unexpected arguments") + + MOCK_CONFIG.get.side_effect = config_get_side_effect + + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") + try: + with mount_efs.bootstrap_proxy( + MOCK_CONFIG, + INIT_SYSTEM, + DNS_NAME, + FS_ID, + MOUNT_POINT, + {"accesspoint": AP_ID}, + state_file_dir, + ): + pass + except OSError as e: + assert "[Errno 2] No such file or directory" in str(e) + + assert not os.path.exists(os.path.join(tls_dict["mount_dir"], "certificate.pem")) + assert not os.path.exists(os.path.join(tls_dict["mount_dir"], "request.csr")) + assert not os.path.exists(os.path.join(tls_dict["mount_dir"], "config.conf")) + assert not os.path.exists(pk_path) + + +def test_bootstrap_proxy_non_default_port(mocker, tmpdir): popen_mock, write_config_mock = setup_mocks(mocker) mocker.patch("os.rename") state_file_dir = str(tmpdir) @@ -185,9 +236,9 @@ def test_bootstrap_tls_non_default_port(mocker, tmpdir): tls_port_sock_mock.getsockname.return_value = ("local_host", tls_port) tls_port_sock_mock.close.side_effect = None mocker.patch("socket.socket", return_value=tls_port_sock_mock) - - mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") - with mount_efs.bootstrap_tls( + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") + with mount_efs.bootstrap_proxy( MOCK_CONFIG, INIT_SYSTEM, DNS_NAME, @@ -202,29 +253,55 @@ def test_bootstrap_tls_non_default_port(mocker, tmpdir): 
popen_args = popen_args[0] write_config_args, _ = write_config_mock.call_args - assert "/usr/bin/stunnel" in popen_args + assert "/usr/bin/efs-proxy" in popen_args assert EXPECTED_STUNNEL_CONFIG_FILE in popen_args assert tls_port == write_config_args[4] # positional argument for tls_port - # Ensure tls port socket is closed in bootstrap_tls + # Ensure tls port socket is closed in bootstrap_proxy # The number is two here, the first one is the actual socket when choosing tls port, the second one is a socket to # verify tls port can be connected after establishing TLS stunnel. They share the same mock. assert 2 == tls_port_sock_mock.close.call_count -def test_bootstrap_tls_non_default_verify_level(mocker, tmpdir): +def test_bootstrap_proxy_non_tls_verify_ignored(mocker, tmpdir): popen_mock, write_config_mock = setup_mocks(mocker) state_file_dir = str(tmpdir) + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") + with mount_efs.bootstrap_proxy( + MOCK_CONFIG, + INIT_SYSTEM, + DNS_NAME, + FS_ID, + MOUNT_POINT, + {}, + state_file_dir, + ): + pass + popen_args, _ = popen_mock.call_args + popen_args = popen_args[0] + write_config_args, _ = write_config_mock.call_args + + assert "/usr/bin/efs-proxy" in popen_args + assert EXPECTED_STUNNEL_CONFIG_FILE in popen_args + assert None == write_config_args[6] # positional argument for verify_level + + +def test_bootstrap_proxy_non_default_verify_level_stunnel(mocker, tmpdir): + popen_mock, write_config_mock = setup_mocks(mocker) + state_file_dir = str(tmpdir) + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) verify = 0 mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") - with mount_efs.bootstrap_tls( + with mount_efs.bootstrap_proxy( MOCK_CONFIG, INIT_SYSTEM, DNS_NAME, FS_ID, MOUNT_POINT, - {"verify": verify}, + {"verify": verify, "tls": None}, state_file_dir, + efs_proxy_enabled=False, ): pass @@ -237,12 
+314,11 @@ def test_bootstrap_tls_non_default_verify_level(mocker, tmpdir): assert 0 == write_config_args[6] # positional argument for verify_level -def test_bootstrap_tls_ocsp_option(mocker, tmpdir): +def test_bootstrap_proxy_ocsp_option(mocker, tmpdir): popen_mock, write_config_mock = setup_mocks(mocker) state_file_dir = str(tmpdir) - mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") - with mount_efs.bootstrap_tls( + with mount_efs.bootstrap_proxy( MOCK_CONFIG, INIT_SYSTEM, DNS_NAME, @@ -250,6 +326,7 @@ def test_bootstrap_tls_ocsp_option(mocker, tmpdir): MOUNT_POINT, {"ocsp": None}, state_file_dir, + efs_proxy_enabled=False, ): pass @@ -263,12 +340,11 @@ def test_bootstrap_tls_ocsp_option(mocker, tmpdir): assert write_config_args[7] is True -def test_bootstrap_tls_noocsp_option(mocker, tmpdir): +def test_bootstrap_proxy_noocsp_option(mocker, tmpdir): popen_mock, write_config_mock = setup_mocks(mocker) state_file_dir = str(tmpdir) - mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") - with mount_efs.bootstrap_tls( + with mount_efs.bootstrap_proxy( MOCK_CONFIG, INIT_SYSTEM, DNS_NAME, @@ -276,6 +352,7 @@ def test_bootstrap_tls_noocsp_option(mocker, tmpdir): MOUNT_POINT, {"noocsp": None}, state_file_dir, + efs_proxy_enabled=False, ): pass @@ -287,3 +364,114 @@ def test_bootstrap_tls_noocsp_option(mocker, tmpdir): assert EXPECTED_STUNNEL_CONFIG_FILE in popen_args # positional argument for ocsp_override assert write_config_args[7] is False + + +def test_bootstrap_proxy_efs_proxy_enabled_tls(mocker, tmpdir): + popen_mock, _ = setup_mocks(mocker) + mocker.patch("os.rename") + state_file_dir = str(tmpdir) + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") + with mount_efs.bootstrap_proxy( + MOCK_CONFIG, + INIT_SYSTEM, + DNS_NAME, + FS_ID, + MOUNT_POINT, + {"tls": None}, + state_file_dir, + efs_proxy_enabled=True, + ): + pass + + popen_args, 
_ = popen_mock.call_args + popen_args = popen_args[0] + + assert "/usr/bin/efs-proxy" in popen_args + assert "--tls" in popen_args + assert EXPECTED_STUNNEL_CONFIG_FILE in popen_args + + +def test_bootstrap_proxy_efs_proxy_enabled_non_tls(mocker, tmpdir): + popen_mock, _ = setup_mocks(mocker) + mocker.patch("os.rename") + state_file_dir = str(tmpdir) + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") + with mount_efs.bootstrap_proxy( + MOCK_CONFIG, + INIT_SYSTEM, + DNS_NAME, + FS_ID, + MOUNT_POINT, + {}, + state_file_dir, + efs_proxy_enabled=True, + ): + pass + + popen_args, _ = popen_mock.call_args + popen_args = popen_args[0] + + assert "/usr/bin/stunnel" not in popen_args + assert "--tls" not in popen_args + + assert "/usr/bin/efs-proxy" in popen_args + assert EXPECTED_STUNNEL_CONFIG_FILE in popen_args + + +def test_bootstrap_proxy_stunnel_enabled(mocker, tmpdir): + popen_mock, _ = setup_mocks(mocker) + mocker.patch("os.rename") + state_file_dir = str(tmpdir) + + mocker.patch("mount_efs._stunnel_bin", return_value="/usr/bin/stunnel") + with mount_efs.bootstrap_proxy( + MOCK_CONFIG, + INIT_SYSTEM, + DNS_NAME, + FS_ID, + MOUNT_POINT, + {}, + state_file_dir, + efs_proxy_enabled=False, + ): + pass + + popen_args, _ = popen_mock.call_args + popen_args = popen_args[0] + + assert "/usr/bin/efs-proxy" not in popen_args + assert "info" not in popen_args + + assert "/usr/bin/stunnel" in popen_args + assert EXPECTED_STUNNEL_CONFIG_FILE in popen_args + + +def test_bootstrap_proxy_netns_option(mocker, tmpdir): + popen_mock, write_config_mock = setup_mocks(mocker) + state_file_dir = str(tmpdir) + + netns = "/proc/1/net/ns" + mocker.patch("mount_efs._efs_proxy_bin", return_value="/usr/bin/efs-proxy") + mocker.patch("mount_efs.NetNS") + mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) + with mount_efs.bootstrap_proxy( + MOCK_CONFIG, + INIT_SYSTEM, + DNS_NAME, + FS_ID, + 
MOUNT_POINT, + {"netns": netns}, + state_file_dir, + ): + pass + + popen_args, _ = popen_mock.call_args + popen_args = popen_args[0] + write_config_args, _ = write_config_mock.call_args + + assert "/usr/bin/efs-proxy" in popen_args + assert EXPECTED_STUNNEL_CONFIG_FILE in popen_args + assert "nsenter" in popen_args + assert "--net=" + netns in popen_args diff --git a/test/mount_efs_test/test_get_nfs_mount_options.py b/test/mount_efs_test/test_get_nfs_mount_options.py index 4675f3d6..cf7d1d5d 100644 --- a/test/mount_efs_test/test_get_nfs_mount_options.py +++ b/test/mount_efs_test/test_get_nfs_mount_options.py @@ -6,10 +6,49 @@ from unittest.mock import MagicMock +try: + import ConfigParser +except ImportError: + from configparser import ConfigParser + import pytest import mount_efs +DEFAULT_OPTIONS = {"tlsport": "3030"} + + +def _get_config(ocsp_enabled=False): + try: + config = ConfigParser.SafeConfigParser() + except AttributeError: + config = ConfigParser() + + mount_nfs_command_retry_count = 4 + mount_nfs_command_retry_timeout = 10 + mount_nfs_command_retry = "false" + config.add_section(mount_efs.CONFIG_SECTION) + config.set( + mount_efs.CONFIG_SECTION, "retry_nfs_mount_command", mount_nfs_command_retry + ) + config.set( + mount_efs.CONFIG_SECTION, + "retry_nfs_mount_command_count", + str(mount_nfs_command_retry_count), + ) + config.set( + mount_efs.CONFIG_SECTION, + "retry_nfs_mount_command_timeout_sec", + str(mount_nfs_command_retry_timeout), + ) + if ocsp_enabled: + config.set( + mount_efs.CONFIG_SECTION, + "stunnel_check_cert_validity", + "true", + ) + return config + def _mock_popen(mocker, returncode=0, stdout="stdout", stderr="stderr"): popen_mock = MagicMock() @@ -23,7 +62,7 @@ def _mock_popen(mocker, returncode=0, stdout="stdout", stderr="stderr"): def test_get_default_nfs_mount_options(): - nfs_opts = mount_efs.get_nfs_mount_options({}) + nfs_opts = mount_efs.get_nfs_mount_options(dict(DEFAULT_OPTIONS), _get_config()) assert "nfsvers=4.1" in nfs_opts 
assert "rsize=1048576" in nfs_opts @@ -31,17 +70,22 @@ def test_get_default_nfs_mount_options(): assert "hard" in nfs_opts assert "timeo=600" in nfs_opts assert "retrans=2" in nfs_opts + assert "port=3030" in nfs_opts def test_override_nfs_version(): - nfs_opts = mount_efs.get_nfs_mount_options({"nfsvers": 4.0}) + options = dict(DEFAULT_OPTIONS) + options["nfsvers"] = 4.0 + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "nfsvers=4.0" in nfs_opts assert "nfsvers=4.1" not in nfs_opts def test_override_nfs_version_alternate_option(): - nfs_opts = mount_efs.get_nfs_mount_options({"vers": 4.0}) + options = dict(DEFAULT_OPTIONS) + options["vers"] = 4.0 + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "vers=4.0" in nfs_opts assert "nfsvers=4.0" not in nfs_opts @@ -49,21 +93,27 @@ def test_override_nfs_version_alternate_option(): def test_override_rsize(): - nfs_opts = mount_efs.get_nfs_mount_options({"rsize": 1}) + options = dict(DEFAULT_OPTIONS) + options["rsize"] = 1 + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "rsize=1" in nfs_opts assert "rsize=1048576" not in nfs_opts def test_override_wsize(): - nfs_opts = mount_efs.get_nfs_mount_options({"wsize": 1}) + options = dict(DEFAULT_OPTIONS) + options["wsize"] = 1 + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "wsize=1" in nfs_opts assert "wsize=1048576" not in nfs_opts def test_override_recovery_soft(): - nfs_opts = mount_efs.get_nfs_mount_options({"soft": None}) + options = dict(DEFAULT_OPTIONS) + options["soft"] = None + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "soft" in nfs_opts assert "soft=" not in nfs_opts @@ -71,35 +121,43 @@ def test_override_recovery_soft(): def test_override_timeo(): - nfs_opts = mount_efs.get_nfs_mount_options({"timeo": 1}) + options = dict(DEFAULT_OPTIONS) + options["timeo"] = 1 + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) 
assert "timeo=1" in nfs_opts assert "timeo=600" not in nfs_opts def test_override_retrans(): - nfs_opts = mount_efs.get_nfs_mount_options({"retrans": 1}) + options = dict(DEFAULT_OPTIONS) + options["retrans"] = 1 + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "retrans=1" in nfs_opts assert "retrans=2" not in nfs_opts def test_tlsport(): - nfs_opts = mount_efs.get_nfs_mount_options({"tls": None, "tlsport": 3030}) + options = dict(DEFAULT_OPTIONS) + options["tls"] = None + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "port=3030" in nfs_opts assert "tls" not in nfs_opts def test_fsap_efs_only(): - nfs_opts = mount_efs.get_nfs_mount_options({"fsap": None}) + options = dict(DEFAULT_OPTIONS) + options["fsap"] = None + nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) assert "fsap" not in nfs_opts def test_get_default_nfs_mount_options_macos(mocker): mocker.patch("mount_efs.check_if_platform_is_mac", return_value=True) - nfs_opts = mount_efs.get_nfs_mount_options({}) + nfs_opts = mount_efs.get_nfs_mount_options(dict(DEFAULT_OPTIONS), _get_config()) assert "nfsvers=4.0" in nfs_opts assert "rsize=1048576" in nfs_opts @@ -108,13 +166,14 @@ def test_get_default_nfs_mount_options_macos(mocker): assert "timeo=600" in nfs_opts assert "retrans=2" in nfs_opts assert "mountport=2049" in nfs_opts + assert not "port=3030" in nfs_opts def _test_unsupported_mount_options_macos(mocker, capsys, options={}): mocker.patch("mount_efs.check_if_platform_is_mac", return_value=True) _mock_popen(mocker, stdout="nfs") with pytest.raises(SystemExit) as ex: - mount_efs.get_nfs_mount_options(options) + mount_efs.get_nfs_mount_options(options, _get_config()) assert 0 != ex.value.code diff --git a/test/mount_efs_test/test_main.py b/test/mount_efs_test/test_main.py index 73321ab3..789c0b68 100644 --- a/test/mount_efs_test/test_main.py +++ b/test/mount_efs_test/test_main.py @@ -44,6 +44,8 @@ def _test_main( awscredsuri=None, 
notls=False, crossaccount=False, + stunnel=False, + macos=False, ): options = {} @@ -69,6 +71,8 @@ def _test_main( options["awscredsuri"] = awscredsuri if crossaccount: options["crossaccount"] = None + if stunnel: + options["stunnel"] = None if root: mocker.patch("os.geteuid", return_value=0) @@ -90,8 +94,8 @@ def _test_main( parse_arguments_mock = mocker.patch( "mount_efs.parse_arguments", return_value=("fs-deadbeef", "/", "/mnt", options) ) - bootstrap_tls_mock = mocker.patch( - "mount_efs.bootstrap_tls", side_effect=dummy_contextmanager + bootstrap_proxy_mock = mocker.patch( + "mount_efs.bootstrap_proxy", side_effect=dummy_contextmanager ) if tls: @@ -106,10 +110,19 @@ def _test_main( utils.assert_called_once(parse_arguments_mock) utils.assert_called_once(mount_mock) - if tls: - utils.assert_called_once(bootstrap_tls_mock) + stunnel_mode_enabled = stunnel or macos or ocsp + + if stunnel_mode_enabled: + if tls: + utils.assert_called_once(bootstrap_proxy_mock) + kwargs = bootstrap_proxy_mock.call_args[1] + assert kwargs["efs_proxy_enabled"] is False + else: + utils.assert_not_called(bootstrap_proxy_mock) else: - utils.assert_not_called(bootstrap_tls_mock) + utils.assert_called_once(bootstrap_proxy_mock) + kwargs = bootstrap_proxy_mock.call_args[1] + assert kwargs["efs_proxy_enabled"] is True def _test_main_assert_error(mocker, capsys, expected_err, **kwargs): @@ -128,7 +141,7 @@ def _test_main_macos(mocker, is_supported_macos_version, **kwargs): "mount_efs.check_if_mac_version_is_supported", return_value=is_supported_macos_version, ) - _test_main(mocker, **kwargs) + _test_main(mocker, macos=True, **kwargs) def _test_main_macos_assert_error( @@ -232,8 +245,12 @@ def test_main_awscredsuri_without_iam(mocker, capsys): ) -def test_main_tls_ocsp_option(mocker): - _test_main(mocker, tls=True, ocsp=True, tlsport=TLS_PORT) +def test_main_tls_ocsp_option_with_stunnel(mocker): + _test_main(mocker, tls=True, ocsp=True, stunnel=True, tlsport=TLS_PORT) + + +def 
test_main_tls_ocsp_option_should_revert_to_stunnel(mocker): + _test_main(mocker, tls=True, ocsp=True, stunnel=False, tlsport=TLS_PORT) def test_main_tls_noocsp_option(mocker): diff --git a/test/mount_efs_test/test_mount_nfs.py b/test/mount_efs_test/test_mount_nfs.py index 5e2782cf..d9070846 100644 --- a/test/mount_efs_test/test_mount_nfs.py +++ b/test/mount_efs_test/test_mount_nfs.py @@ -54,6 +54,8 @@ NETNS = "/proc/1/net/ns" +LOCAL_HOST = "127.0.0.1" + def _get_config( mount_nfs_command_retry="true", @@ -107,6 +109,30 @@ def test_mount_nfs(mocker): args, _ = mock.call_args args = args[0] + assert "/sbin/mount.nfs4" == args[NFS_BIN_ARG_IDX] + assert LOCAL_HOST in args[NFS_MOUNT_PATH_IDX] + assert "/mnt" == args[NFS_MOUNT_POINT_IDX] + + utils.assert_called_once(optimize_readahead_window_mock) + + +def test_mount_nfs_stunnel_enabled(mocker): + mock = _mock_popen(mocker) + optimize_readahead_window_mock = mocker.patch("mount_efs.optimize_readahead_window") + options = dict(DEFAULT_OPTIONS) + options["stunnel"] = None + + mount_efs.mount_nfs( + _get_config(mount_nfs_command_retry="false"), + DNS_NAME, + "/", + "/mnt", + options, + ) + + args, _ = mock.call_args + args = args[0] + assert "/sbin/mount.nfs4" == args[NFS_BIN_ARG_IDX] assert DNS_NAME in args[NFS_MOUNT_PATH_IDX] assert "/mnt" == args[NFS_MOUNT_POINT_IDX] @@ -114,16 +140,18 @@ def test_mount_nfs(mocker): utils.assert_called_once(optimize_readahead_window_mock) -def test_mount_nfs_with_fallback_ip_address(mocker): +def test_mount_nfs_stunnel_with_fallback_ip_address(mocker): mock = _mock_popen(mocker) optimize_readahead_window_mock = mocker.patch("mount_efs.optimize_readahead_window") + options = dict(DEFAULT_OPTIONS) + options["stunnel"] = None mount_efs.mount_nfs( _get_config(mount_nfs_command_retry="false"), DNS_NAME, "/", "/mnt", - DEFAULT_OPTIONS, + options, fallback_ip_address=FALLBACK_IP_ADDRESS, ) @@ -138,12 +166,13 @@ def test_mount_nfs_with_fallback_ip_address(mocker): 
utils.assert_called_once(optimize_readahead_window_mock) -def test_mount_nfs_tls(mocker): +def test_mount_nfs_tls_stunnel_enabled(mocker): mock = _mock_popen(mocker) optimize_readahead_window_mock = mocker.patch("mount_efs.optimize_readahead_window") options = dict(DEFAULT_OPTIONS) options["tls"] = None + options["stunnel"] = None mount_efs.mount_nfs( _get_config(mount_nfs_command_retry="false"), DNS_NAME, "/", "/mnt", options @@ -205,11 +234,11 @@ def test_mount_tls_mountpoint_mounted_with_nfs(mocker, capsys): options = dict(DEFAULT_OPTIONS) options["tls"] = None - bootstrap_tls_mock = mocker.patch("mount_efs.bootstrap_tls") + bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") optimize_readahead_window_mock = mocker.patch("mount_efs.optimize_readahead_window") mocker.patch("os.path.ismount", return_value=True) _mock_popen(mocker, stdout="nfs") - mount_efs.mount_tls( + mount_efs.mount_with_proxy( _get_config(mount_nfs_command_retry="false"), INIT_SYSTEM, DNS_NAME, @@ -220,7 +249,7 @@ def test_mount_tls_mountpoint_mounted_with_nfs(mocker, capsys): ) out, err = capsys.readouterr() assert "is already mounted" in out - utils.assert_not_called(bootstrap_tls_mock) + utils.assert_not_called(bootstrap_proxy_mock) utils.assert_not_called(optimize_readahead_window_mock) diff --git a/test/mount_efs_test/test_mount_with_proxy.py b/test/mount_efs_test/test_mount_with_proxy.py new file mode 100644 index 00000000..ad5821c7 --- /dev/null +++ b/test/mount_efs_test/test_mount_with_proxy.py @@ -0,0 +1,221 @@ +import subprocess +from unittest.mock import MagicMock + +import pytest + +import mount_efs + +from .. 
import common, utils + +try: + import ConfigParser +except ImportError: + from configparser import ConfigParser + +try: + import ConfigParser +except ImportError: + from configparser import ConfigParser + +DNS_NAME = "fs-deadbeef.efs.us-east-1.amazonaws.com" +FS_ID = "fs-deadbeef" +INIT_SYSTEM = "upstart" +FALLBACK_IP_ADDRESS = "192.0.0.1" +MOUNT_POINT = "/mnt" +PATH = "/" + +DEFAULT_OPTIONS = { + "nfsvers": 4.1, + "rsize": 1048576, + "wsize": 1048576, + "hard": None, + "timeo": 600, + "retrans": 2, + "tlsport": 3049, +} + +# indices of different arguments to the NFS call +NFS_BIN_ARG_IDX = 0 +NFS_MOUNT_PATH_IDX = 1 +NFS_MOUNT_POINT_IDX = 2 +NFS_OPTION_FLAG_IDX = 3 +NFS_OPTIONS_IDX = 4 + +# indices of different arguments to the NFS call to certain network namespace +NETNS_NSENTER_ARG_IDX = 0 +NETNS_PATH_ARG_IDX = 1 +NETNS_NFS_OFFSET = 2 + +# indices of different arguments to the NFS call for MACOS +NFS_MOUNT_PATH_IDX_MACOS = -2 +NFS_MOUNT_POINT_IDX_MACOS = -1 + +NETNS = "/proc/1/net/ns" + + +def _get_config(ocsp_enabled=False): + try: + config = ConfigParser.SafeConfigParser() + except AttributeError: + config = ConfigParser() + + mount_nfs_command_retry_count = 4 + mount_nfs_command_retry_timeout = 10 + mount_nfs_command_retry = "false" + config.add_section(mount_efs.CONFIG_SECTION) + config.set( + mount_efs.CONFIG_SECTION, "retry_nfs_mount_command", mount_nfs_command_retry + ) + config.set( + mount_efs.CONFIG_SECTION, + "retry_nfs_mount_command_count", + str(mount_nfs_command_retry_count), + ) + config.set( + mount_efs.CONFIG_SECTION, + "retry_nfs_mount_command_timeout_sec", + str(mount_nfs_command_retry_timeout), + ) + if ocsp_enabled: + config.set( + mount_efs.CONFIG_SECTION, + "stunnel_check_cert_validity", + "true", + ) + return config + + +def _mock_popen(mocker, returncode=0, stdout="stdout", stderr="stderr"): + popen_mock = MagicMock() + popen_mock.communicate.return_value = ( + stdout, + stderr, + ) + popen_mock.returncode = returncode + + return 
mocker.patch("subprocess.Popen", return_value=popen_mock) + + +def test_mount_with_proxy_efs_proxy_enabled(mocker, capsys): + options = dict(DEFAULT_OPTIONS) + options["tls"] = None + + bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") + mocker.patch("os.path.ismount", return_value=False) + mocker.patch("threading.Thread.start") + mocker.patch("threading.Thread.join") + mocker.patch("mount_efs.mount_nfs") + _mock_popen(mocker, stdout="nfs") + mount_efs.mount_with_proxy( + _get_config(), + INIT_SYSTEM, + DNS_NAME, + PATH, + FS_ID, + MOUNT_POINT, + options, + ) + utils.assert_called_once(bootstrap_proxy_mock) + + kwargs = bootstrap_proxy_mock.call_args[1] + assert kwargs["efs_proxy_enabled"] == True + + +def test_mount_with_proxy_ocsp_config_enabled(mocker, capsys): + options = dict(DEFAULT_OPTIONS) + options["tls"] = None + + bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") + mocker.patch("os.path.ismount", return_value=False) + mocker.patch("threading.Thread.start") + mocker.patch("threading.Thread.join") + mocker.patch("mount_efs.mount_nfs") + _mock_popen(mocker, stdout="nfs") + mount_efs.mount_with_proxy( + _get_config(ocsp_enabled=True), + INIT_SYSTEM, + DNS_NAME, + PATH, + FS_ID, + MOUNT_POINT, + options, + ) + utils.assert_called_once(bootstrap_proxy_mock) + + kwargs = bootstrap_proxy_mock.call_args[1] + assert kwargs["efs_proxy_enabled"] == False + + +def test_mount_with_proxy_ocsp_option_enabled(mocker, capsys): + options = dict(DEFAULT_OPTIONS) + options["tls"] = None + options["ocsp"] = None + + bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") + mocker.patch("os.path.ismount", return_value=False) + mocker.patch("threading.Thread.start") + mocker.patch("threading.Thread.join") + mocker.patch("mount_efs.mount_nfs") + _mock_popen(mocker, stdout="nfs") + mount_efs.mount_with_proxy( + _get_config(), + INIT_SYSTEM, + DNS_NAME, + PATH, + FS_ID, + MOUNT_POINT, + options, + ) + 
utils.assert_called_once(bootstrap_proxy_mock) + + kwargs = bootstrap_proxy_mock.call_args[1] + assert kwargs["efs_proxy_enabled"] == False + + +def test_mount_with_proxy_efs_proxy_enabled_non_tls_mount(mocker, capsys): + options = dict(DEFAULT_OPTIONS) + + bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") + mocker.patch("os.path.ismount", return_value=False) + mocker.patch("threading.Thread.start") + mocker.patch("threading.Thread.join") + mocker.patch("mount_efs.mount_nfs") + _mock_popen(mocker, stdout="nfs") + mount_efs.mount_with_proxy( + _get_config(), + INIT_SYSTEM, + DNS_NAME, + PATH, + FS_ID, + MOUNT_POINT, + options, + ) + utils.assert_called_once(bootstrap_proxy_mock) + + kwargs = bootstrap_proxy_mock.call_args[1] + assert kwargs["efs_proxy_enabled"] == True + + +def test_mount_with_proxy_stunnel_enabled(mocker, capsys): + options = dict(DEFAULT_OPTIONS) + options["stunnel"] = None + + bootstrap_proxy_mock = mocker.patch("mount_efs.bootstrap_proxy") + mocker.patch("os.path.ismount", return_value=False) + mocker.patch("threading.Thread.start") + mocker.patch("threading.Thread.join") + mocker.patch("mount_efs.mount_nfs") + _mock_popen(mocker, stdout="nfs") + mount_efs.mount_with_proxy( + _get_config(), + INIT_SYSTEM, + DNS_NAME, + PATH, + FS_ID, + MOUNT_POINT, + options, + ) + utils.assert_called_once(bootstrap_proxy_mock) + + kwargs = bootstrap_proxy_mock.call_args[1] + assert kwargs["efs_proxy_enabled"] == False diff --git a/test/mount_efs_test/test_write_stunnel_config_file.py b/test/mount_efs_test/test_write_stunnel_config_file.py index b01f88d7..7bb5e267 100644 --- a/test/mount_efs_test/test_write_stunnel_config_file.py +++ b/test/mount_efs_test/test_write_stunnel_config_file.py @@ -27,6 +27,7 @@ OCSP_ENABLED = False DEFAULT_REGION = "us-east-1" STUNNEL_LOGS_FILE = "/var/log/amazon/efs/%s.stunnel.log" % FS_ID +PROXY_LOGS_FILE = "/var/log/amazon/efs/%s.efs-proxy.log" % FS_ID def _get_config( @@ -98,7 +99,12 @@ def _get_config( return 
config -def _get_mount_options(port=PORT): +def _get_mount_options_tls(port=PORT): + options = {"tlsport": port, "tls": None} + return options + + +def _get_mount_options_non_tls(port=PORT): options = { "tlsport": port, } @@ -152,7 +158,7 @@ def _validate_config(stunnel_config_file, expected_global_config, expected_efs_c assert expected_efs_config == actual_efs_config -def _get_expected_efs_config( +def _get_expected_efs_config_tls( port=PORT, dns_name=DNS_NAME, verify=mount_efs.DEFAULT_STUNNEL_VERIFY_LEVEL, @@ -161,6 +167,7 @@ def _get_expected_efs_config( check_cert_validity=False, disable_libwrap=True, fallback_ip_address=None, + efs_proxy_enabled=True, ): expected_efs_config = dict(mount_efs.STUNNEL_EFS_CONFIG) expected_efs_config["accept"] = expected_efs_config["accept"] % port @@ -172,26 +179,44 @@ def _get_expected_efs_config( ) expected_efs_config["verify"] = str(verify) - if check_cert_hostname: + if check_cert_hostname or efs_proxy_enabled: expected_efs_config["checkHost"] = dns_name[dns_name.index(FS_ID) :] - if check_cert_validity and ocsp_override: + if check_cert_validity and ocsp_override and (not efs_proxy_enabled): expected_efs_config["OCSPaia"] = "yes" - if disable_libwrap: + if disable_libwrap and (not efs_proxy_enabled): expected_efs_config["libwrap"] = "no" return expected_efs_config -def _test_check_cert_hostname( +def _get_expected_efs_config_non_tls( + port=PORT, + dns_name=DNS_NAME, + fallback_ip_address=None, +): + expected_efs_config = dict(mount_efs.STUNNEL_EFS_CONFIG) + expected_efs_config["accept"] = expected_efs_config["accept"] % port + if not fallback_ip_address: + expected_efs_config["connect"] = expected_efs_config["connect"] % dns_name + else: + expected_efs_config["connect"] = ( + expected_efs_config["connect"] % fallback_ip_address + ) + + return expected_efs_config + + +# Check the hostname behavior when using stunnel instead of efs-proxy. 
+def _test_check_cert_hostname_stunnel( mocker, tmpdir, stunnel_check_cert_hostname_supported, stunnel_check_cert_hostname, expected_check_cert_hostname_config_value, ): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( _get_config( @@ -206,8 +231,9 @@ def _test_check_cert_hostname( DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=False, ) utils.assert_called_once(ca_mocker) @@ -215,8 +241,9 @@ def _test_check_cert_hostname( _validate_config( config_file, _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), - _get_expected_efs_config( - check_cert_hostname=expected_check_cert_hostname_config_value + _get_expected_efs_config_tls( + check_cert_hostname=expected_check_cert_hostname_config_value, + efs_proxy_enabled=False, ), ) @@ -228,7 +255,7 @@ def _test_check_cert_validity( stunnel_check_cert_validity, expected_check_cert_validity_config_value, ): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( _get_config( @@ -242,8 +269,9 @@ def _test_check_cert_validity( DNS_NAME, VERIFY_LEVEL, stunnel_check_cert_validity, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=True, ) utils.assert_called_once(ca_mocker) @@ -251,14 +279,14 @@ def _test_check_cert_validity( _validate_config( config_file, _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), - _get_expected_efs_config( + _get_expected_efs_config_tls( check_cert_validity=expected_check_cert_validity_config_value ), ) def test_write_stunnel_config_file(mocker, tmpdir): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = 
mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( @@ -270,20 +298,21 @@ def test_write_stunnel_config_file(mocker, tmpdir): DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=True, ) utils.assert_called_once(ca_mocker) _validate_config( config_file, _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), - _get_expected_efs_config(), + _get_expected_efs_config_tls(), ) def test_write_stunnel_config_file_with_az_as_dns_name(mocker, tmpdir): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( @@ -295,15 +324,16 @@ def test_write_stunnel_config_file_with_az_as_dns_name(mocker, tmpdir): DNS_NAME_WITH_AZ, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=True, ) utils.assert_called_once(ca_mocker) _validate_config( config_file, _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), - _get_expected_efs_config(dns_name=DNS_NAME_WITH_AZ), + _get_expected_efs_config_tls(dns_name=DNS_NAME_WITH_AZ), ) @@ -313,7 +343,7 @@ def _test_enable_disable_libwrap( system_release="unknown", libwrap_supported=True, ): - mocker.patch("mount_efs.add_stunnel_ca_options") + mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) ver_mocker = mocker.patch( "mount_efs.get_system_release_version", return_value=system_release @@ -328,20 +358,21 @@ def _test_enable_disable_libwrap( DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=True, ) utils.assert_called_once(ver_mocker) _validate_config( config_file, _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), - 
_get_expected_efs_config(disable_libwrap=libwrap_supported), + _get_expected_efs_config_tls(disable_libwrap=libwrap_supported), ) def test_write_stunnel_config_with_debug(mocker, tmpdir): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( @@ -353,8 +384,9 @@ def test_write_stunnel_config_with_debug(mocker, tmpdir): DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=True, ) utils.assert_called_once(ca_mocker) @@ -364,19 +396,21 @@ def test_write_stunnel_config_with_debug(mocker, tmpdir): expected_global_config["debug"] = "debug" expected_global_config["output"] = os.path.join( mount_efs.LOG_DIR, - "%s.stunnel.log" + "%s.efs-proxy.log" % mount_efs.get_mount_specific_filename(FS_ID, MOUNT_POINT, PORT), ) - _validate_config(config_file, expected_global_config, _get_expected_efs_config()) + _validate_config( + config_file, expected_global_config, _get_expected_efs_config_tls() + ) def test_write_stunnel_config_with_debug_and_logs_file(mocker, tmpdir): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( _get_config( - mocker, stunnel_debug_enabled=True, stunnel_logs_file=STUNNEL_LOGS_FILE + mocker, stunnel_debug_enabled=True, stunnel_logs_file=PROXY_LOGS_FILE ), state_file_dir, FS_ID, @@ -385,8 +419,9 @@ def test_write_stunnel_config_with_debug_and_logs_file(mocker, tmpdir): DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=True, ) utils.assert_called_once(ca_mocker) @@ -394,15 +429,48 @@ def test_write_stunnel_config_with_debug_and_logs_file(mocker, tmpdir): _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, 
state_file_dir) ) expected_global_config["debug"] = "debug" - expected_global_config["output"] = STUNNEL_LOGS_FILE + expected_global_config["output"] = PROXY_LOGS_FILE + + _validate_config( + config_file, expected_global_config, _get_expected_efs_config_tls() + ) + + +# We should always write "checkHost" into the stunnel config when using efs-proxy for TLS mounts. +def test_write_stunnel_config_efs_proxy_check_cert_hostname_tls(mocker, tmpdir): + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") + supported_opt_mock = mocker.patch("mount_efs.is_stunnel_option_supported") + state_file_dir = str(tmpdir) + config_file = mount_efs.write_stunnel_config_file( + _get_config(mocker), + state_file_dir, + FS_ID, + MOUNT_POINT, + PORT, + DNS_NAME, + VERIFY_LEVEL, + OCSP_ENABLED, + _get_mount_options_tls(), + DEFAULT_REGION, + efs_proxy_enabled=True, + ) + + utils.assert_called_once(ca_mocker) + utils.assert_not_called(supported_opt_mock) - _validate_config(config_file, expected_global_config, _get_expected_efs_config()) + _validate_config( + config_file, + _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), + _get_expected_efs_config_tls( + efs_proxy_enabled=True, + ), + ) def test_write_stunnel_config_check_cert_hostname_supported_flag_not_set( mocker, tmpdir ): - _test_check_cert_hostname( + _test_check_cert_hostname_stunnel( mocker, tmpdir, stunnel_check_cert_hostname_supported=True, @@ -414,7 +482,7 @@ def test_write_stunnel_config_check_cert_hostname_supported_flag_not_set( def test_write_stunnel_config_check_cert_hostname_supported_flag_set_false( mocker, capsys, tmpdir ): - _test_check_cert_hostname( + _test_check_cert_hostname_stunnel( mocker, tmpdir, stunnel_check_cert_hostname_supported=True, @@ -426,7 +494,7 @@ def test_write_stunnel_config_check_cert_hostname_supported_flag_set_false( def test_write_stunnel_config_check_cert_hostname_supported_flag_set_true( mocker, tmpdir ): - _test_check_cert_hostname( + 
_test_check_cert_hostname_stunnel( mocker, tmpdir, stunnel_check_cert_hostname_supported=True, @@ -438,7 +506,7 @@ def test_write_stunnel_config_check_cert_hostname_supported_flag_set_true( def test_write_stunnel_config_check_cert_hostname_not_supported_flag_not_specified( mocker, capsys, tmpdir ): - _test_check_cert_hostname( + _test_check_cert_hostname_stunnel( mocker, tmpdir, stunnel_check_cert_hostname_supported=False, @@ -450,7 +518,7 @@ def test_write_stunnel_config_check_cert_hostname_not_supported_flag_not_specifi def test_write_stunnel_config_check_cert_hostname_not_supported_flag_set_false( mocker, capsys, tmpdir ): - _test_check_cert_hostname( + _test_check_cert_hostname_stunnel( mocker, tmpdir, stunnel_check_cert_hostname_supported=False, @@ -462,7 +530,7 @@ def test_write_stunnel_config_check_cert_hostname_not_supported_flag_set_false( def test_write_stunnel_config_check_cert_hostname_not_supported_flag_set_true( mocker, capsys, tmpdir ): - mocker.patch("mount_efs.add_stunnel_ca_options") + mocker.patch("mount_efs.add_tunnel_ca_options") with pytest.raises(SystemExit) as ex: mount_efs.write_stunnel_config_file( @@ -478,8 +546,9 @@ def test_write_stunnel_config_check_cert_hostname_not_supported_flag_set_true( DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=False, ) assert 0 != ex.value.code @@ -528,7 +597,7 @@ def test_write_stunnel_config_check_cert_validity_not_supported_ocsp_disabled( def test_write_stunnel_config_check_cert_validity_not_supported_ocsp_enabled( mocker, capsys, tmpdir ): - mocker.patch("mount_efs.add_stunnel_ca_options") + mocker.patch("mount_efs.add_tunnel_ca_options") with pytest.raises(SystemExit) as ex: mount_efs.write_stunnel_config_file( @@ -544,8 +613,9 @@ def test_write_stunnel_config_check_cert_validity_not_supported_ocsp_enabled( DNS_NAME, VERIFY_LEVEL, True, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + 
efs_proxy_enabled=False, ) assert 0 != ex.value.code @@ -556,7 +626,7 @@ def test_write_stunnel_config_check_cert_validity_not_supported_ocsp_enabled( def test_write_stunnel_config_with_verify_level(mocker, tmpdir): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) verify = 0 config_file = mount_efs.write_stunnel_config_file( @@ -568,7 +638,7 @@ def test_write_stunnel_config_with_verify_level(mocker, tmpdir): DNS_NAME, verify, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, ) utils.assert_not_called(ca_mocker) @@ -576,7 +646,7 @@ def test_write_stunnel_config_with_verify_level(mocker, tmpdir): _validate_config( config_file, _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), - _get_expected_efs_config(check_cert_validity=False, verify=verify), + _get_expected_efs_config_tls(check_cert_validity=False, verify=verify), ) @@ -589,7 +659,7 @@ def test_write_stunnel_config_libwrap_supported(mocker, tmpdir): def test_write_stunnel_config_with_fall_back_ip_address(mocker, tmpdir): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( @@ -601,7 +671,7 @@ def test_write_stunnel_config_with_fall_back_ip_address(mocker, tmpdir): DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, fallback_ip_address=FALLBACK_IP_ADDRESS, ) @@ -611,7 +681,7 @@ def test_write_stunnel_config_with_fall_back_ip_address(mocker, tmpdir): _validate_config( config_file, _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), - _get_expected_efs_config(fallback_ip_address=FALLBACK_IP_ADDRESS), + _get_expected_efs_config_tls(fallback_ip_address=FALLBACK_IP_ADDRESS), ) @@ -633,10 +703,16 @@ def 
test_write_stunnel_config_foreground_quiet_supported_debug_enabled(mocker, t ) +def test_write_stunnel_config_foreground_quiet_supported_debug_enabled(mocker, tmpdir): + _test_stunnel_config_foreground_quiet_helper( + mocker, tmpdir, foreground_quiet_supported=True, stunnel_debug_enabled=True + ) + + def _test_stunnel_config_foreground_quiet_helper( mocker, tmpdir, foreground_quiet_supported, stunnel_debug_enabled ): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( @@ -652,8 +728,9 @@ def _test_stunnel_config_foreground_quiet_helper( DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, + efs_proxy_enabled=False, ) utils.assert_called_once(ca_mocker) @@ -670,11 +747,15 @@ def _test_stunnel_config_foreground_quiet_helper( "%s.stunnel.log" % mount_efs.get_mount_specific_filename(FS_ID, MOUNT_POINT, PORT), ) - _validate_config(config_file, expected_global_config, _get_expected_efs_config()) + _validate_config( + config_file, + expected_global_config, + _get_expected_efs_config_tls(efs_proxy_enabled=False), + ) def test_write_stunnel_config_fips_enabled(mocker, tmpdir): - ca_mocker = mocker.patch("mount_efs.add_stunnel_ca_options") + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") state_file_dir = str(tmpdir) config_file = mount_efs.write_stunnel_config_file( @@ -686,7 +767,7 @@ def test_write_stunnel_config_fips_enabled(mocker, tmpdir): DNS_NAME, VERIFY_LEVEL, OCSP_ENABLED, - _get_mount_options(), + _get_mount_options_tls(), DEFAULT_REGION, ) utils.assert_called_once(ca_mocker) @@ -699,5 +780,35 @@ def test_write_stunnel_config_fips_enabled(mocker, tmpdir): _validate_config( config_file, expected_global_config, - _get_expected_efs_config(), + _get_expected_efs_config_tls(), + ) + + +def test_non_tls_mount_with_proxy(mocker, tmpdir): + ca_mocker = 
mocker.patch("mount_efs.add_tunnel_ca_options") + state_file_dir = str(tmpdir) + + config_file = mount_efs.write_stunnel_config_file( + _get_config(mocker), + state_file_dir, + FS_ID, + MOUNT_POINT, + PORT, + DNS_NAME, + VERIFY_LEVEL, + OCSP_ENABLED, + _get_mount_options_non_tls(), + DEFAULT_REGION, + efs_proxy_enabled=True, + ) + utils.assert_not_called(ca_mocker) + + expected_global_config = dict( + _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir) + ) + + _validate_config( + config_file, + expected_global_config, + _get_expected_efs_config_non_tls(), ) diff --git a/test/mount_efs_test/test_write_tls_tunnel_state_file.py b/test/mount_efs_test/test_write_tls_tunnel_state_file.py index ac6426e2..57d7915e 100644 --- a/test/mount_efs_test/test_write_tls_tunnel_state_file.py +++ b/test/mount_efs_test/test_write_tls_tunnel_state_file.py @@ -22,7 +22,7 @@ DATETIME_FORMAT = "%y%m%d%H%M%SZ" -def test_write_tls_tunnel_state_file_netns(tmpdir): +def test_write_tunnel_state_file_netns(tmpdir): state_file_dir = str(tmpdir) mount_point = "/home/user/foo/mount" @@ -42,7 +42,7 @@ def test_write_tls_tunnel_state_file_netns(tmpdir): "useIam": True, } - state_file = mount_efs.write_tls_tunnel_state_file( + state_file = mount_efs.write_tunnel_state_file( FS_ID, mount_point, PORT, @@ -80,7 +80,7 @@ def test_write_tls_tunnel_state_file_netns(tmpdir): assert cert_details["useIam"] == state.get("useIam") -def test_write_tls_tunnel_state_file(tmpdir): +def test_write_tunnel_state_file(tmpdir): state_file_dir = str(tmpdir) mount_point = "/home/user/foo/mount" @@ -100,7 +100,7 @@ def test_write_tls_tunnel_state_file(tmpdir): "useIam": True, } - state_file = mount_efs.write_tls_tunnel_state_file( + state_file = mount_efs.write_tunnel_state_file( FS_ID, mount_point, PORT, PID, COMMAND, FILES, state_file_dir, cert_details ) @@ -131,12 +131,12 @@ def test_write_tls_tunnel_state_file(tmpdir): assert cert_details["useIam"] == state.get("useIam") -def 
test_write_tls_tunnel_state_file_no_cert(tmpdir): +def test_write_tunnel_state_file_no_cert(tmpdir): state_file_dir = str(tmpdir) mount_point = "/home/user/foo/mount" - state_file = mount_efs.write_tls_tunnel_state_file( + state_file = mount_efs.write_tunnel_state_file( FS_ID, mount_point, PORT, PID, COMMAND, FILES, state_file_dir ) diff --git a/test/watchdog_test/test_send_signal_to_stunnel_processes.py b/test/watchdog_test/test_send_signal_to_stunnel_processes.py index b0573fe1..77cff775 100644 --- a/test/watchdog_test/test_send_signal_to_stunnel_processes.py +++ b/test/watchdog_test/test_send_signal_to_stunnel_processes.py @@ -93,7 +93,7 @@ def test_is_mount_stunnel_proc_running_process_not_stunnel(mocker, tmpdir): assert False == watchdog.is_mount_stunnel_proc_running(PID, STATE_FILE, tmpdir) debug_log = mock_log_debug.call_args[0][0] - assert "is not a stunnel process" in debug_log + assert "is not an efs-proxy or stunnel process" in debug_log def test_is_mount_stunnel_proc_running_process_not_running(mocker, tmpdir): diff --git a/test/watchdog_test/test_start_tls_tunnel.py b/test/watchdog_test/test_start_tls_tunnel.py index cfa6af7c..3b546fee 100644 --- a/test/watchdog_test/test_start_tls_tunnel.py +++ b/test/watchdog_test/test_start_tls_tunnel.py @@ -29,13 +29,17 @@ def _mock_popen(mocker): return mocker.patch("subprocess.Popen", return_value=_get_popen_mock()) -def _initiate_state_file(tmpdir, cmd=None): +def _initiate_state_file(tmpdir, cmd=None, efs_proxy_enabled=False): + tunnel_executable = "/usr/bin/stunnel" + if efs_proxy_enabled: + tunnel_executable = "/usr/bin/efs-proxy" + state = { "pid": PID - 1, "cmd": cmd if cmd else [ - "/usr/bin/stunnel", + tunnel_executable, "/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007", ], } @@ -57,6 +61,18 @@ def test_start_tls_tunnel(mocker, tmpdir): assert 1 == len(procs) +def test_start_tls_tunnel_efs_proxy(mocker, tmpdir): + _mock_popen(mocker) + mocker.patch("watchdog.is_pid_running", return_value=True) + + 
state, state_file = _initiate_state_file(tmpdir, efs_proxy_enabled=True) + procs = [] + pid = watchdog.start_tls_tunnel(procs, state, str(tmpdir), state_file) + + assert PID == pid + assert 1 == len(procs) + + def test_start_tls_tunnel_fails(mocker, capsys, tmpdir): _mock_popen(mocker) mocker.patch("watchdog.is_pid_running", return_value=False) @@ -70,7 +86,23 @@ def test_start_tls_tunnel_fails(mocker, capsys, tmpdir): assert 0 != ex.value.code out, err = capsys.readouterr() - assert "Failed to initialize TLS tunnel" in err + assert "Failed to initialize stunnel" in err + + +def test_start_tls_tunnel_fails_proxy_enabled(mocker, capsys, tmpdir): + _mock_popen(mocker) + mocker.patch("watchdog.is_pid_running", return_value=False) + + state, state_file = _initiate_state_file(tmpdir, efs_proxy_enabled=True) + procs = [] + with pytest.raises(SystemExit) as ex: + watchdog.start_tls_tunnel(procs, state, str(tmpdir), state_file) + + assert 0 == len(procs) + assert 0 != ex.value.code + + out, err = capsys.readouterr() + assert "Failed to initialize efs-proxy" in err # https://github.com/kubernetes-sigs/aws-efs-csi-driver/issues/812 The watchdog is trying to launch stunnel on AL2 for @@ -159,3 +191,39 @@ def test_start_tls_tunnel_for_mount_via_older_version_of_efs_utils_on_ecs_amazon assert " ".join(["nsenter", namespace, "/usr/sbin/stunnel5"]) in " ".join( state["cmd"] ) + + +def test_start_tls_tunnel_efs_proxy_enabled(mocker, tmpdir): + """ + This test makes sure that when efs_proxy is enabled, we will start efs_proxy and not stunnel, + even if the existing command used stunnel. 
+ """ + popen_mock = _mock_popen(mocker) + mocker.patch("watchdog.is_pid_running", return_value=True) + mocker.patch("watchdog.find_command_path", return_value="/usr/bin/efs-proxy") + + proxy_command = [ + "/usr/bin/efs-proxy", + "/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007", + ] + state, state_file = _initiate_state_file(tmpdir, proxy_command) + procs = [] + pid = watchdog.start_tls_tunnel(procs, state, str(tmpdir), state_file) + + args, _ = popen_mock.call_args + args = args[0] + assert "/usr/bin/efs-proxy" == args[0] + assert "/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007" == args[1] + + assert PID == pid + assert 1 == len(procs) + + +def test_command_uses_efs_proxy(): + cmd = [ + "/usr/bin/stunnel", + "/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007", + ] + assert watchdog.command_uses_efs_proxy(cmd) == False + cmd[0] = "/usr/bin/efs-proxy" + assert watchdog.command_uses_efs_proxy(cmd) == True From 0884956aac6202b3a70ff7b59a5ced658a0b9abd Mon Sep 17 00:00:00 2001 From: Ryan Stankiewicz Date: Wed, 17 Apr 2024 20:43:27 +0000 Subject: [PATCH 03/51] Don't require rust or cargo for rhel 7 rhel 7 doesn't provide rust or cargo packages. Even if a user installs rust and cargo through rustup, the 'buildrequires' statement will still fail the build. I also updated the README with a "common 2.0.0 installation issues" section. --- README.md | 41 +++++++++++++++++++++++++++++++++++++++-- amazon-efs-utils.spec | 4 ++++ 2 files changed, 43 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index dac59fae..7f796a55 100644 --- a/README.md +++ b/README.md @@ -115,6 +115,13 @@ for more guidance.) Other distributions require building the package from source and installing it. +If your distribution doesn't provide a rust or cargo package, or it provides versions +that are older than 1.68, then you can install rust and cargo through rustup: +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +. 
"$HOME/.cargo/env" +``` + - To build and install an RPM: If the distribution is not OpenSUSE or SLES @@ -161,10 +168,39 @@ $ sudo apt-get -y install ./build/amazon-efs-utils*deb If your Debian distribution doesn't provide a rust or cargo package, or your distribution provides versions that are older than 1.68, then you can install rust and cargo through rustup: ```bash -$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +. "$HOME/.cargo/env" +``` + +### Common installation issues with efs-utils v2.0.0 +**`make rpm` fails due to "feature `edition2021` is required"**: + +Update to a version of rust and cargo +that is newer than 1.68. To install a new version of rust and cargo, run +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh . "$HOME/.cargo/env" ``` +**You installed a new version of rust with the above command, but your system is still using the rust installed by the package manager**: + +When installing rust with the rustup script above, the script will fail if it detects a rust already exists on the system. +Un-install the package manager's rust, and re-install rust through rustup. Once done, you will need to install rust through the package manager again to satisfy +the RPM's dependencies. +```bash +yum remove cargo rust +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +yum install cargo rust +. "$HOME/.cargo/env" +``` + +**When you run `make rpm`, compilation of efs-proxy fails due to `error: linker cc not found`**: + +Make sure that you have a linker installed on your system. For example, on Amazon Linux or RHEL, install gcc with +```bash +yum install gcc +``` + ### On MacOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura distribution For EC2 Mac instances running macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, you can install amazon-efs-utils from the @@ -321,8 +357,9 @@ assist you if relevant logs are provided. 
You can find the log file at `/var/lo Often times, enabling debug level logging can help us find problems more easily. To do this, run `sed -i '/logging_level = INFO/s//logging_level = DEBUG/g' /etc/amazon/efs/efs-utils.conf`. -You can also enable stunnel debug logs with +You can also enable stunnel and efs-proxy debug logs with `sed -i '/stunnel_debug_enabled = false/s//stunnel_debug_enabled = true/g' /etc/amazon/efs/efs-utils.conf`. +These logs files will also be in `/var/log/amazon/efs/`. Make sure to perform the failed mount again after running the prior commands before pulling the logs. diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index 658ad89b..89f8980c 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -73,7 +73,11 @@ Requires(preun) : /sbin/service /sbin/chkconfig Requires(postun) : /sbin/service %endif +# RHEL 7 doesn't provide a Rust or Cargo package, +# so users are expected to install it through rustup. +%if ! 0%{?rhel} == 7 BuildRequires : cargo rust +%endif BuildRequires: openssl-devel Source0 : %{name}.tar.gz From 46cdb9f1363c5d38568df3e8008cc409ca7a9eb6 Mon Sep 17 00:00:00 2001 From: Ryan Stankiewicz Date: Tue, 23 Apr 2024 20:50:29 +0000 Subject: [PATCH 04/51] efs-utils v2.0.1 release - Disable Nagle's algorithm for TLS mounts to improve latencies --- amazon-efs-utils.spec | 7 +++++-- build-deb.sh | 2 +- config.ini | 2 +- dist/amazon-efs-utils.control | 2 +- src/mount_efs/__init__.py | 2 +- src/proxy/Cargo.toml | 2 +- src/proxy/src/tls.rs | 3 ++- src/watchdog/__init__.py | 2 +- 8 files changed, 13 insertions(+), 9 deletions(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index 89f8980c..db2ba998 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -37,12 +37,12 @@ %endif %global proxy_name efs-proxy -%global proxy_version 2.0.0 +%global proxy_version 2.0.1 %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.0.0 +Version : 2.0.1 Release : 
1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -169,6 +169,9 @@ fi %clean %changelog +* Mon Apr 23 2024 Ryan Stankiewicz - 2.0.1 +- Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies + * Mon Apr 08 2024 Ryan Stankiewicz - 2.0.0 - Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. diff --git a/build-deb.sh b/build-deb.sh index d97ab37a..9f35ace1 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.0.0 +VERSION=2.0.1 RELEASE=1 DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index 6944864d..70061e86 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.0.0 +version=2.0.1 release=1 diff --git a/dist/amazon-efs-utils.control b/dist/amazon-efs-utils.control index d7d71c79..8ad53797 100644 --- a/dist/amazon-efs-utils.control +++ b/dist/amazon-efs-utils.control @@ -1,6 +1,6 @@ Package: amazon-efs-utils Architecture: all -Version: 2.0.0 +Version: 2.0.1 Section: utils Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux Priority: optional diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index c8b7566c..0c814c9d 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.0.0" +VERSION = "2.0.1" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 4bbf3042..45b4c798 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.0.0" +version = "2.0.1" publish = false [dependencies] diff --git a/src/proxy/src/tls.rs b/src/proxy/src/tls.rs index 6a6f062e..1ccefb3e 100644 --- a/src/proxy/src/tls.rs +++ b/src/proxy/src/tls.rs @@ -9,6 +9,7 @@ use s2n_tls_tokio::TlsStream; use std::path::Path; use tokio::net::TcpStream; +use crate::connections::configure_stream; use crate::error::ConnectError; pub const FIPS_COMPLIANT_POLICY_VERSION: &str = "20230317"; @@ -138,7 +139,7 @@ pub async fn establish_tls_stream( let tls_connector = TlsConnector::new(config); - let tcp_stream = TcpStream::connect(tls_config.remote_addr).await?; + let tcp_stream = configure_stream(TcpStream::connect(tls_config.remote_addr).await?); let tls_stream = tls_connector .connect(&tls_config.server_domain, tcp_stream) diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 41d49f1d..758644b4 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.0.0" +VERSION = "2.0.1" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" From 48bb3751e0630cffae5580b167fe1b0045c70983 Mon Sep 17 00:00:00 2001 From: Josh Goodall Date: Mon, 29 Apr 2024 22:56:15 +1000 Subject: [PATCH 05/51] Amend Debian control to use binary architecture The output includes a binary for a specific architecture. Parameterise the control file and package filename appropriately, lest repository management tools misbehave in multi-architecture environments. 
--- build-deb.sh | 9 ++++++--- dist/amazon-efs-utils.control | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/build-deb.sh b/build-deb.sh index 9f35ace1..c233ddb1 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -13,7 +13,9 @@ BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild VERSION=2.0.1 RELEASE=1 +ARCH=$(dpkg-architecture -qDEB_BUILD_ARCH) DEB_SYSTEM_RELEASE_PATH=/etc/os-release +export VERSION RELEASE ARCH echo 'Cleaning deb build workspace' rm -rf ${BUILD_ROOT} @@ -47,8 +49,9 @@ install -p -m 755 dist/scriptlets/after-install-upgrade ${BUILD_ROOT}/postinst install -p -m 755 dist/scriptlets/before-remove ${BUILD_ROOT}/prerm install -p -m 755 dist/scriptlets/after-remove ${BUILD_ROOT}/postrm -echo 'Copying control file' -install -p -m 644 dist/amazon-efs-utils.control ${BUILD_ROOT}/control +echo 'Generating control file' +envsubst < dist/amazon-efs-utils.control > ${BUILD_ROOT}/control +chmod 644 ${BUILD_ROOT}/control echo 'Copying conffiles' install -p -m 644 dist/amazon-efs-utils.conffiles ${BUILD_ROOT}/conffiles @@ -69,7 +72,7 @@ tar czf data.tar.gz etc sbin usr var --owner=0 --group=0 cd ${BASE_DIR} echo 'Building deb' -DEB=${BUILD_ROOT}/amazon-efs-utils-${VERSION}-${RELEASE}_all.deb +DEB=${BUILD_ROOT}/amazon-efs-utils-${VERSION}-${RELEASE}_${ARCH}.deb ar r ${DEB} ${BUILD_ROOT}/debian-binary ar r ${DEB} ${BUILD_ROOT}/control.tar.gz ar r ${DEB} ${BUILD_ROOT}/data.tar.gz diff --git a/dist/amazon-efs-utils.control b/dist/amazon-efs-utils.control index 8ad53797..a805c3dc 100644 --- a/dist/amazon-efs-utils.control +++ b/dist/amazon-efs-utils.control @@ -1,6 +1,6 @@ Package: amazon-efs-utils -Architecture: all -Version: 2.0.1 +Architecture: $ARCH +Version: $VERSION-$RELEASE Section: utils Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux Priority: optional From c974e9d22d2029689b223ae2ebac5a96740bd72a Mon Sep 17 00:00:00 2001 From: Amir Mofakhar Date: Wed, 15 May 2024 08:46:45 +0100 Subject: [PATCH 06/51] 
bump py from 1.10.0 to 1.11.0 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 12d46fd4..05dffd55 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ mccabe==0.6.1 mock==2.0.0 pbr==3.1.1 pluggy==0.13.0 -py==1.10.0 +py==1.11.0 pycodestyle==2.5.0 pyflakes==2.1.1 pytest==4.6.7 From 8b3a208227b832270a278bae61cdbfe08bfb5f46 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Mon, 20 May 2024 19:55:12 +0000 Subject: [PATCH 07/51] efs-utils v2.0.2 release --- amazon-efs-utils.spec | 9 ++-- build-deb.sh | 2 +- config.ini | 2 +- dist/amazon-efs-utils.control | 2 +- src/mount_efs/__init__.py | 2 +- src/proxy/Cargo.toml | 3 +- src/proxy/src/logger.rs | 2 +- src/proxy/src/main.rs | 45 +++++++++++++++++++ src/watchdog/__init__.py | 14 +++--- ... => test_clean_up_previous_tunnel_pids.py} | 14 +++--- 10 files changed, 72 insertions(+), 23 deletions(-) rename test/watchdog_test/{test_clean_up_previous_stunnel_pids.py => test_clean_up_previous_tunnel_pids.py} (90%) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index db2ba998..acb3ddc8 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -37,12 +37,11 @@ %endif %global proxy_name efs-proxy -%global proxy_version 2.0.1 %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.0.1 +Version : 2.0.2 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -82,7 +81,7 @@ BuildRequires: openssl-devel Source0 : %{name}.tar.gz %if "%{include_vendor_tarball}" == "true" -Source1 : %{proxy_name}-%{proxy_version}-vendor.tar.xz +Source1 : %{proxy_name}-%{$Version}-vendor.tar.xz Source2 : config.toml %endif @@ -169,6 +168,10 @@ fi %clean %changelog +* Mon May 20 2024 Anthony Tse - 2.0.2 +- Check for efs-proxy PIDs when cleaning tunnel state files +- Add PID to log entries + * Mon Apr 23 2024 Ryan Stankiewicz - 2.0.1 - Disable 
Nagle's algorithm for efs-proxy TLS mounts to improve latencies diff --git a/build-deb.sh b/build-deb.sh index 9f35ace1..f37d5a4a 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.0.1 +VERSION=2.0.2 RELEASE=1 DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index 70061e86..b718fa2a 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.0.1 +version=2.0.2 release=1 diff --git a/dist/amazon-efs-utils.control b/dist/amazon-efs-utils.control index 8ad53797..199fc455 100644 --- a/dist/amazon-efs-utils.control +++ b/dist/amazon-efs-utils.control @@ -1,6 +1,6 @@ Package: amazon-efs-utils Architecture: all -Version: 2.0.1 +Version: 2.0.2 Section: utils Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux Priority: optional diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 0c814c9d..09f64e72 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.0.1" +VERSION = "2.0.2" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 45b4c798..8d6b00ec 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.0.1" +version = "2.0.2" publish = false [dependencies] @@ -33,6 +33,7 @@ xdr-codec = "0.4.4" [dev-dependencies] test-case = "*" tokio = { version = "1.29.0", features = ["test-util"] } +tempfile = "3.10.1" [build-dependencies] xdrgen = "0.4.4" \ No newline at end of file diff --git a/src/proxy/src/logger.rs b/src/proxy/src/logger.rs index 1d153fe9..2b7a3c70 100644 --- a/src/proxy/src/logger.rs +++ b/src/proxy/src/logger.rs @@ -41,7 +41,7 @@ pub fn init(config: &ProxyConfig) { let log_file = RollingFileAppender::builder() .encoder(Box::new(PatternEncoder::new( - "{d(%Y-%m-%dT%H:%M:%S%.3fZ)(utc)} {l} {M} {m}{n}", + "{d(%Y-%m-%dT%H:%M:%S%.3fZ)(utc)} {P} {l} {M} {m}{n}", ))) .build(log_file_path, Box::new(policy)) .expect("Unable to create log file"); diff --git a/src/proxy/src/main.rs b/src/proxy/src/main.rs index acc82e15..a4b90e37 100644 --- a/src/proxy/src/main.rs +++ b/src/proxy/src/main.rs @@ -6,6 +6,7 @@ use controller::Controller; use log::{debug, error, info}; use std::path::Path; use std::sync::Arc; +use tokio::io::AsyncWriteExt; use tokio::signal; use tokio::sync::Mutex; use tokio_util::sync::CancellationToken; @@ -47,6 +48,9 @@ async fn main() { info!("Running with configuration: {:?}", proxy_config); + let pid_file_path = Path::new(&proxy_config.pid_file_path); + let _ = write_pid_file(&pid_file_path).await; + // This "status reporter" is currently only used in tests let (_status_requester, status_reporter) = status_reporter::create_status_channel(); @@ -90,6 +94,28 @@ async fn main() { sigterm_cancellation_token.cancel(); }, } + if pid_file_path.exists() { + match tokio::fs::remove_file(&pid_file_path).await { + Ok(()) => info!("Removed pid file"), + Err(e) => error!("Unable to remove pid_file: {e}"), + } + } +} + +async fn write_pid_file(pid_file_path: &Path) -> Result<(), anyhow::Error> { + let mut pid_file = tokio::fs::OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .mode(0o644) + .open(pid_file_path) + .await?; + 
pid_file + .write_all(&std::process::id().to_string().as_bytes()) + .await?; + pid_file.write_u8(b'\x0A').await?; + pid_file.flush().await?; + Ok(()) } async fn get_tls_config(proxy_config: &ProxyConfig) -> Result { @@ -136,3 +162,22 @@ pub struct Args { #[arg(long, default_value_t = false)] pub tls: bool, } + +#[cfg(test)] +pub mod tests { + + use super::*; + + #[tokio::test] + async fn test_write_pid_file() -> Result<(), Box> { + let pid_file = tempfile::NamedTempFile::new()?; + let pid_file_path = pid_file.path(); + + write_pid_file(pid_file_path).await?; + + let expected_pid = std::process::id().to_string(); + let read_pid = tokio::fs::read_to_string(pid_file_path).await?; + assert_eq!(expected_pid + "\n", read_pid); + Ok(()) + } +} diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 758644b4..b3b69ae2 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.0.1" +VERSION = "2.0.2" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" @@ -2119,11 +2119,11 @@ def clean_up_certificate_lock_file(state_file_dir=STATE_FILE_DIR): check_and_remove_file(lock_file) -def clean_up_previous_stunnel_pids(state_file_dir=STATE_FILE_DIR): +def clean_up_previous_tunnel_pids(state_file_dir=STATE_FILE_DIR): """ - Cleans up stunnel pids created by mount watchdog spawned by a previous efs-csi-driver pod after driver restart, upgrade - or crash. This method attempts to clean PIDs from persisted state files after efs-csi-driver restart to - ensure watchdog creates a new stunnel. + Cleans up efs-proxy/stunnel pids created by mount watchdog spawned by a previous efs-csi-driver + pod after driver restart, upgrade, or crash. This method attempts to clean PIDs from persisted + state files after efs-csi-driver restart to ensure watchdog creates a new tunnel. 
""" state_files = get_state_files(state_file_dir) logging.debug( @@ -2147,7 +2147,7 @@ def clean_up_previous_stunnel_pids(state_file_dir=STATE_FILE_DIR): out = check_process_name(pid) - if out and "stunnel" in str(out): + if out and ("stunnel" in str(out) or "efs-proxy" in str(out)): logging.debug( "PID %s in state file %s is active. Skipping clean up", pid, @@ -2189,7 +2189,7 @@ def main(): CONFIG_SECTION, "unmount_grace_period_sec" ) - clean_up_previous_stunnel_pids() + clean_up_previous_tunnel_pids() clean_up_certificate_lock_file() while True: diff --git a/test/watchdog_test/test_clean_up_previous_stunnel_pids.py b/test/watchdog_test/test_clean_up_previous_tunnel_pids.py similarity index 90% rename from test/watchdog_test/test_clean_up_previous_stunnel_pids.py rename to test/watchdog_test/test_clean_up_previous_tunnel_pids.py index adda3a47..a8845f40 100644 --- a/test/watchdog_test/test_clean_up_previous_stunnel_pids.py +++ b/test/watchdog_test/test_clean_up_previous_tunnel_pids.py @@ -54,7 +54,7 @@ def test_malformed_state_file(mocker, tmpdir): mocker, state_files={"mnt": state_file}, process_name_output=PROCESS_NAME_OUTPUT ) - watchdog.clean_up_previous_stunnel_pids(state_file_dir) + watchdog.clean_up_previous_tunnel_pids(state_file_dir) utils.assert_not_called(rewrite_state_file_mock) @@ -66,7 +66,7 @@ def test_clean_up_active_stunnel_from_previous_watchdog(mocker, tmpdir): mocker, state_files={"mnt": state_file}, process_name_output=PROCESS_NAME_OUTPUT ) - watchdog.clean_up_previous_stunnel_pids(state_file_dir) + watchdog.clean_up_previous_tunnel_pids(state_file_dir) utils.assert_not_called(rewrite_state_file_mock) @@ -80,7 +80,7 @@ def test_clean_up_active_LWP_from_driver(mocker, tmpdir): process_name_output=PROCESS_NAME_OUTPUT_LWP, ) - watchdog.clean_up_previous_stunnel_pids(state_file_dir) + watchdog.clean_up_previous_tunnel_pids(state_file_dir) utils.assert_called_once(rewrite_state_file_mock) @@ -94,7 +94,7 @@ def 
test_clean_up_stunnel_pid_from_previous_driver(mocker, tmpdir): process_name_output=PROCESS_NAME_OUTPUT_ERR, ) - watchdog.clean_up_previous_stunnel_pids(state_file_dir) + watchdog.clean_up_previous_tunnel_pids(state_file_dir) utils.assert_called_once(rewrite_state_file_mock) @@ -104,7 +104,7 @@ def test_no_state_files_from_previous_driver(mocker, tmpdir): mocker, state_files={}, process_name_output=PROCESS_NAME_OUTPUT ) - watchdog.clean_up_previous_stunnel_pids(tmpdir) + watchdog.clean_up_previous_tunnel_pids(tmpdir) utils.assert_not_called(rewrite_state_file_mock) @@ -122,7 +122,7 @@ def test_clean_up_multiple_stunnel_pids(mocker, tmpdir): process_name_output=PROCESS_NAME_OUTPUT_ERR, ) - watchdog.clean_up_previous_stunnel_pids(state_file_dir) + watchdog.clean_up_previous_tunnel_pids(state_file_dir) utils.assert_called(rewrite_state_file_mock) @@ -139,6 +139,6 @@ def test_clean_up_stunnel_no_pid(mocker, tmpdir): process_name_output=PROCESS_NAME_OUTPUT_LWP, ) - watchdog.clean_up_previous_stunnel_pids(state_file_dir) + watchdog.clean_up_previous_tunnel_pids(state_file_dir) utils.assert_not_called(rewrite_state_file_mock) From 14a15c7e88517035f18df9a4c8f0ea728596003e Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Mon, 10 Jun 2024 16:04:31 +0000 Subject: [PATCH 08/51] Add Ubuntu 22.04 to verified OS list --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 7f796a55..ce13e5a0 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ The `efs-utils` package has been verified against the following Linux distributi | Ubuntu 16.04 | `deb` | `systemd` | | Ubuntu 18.04 | `deb` | `systemd` | | Ubuntu 20.04 | `deb` | `systemd` | +| Ubuntu 22.04 | `deb` | `systemd` | | OpenSUSE Leap | `rpm` | `systemd` | | OpenSUSE Tumbleweed | `rpm` | `systemd` | | Oracle8 | `rpm` | `systemd` | From 5f418ee59f3772205916c100bc3853bc7c44ebcb Mon Sep 17 00:00:00 2001 From: Arnav Gupta Date: Tue, 18 Jun 2024 19:58:35 +0000 Subject: [PATCH 09/51] efs-utils 
v2.0.3 release --- amazon-efs-utils.spec | 6 +++++- build-deb.sh | 2 +- config.ini | 2 +- dist/amazon-efs-utils.control | 2 +- requirements.txt | 2 +- src/mount_efs/__init__.py | 6 +++--- src/proxy/Cargo.toml | 2 +- src/watchdog/__init__.py | 8 ++++---- .../test_refresh_self_signed_certificate.py | 12 ++++++++---- 9 files changed, 25 insertions(+), 17 deletions(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index acb3ddc8..a5f9b5e6 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.0.2 +Version : 2.0.3 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -168,6 +168,10 @@ fi %clean %changelog +* Tue Jun 18 2024 Arnav Gupta - 2.0.3 +- Upgrade py version +- Replace deprecated usage of datetime + * Mon May 20 2024 Anthony Tse - 2.0.2 - Check for efs-proxy PIDs when cleaning tunnel state files - Add PID to log entries diff --git a/build-deb.sh b/build-deb.sh index f37d5a4a..ef868bf9 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.0.2 +VERSION=2.0.3 RELEASE=1 DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index b718fa2a..bf1e29bc 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.0.2 +version=2.0.3 release=1 diff --git a/dist/amazon-efs-utils.control b/dist/amazon-efs-utils.control index 199fc455..4d5e8e40 100644 --- a/dist/amazon-efs-utils.control +++ b/dist/amazon-efs-utils.control @@ -1,6 +1,6 @@ Package: amazon-efs-utils Architecture: all -Version: 2.0.2 +Version: 2.0.3 Section: utils Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux Priority: optional diff --git a/requirements.txt b/requirements.txt index 12d46fd4..05dffd55 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 
@@ mccabe==0.6.1 mock==2.0.0 pbr==3.1.1 pluggy==0.13.0 -py==1.10.0 +py==1.11.0 pycodestyle==2.5.0 pyflakes==2.1.1 pytest==4.6.7 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 09f64e72..e723d29a 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -47,7 +47,7 @@ import threading import time from contextlib import contextmanager -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from logging.handlers import RotatingFileHandler try: @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.0.2" +VERSION = "2.0.3" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -2540,7 +2540,7 @@ def get_utc_now(): """ Wrapped for patching purposes in unit tests """ - return datetime.utcnow() + return datetime.now(timezone.utc) def assert_root(): diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 8d6b00ec..0a6d0add 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.0.2" +version = "2.0.3" publish = false [dependencies] diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index b3b69ae2..5201423d 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -25,7 +25,7 @@ import time from collections import namedtuple from contextlib import contextmanager -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone from logging.handlers import RotatingFileHandler from signal import SIGHUP, SIGKILL, SIGTERM @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.0.2" +VERSION = "2.0.3" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" @@ -1408,7 +1408,7 @@ def check_certificate( ) # creation instead of NOT_BEFORE datetime is used for refresh of cert because NOT_BEFORE derives from creation datetime should_refresh_cert = ( - get_utc_now() - certificate_creation_time + get_utc_now() - certificate_creation_time.replace(tzinfo=timezone.utc) ).total_seconds() > certificate_renewal_interval_secs if certificate_exists and not should_refresh_cert: @@ -2060,7 +2060,7 @@ def get_utc_now(): """ Wrapped for patching purposes in unit tests """ - return datetime.utcnow() + return datetime.now(timezone.utc) def check_process_name(pid): diff --git a/test/watchdog_test/test_refresh_self_signed_certificate.py b/test/watchdog_test/test_refresh_self_signed_certificate.py index 5c9aaa4d..f69c3b70 100644 --- a/test/watchdog_test/test_refresh_self_signed_certificate.py +++ b/test/watchdog_test/test_refresh_self_signed_certificate.py @@ -7,7 +7,7 @@ import json import logging import os -from datetime import datetime, timedelta +from datetime import datetime, timedelta, timezone import pytest @@ -34,7 +34,7 @@ ACCESS_KEY_ID_VAL = "FAKE_AWS_ACCESS_KEY_ID" SECRET_ACCESS_KEY_VAL = "FAKE_AWS_SECRET_ACCESS_KEY" SESSION_TOKEN_VAL = "FAKE_SESSION_TOKEN" -FIXED_DT = datetime(2000, 1, 1, 12, 0, 0) +FIXED_DT = datetime(2000, 1, 1, 12, 
0, 0, tzinfo=timezone.utc) CLIENT_INFO = {"source": "test", "efs_utils_version": watchdog.VERSION} CREDENTIALS = { "AccessKeyId": ACCESS_KEY_ID_VAL, @@ -472,7 +472,9 @@ def test_refresh_self_signed_certificate_send_sighup(mocker, tmpdir, caplog): config = _get_config() pk_path = _get_mock_private_key_path(mocker, tmpdir) - four_hours_back = (datetime.utcnow() - timedelta(hours=4)).strftime(DT_PATTERN) + four_hours_back = (datetime.now(timezone.utc) - timedelta(hours=4)).strftime( + DT_PATTERN + ) tls_dict = watchdog.tls_paths_dictionary(MOUNT_NAME, str(tmpdir)) state = _create_certificate_and_state( tls_dict, str(tmpdir), pk_path, four_hours_back, ap_id=AP_ID @@ -494,7 +496,9 @@ def test_refresh_self_signed_certificate_pid_not_running(mocker, tmpdir, caplog) config = _get_config() pk_path = _get_mock_private_key_path(mocker, tmpdir) - four_hours_back = (datetime.utcnow() - timedelta(hours=4)).strftime(DT_PATTERN) + four_hours_back = (datetime.now(timezone.utc) - timedelta(hours=4)).strftime( + DT_PATTERN + ) tls_dict = watchdog.tls_paths_dictionary(MOUNT_NAME, str(tmpdir)) state = _create_certificate_and_state( tls_dict, str(tmpdir), pk_path, four_hours_back, False, ap_id=AP_ID From 848d80143605103c033389f4fd0221163dc1ab25 Mon Sep 17 00:00:00 2001 From: Arnav Gupta Date: Tue, 18 Jun 2024 20:48:02 +0000 Subject: [PATCH 10/51] Remove outdated python versions from CircleCI --- .circleci/config.yml | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1d7fda93..64cf3d45 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -238,18 +238,6 @@ workflows: - test: name: python3_8 image: python:3.8.13 - - test: - name: python3_7 - image: python:3.7.13 - - test: - name: python3_6 - image: python:3.6.15 - - test: - name: python3_5 - image: python:3.5.10 - - test: - name: python3_4 - image: python:3.4.10 - build-deb-package: name: ubuntu-latest image: ubuntu:latest From 8bac1375e1d122be81f4fac2997a4acb2d81ecfc 
Mon Sep 17 00:00:00 2001 From: Arnav Gupta Date: Tue, 18 Jun 2024 22:24:34 +0000 Subject: [PATCH 11/51] Change README to account for change to CircleCI Python tests --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ce13e5a0..9824a0c7 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ The `efs-utils` package has been verified against the following MacOS distributi * `nfs-utils` (RHEL/CentOS/Amazon Linux/Fedora) or `nfs-common` (Debian/Ubuntu) * OpenSSL-devel 1.0.2+ -* Python 3.4+ +* Python 3.7/3.8 * `stunnel` 4.56+ - `rust` 1.68+ - `cargo` From 9d088054eead50568db39a85abea26785414bd5e Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Tue, 25 Jun 2024 21:25:27 +0000 Subject: [PATCH 12/51] efs-utils v2.0.4 release --- amazon-efs-utils.spec | 5 +- build-deb.sh | 2 +- config.ini | 2 +- dist/amazon-efs-utils.control | 2 +- src/mount_efs/__init__.py | 92 ++++++++++++------- src/proxy/Cargo.toml | 2 +- src/proxy/src/config_parser.rs | 8 +- src/proxy/src/connections.rs | 35 ++++--- src/proxy/src/controller.rs | 20 ++-- src/proxy/src/efs_rpc.rs | 6 +- src/proxy/src/main.rs | 4 +- src/proxy/src/proxy_identifier.rs | 4 +- src/proxy/src/status_reporter.rs | 7 +- src/proxy/src/tls.rs | 6 +- src/watchdog/__init__.py | 2 +- .../test_get_aws_security_credentials.py | 18 +++- .../mount_efs_test/test_get_instance_az_id.py | 9 +- test/mount_efs_test/test_get_instance_id.py | 9 +- .../test_get_target_instance_identity.py | 32 ++++--- test/watchdog_test/test_start_tls_tunnel.py | 14 +-- 20 files changed, 170 insertions(+), 109 deletions(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index a5f9b5e6..b9672b63 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.0.3 +Version : 2.0.4 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file 
systems @@ -168,6 +168,9 @@ fi %clean %changelog +* Tue Jun 25 2024 Anthony Tse - 2.0.4 +- Add retry logic to and increase timeout for EC2 metadata token retrieval requests + * Tue Jun 18 2024 Arnav Gupta - 2.0.3 - Upgrade py version - Replace deprecated usage of datetime diff --git a/build-deb.sh b/build-deb.sh index ef868bf9..b5e64432 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.0.3 +VERSION=2.0.4 RELEASE=1 DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index bf1e29bc..79723cdb 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.0.3 +version=2.0.4 release=1 diff --git a/dist/amazon-efs-utils.control b/dist/amazon-efs-utils.control index 4d5e8e40..7734f8e6 100644 --- a/dist/amazon-efs-utils.control +++ b/dist/amazon-efs-utils.control @@ -1,6 +1,6 @@ Package: amazon-efs-utils Architecture: all -Version: 2.0.3 +Version: 2.0.4 Section: utils Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux Priority: optional diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index e723d29a..083e3fa1 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.0.3" +VERSION = "2.0.4" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -110,6 +110,7 @@ # 50ms DEFAULT_TIMEOUT = 0.05 DEFAULT_MACOS_VALUE = "macos" +DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT = 3 DEFAULT_NFS_MOUNT_COMMAND_RETRY_COUNT = 3 DEFAULT_NFS_MOUNT_COMMAND_TIMEOUT_SEC = 15 DISABLE_FETCH_EC2_METADATA_TOKEN_ITEM = "disable_fetch_ec2_metadata_token" @@ -612,44 +613,73 @@ def fetch_ec2_metadata_token_disabled(config): ) -def get_aws_ec2_metadata_token(timeout=DEFAULT_TIMEOUT): - # Normally the session token is fetched within 10ms, setting a timeout of 50ms here to abort the request - # and return None if the token has 
not returned within 50ms - try: - opener = build_opener(HTTPHandler) - request = Request(INSTANCE_METADATA_TOKEN_URL) +def get_aws_ec2_metadata_token( + request_timeout=0.5, + max_retries=DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT, + retry_delay=0.5, +): + """ + Retrieves the AWS EC2 metadata token. Typically, the token is fetched + within 10ms. We set a default timeout of 0.5 seconds to prevent mount + failures caused by slow requests. + + Args: + max_retries (int): The maximum number of retries. + retry_delay (int): The delay in seconds between retries. + + Returns: + The AWS EC2 metadata token str or None if it cannot be retrieved. + """ - request.add_header("X-aws-ec2-metadata-token-ttl-seconds", "21600") - request.get_method = lambda: "PUT" + def get_token(timeout): try: - res = opener.open(request, timeout=timeout) - return res.read() - except socket.timeout: - exception_message = "Timeout when getting the aws ec2 metadata token" - except HTTPError as e: - exception_message = "Failed to fetch token due to %s" % e - except Exception as e: - exception_message = ( - "Unknown error when fetching aws ec2 metadata token, %s" % e + opener = build_opener(HTTPHandler) + request = Request(INSTANCE_METADATA_TOKEN_URL) + request.add_header("X-aws-ec2-metadata-token-ttl-seconds", "21600") + request.get_method = lambda: "PUT" + try: + response = opener.open(request, timeout=timeout) + return response.read() + finally: + opener.close() + + except NameError: + headers = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"} + request = Request( + INSTANCE_METADATA_TOKEN_URL, headers=headers, method="PUT" ) - logging.debug(exception_message) - return None - except NameError: - headers = {"X-aws-ec2-metadata-token-ttl-seconds": "21600"} - req = Request(INSTANCE_METADATA_TOKEN_URL, headers=headers, method="PUT") + response = urlopen(request, timeout=timeout) + return response.read() + + retries = 0 + while retries < max_retries: try: - res = urlopen(req, timeout=timeout) - 
return res.read() + return get_token(timeout=request_timeout) except socket.timeout: - exception_message = "Timeout when getting the aws ec2 metadata token" + logging.debug( + "Timeout when getting the aws ec2 metadata token. Attempt: %s/%s" + % (retries + 1, max_retries) + ) except HTTPError as e: - exception_message = "Failed to fetch token due to %s" % e + logging.debug( + "Failed to fetch token due to %s. Attempt: %s/%s" + % (e, retries + 1, max_retries) + ) except Exception as e: - exception_message = ( - "Unknown error when fetching aws ec2 metadata token, %s" % e + logging.debug( + "Unknown error when fetching aws ec2 metadata token, %s. Attempt: %s/%s" + % (e, retries + 1, max_retries) ) - logging.debug(exception_message) - return None + + retries += 1 + if retries < max_retries: + logging.debug("Retrying in %s seconds", retry_delay) + time.sleep(retry_delay) + else: + logging.debug( + "Unable to retrieve AWS EC2 metadata token. Maximum number of retries reached." + ) + return None def get_aws_security_credentials( diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 0a6d0add..b5f1d721 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.0.3" +version = "2.0.4" publish = false [dependencies] diff --git a/src/proxy/src/config_parser.rs b/src/proxy/src/config_parser.rs index 0e26757b..badcac3e 100644 --- a/src/proxy/src/config_parser.rs +++ b/src/proxy/src/config_parser.rs @@ -95,12 +95,12 @@ pub mod tests { pub static TEST_CONFIG_PATH: &str = "tests/certs/test_config.ini"; pub fn get_test_config() -> ProxyConfig { - ProxyConfig::from_path(&Path::new(TEST_CONFIG_PATH)).expect("Could not parse test config.") + ProxyConfig::from_path(Path::new(TEST_CONFIG_PATH)).expect("Could not parse test config.") } #[test] fn test_read_config_from_file() { - assert!(ProxyConfig::from_path(&Path::new(TEST_CONFIG_PATH)).is_ok()); + assert!(ProxyConfig::from_path(Path::new(TEST_CONFIG_PATH)).is_ok()); } #[test] @@ -131,7 +131,7 @@ key = /etc/amazon/efs/privateKey.pem checkHost = fs-12341234.efs.us-east-1.amazonaws.com "#; - let result_config = ProxyConfig::from_str(&config_string).unwrap(); + let result_config = ProxyConfig::from_str(config_string).unwrap(); let expected_proxy_config = ProxyConfig { fips: true, pid_file_path: String::from( @@ -184,7 +184,7 @@ key = /etc/amazon/efs/privateKey.pem checkHost = fs-12341234.efs.us-east-1.amazonaws.com "#; - let result_config = ProxyConfig::from_str(&config_string).unwrap(); + let result_config = ProxyConfig::from_str(config_string).unwrap(); let expected_proxy_config = ProxyConfig { fips: false, pid_file_path: String::from( diff --git a/src/proxy/src/connections.rs b/src/proxy/src/connections.rs index 41b25a4f..67d4651e 100644 --- a/src/proxy/src/connections.rs +++ b/src/proxy/src/connections.rs @@ -405,7 +405,7 @@ mod tests { use uuid::Uuid; const PROXY_ID: ProxyIdentifier = ProxyIdentifier { - uuid: Uuid::from_u128(1 as u128), + uuid: Uuid::from_u128(1_u128), incarnation: 0, }; @@ -430,7 +430,7 @@ mod tests { let partition_finder = TlsPartitionFinder::new(Arc::new(Mutex::new(tls_config))); let (_s, id, _) = partition_finder - 
.establish_connection(PROXY_ID.clone()) + .establish_connection(PROXY_ID) .await .expect("Failed to connect to server"); @@ -440,7 +440,7 @@ mod tests { MultiplexTest { service, - partition_finder: partition_finder, + partition_finder, initial_partition_id, } } @@ -455,8 +455,8 @@ mod tests { let (new_connnection_id, connections, _) = test .partition_finder .inner_establish_multiplex_connection( - PROXY_ID.clone(), - Some(test.initial_partition_id.clone()), + PROXY_ID, + Some(test.initial_partition_id), shutdown_handle, ) .await @@ -479,15 +479,15 @@ mod tests { test.service .post_action(ServiceAction::StopPartitionAcceptor( - test.initial_partition_id.clone(), + test.initial_partition_id, )) .await; let (new_connnection_id, connections, _) = test .partition_finder .inner_establish_multiplex_connection( - PROXY_ID.clone(), - Some(test.initial_partition_id.clone()), + PROXY_ID, + Some(test.initial_partition_id), shutdown_handle, ) .await @@ -510,7 +510,7 @@ mod tests { let (new_connnection_id, connections, _) = test .partition_finder - .inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle) + .inner_establish_multiplex_connection(PROXY_ID, None, shutdown_handle) .await .expect("Could not establish a multiplex connection"); @@ -531,9 +531,7 @@ mod tests { let partition_finder = PlainTextPartitionFinder { mount_target_addr: format!("127.0.0.1:{}", port.clone()), }; - partition_finder - .establish_connection(PROXY_ID.clone()) - .await + partition_finder.establish_connection(PROXY_ID).await }) .await .expect("join err"); @@ -552,7 +550,7 @@ mod tests { mount_target_addr: format!("127.0.0.1:{}", port.clone()), }; partition_finder - .inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle) + .inner_establish_multiplex_connection(PROXY_ID, None, shutdown_handle) .await }) .await @@ -573,7 +571,7 @@ mod tests { mount_target_addr: format!("127.0.0.1:{}", port.clone()), }; partition_finder - 
.inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle_clone) + .inner_establish_multiplex_connection(PROXY_ID, None, shutdown_handle_clone) .await }); @@ -598,8 +596,8 @@ mod tests { let error = test .partition_finder .inner_establish_multiplex_connection( - PROXY_ID.clone(), - Some(test.initial_partition_id.clone()), + PROXY_ID, + Some(test.initial_partition_id), shutdown_handle.clone(), ) .await; @@ -610,6 +608,7 @@ mod tests { )); } + #[allow(clippy::enum_variant_names)] enum BrokenPartitionFinderType { _ConnectIoError, _RpcIoError, @@ -666,7 +665,7 @@ mod tests { let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); let error = partition_finder - .inner_establish_multiplex_connection(PROXY_ID.clone(), None, shutdown_handle.clone()) + .inner_establish_multiplex_connection(PROXY_ID, None, shutdown_handle.clone()) .await; assert!(matches!(error, Err((ConnectError::MultiplexFailure, None)))); @@ -678,7 +677,7 @@ mod tests { let mut sigs_hangup_listener = signal::unix::signal(signal::unix::SignalKind::hangup()).unwrap(); let config_file_path = Path::new("tests/certs/test_config.ini"); - let config_contents = std::fs::read_to_string(&config_file_path).unwrap(); + let config_contents = std::fs::read_to_string(config_file_path).unwrap(); let proxy_config = ProxyConfig::from_str(&config_contents).unwrap(); let mut tls_config = TlsConfig::new_from_config(&proxy_config).await.unwrap(); tls_config.client_cert = vec![1, 2]; diff --git a/src/proxy/src/controller.rs b/src/proxy/src/controller.rs index 46406e5e..4891b769 100644 --- a/src/proxy/src/controller.rs +++ b/src/proxy/src/controller.rs @@ -439,7 +439,7 @@ pub mod tests { } pub async fn new_with_throughput_scale_up_threshold(threshold: i32, tls: bool) -> Self { - let mut config = super::DEFAULT_SCALE_UP_CONFIG.clone(); + let mut config = super::DEFAULT_SCALE_UP_CONFIG; config.scale_up_bytes_per_sec_threshold = threshold; 
TestService::new_with_partition_count_and_scale_up_config(PARTITION_COUNT, config, tls) .await @@ -450,7 +450,7 @@ pub mod tests { threshold: i32, tls: bool, ) -> Self { - let mut config = super::DEFAULT_SCALE_UP_CONFIG.clone(); + let mut config = super::DEFAULT_SCALE_UP_CONFIG; config.scale_up_bytes_per_sec_threshold = threshold; TestService::new_with_partition_count_and_scale_up_config(count, config, tls).await } @@ -472,7 +472,7 @@ pub mod tests { let mut counter = HashMap::new(); for id in partition_ids.iter() { - counter.insert(id.clone(), Vec::new()); + counter.insert(*id, Vec::new()); } let request_counter = Arc::new(Mutex::new(counter)); @@ -531,6 +531,7 @@ pub mod tests { *consumable_action = Some(new_action); } + #[allow(clippy::too_many_arguments)] fn run( listener: TcpListener, scale_up_config: ScaleUpConfig, @@ -575,7 +576,7 @@ pub mod tests { stream: S, scale_up_config: ScaleUpConfig, partition_idx: &mut usize, - partition_ids: &Vec, + partition_ids: &[PartitionId], stopped_partitions: Arc>>, request_counter: Arc>>>>, posted_action: Arc>>, @@ -599,7 +600,7 @@ pub mod tests { for i in 0..partition_ids.len() { *partition_idx = (*partition_idx + i + 1) % partition_ids.len(); if !stopped.contains(&partition_ids[*partition_idx]) { - next_id = Some(partition_ids[*partition_idx].clone()); + next_id = Some(partition_ids[*partition_idx]); break; } } @@ -725,6 +726,7 @@ pub mod tests { } } + #[allow(clippy::type_complexity)] fn parse_bind_client_to_partition_request( request: &Vec, ) -> Result, Box> { @@ -765,7 +767,7 @@ pub mod tests { .expect("No message found") .expect("failed to parse"); - let rpc = payload_result.rpcs.get(0).expect("No RPCs found"); + let rpc = payload_result.rpcs.first().expect("No RPCs found"); assert_eq!(expected_data, rpc.to_vec()[RPC_HEADER_SIZE..]); Ok(()) } @@ -810,7 +812,7 @@ pub mod tests { proxy_id: ProxyIdentifier::new(), scale_up_attempt_count: 0, restart_count: 0, - scale_up_config: scale_up_config, + scale_up_config, 
status_reporter, }; @@ -827,7 +829,7 @@ pub mod tests { proxy_id: ProxyIdentifier::new(), scale_up_attempt_count: 0, restart_count: 0, - scale_up_config: scale_up_config, + scale_up_config, status_reporter, }; @@ -1601,7 +1603,7 @@ pub mod tests { let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; let mut port_health_check = TestClient::new(proxy.listen_port).await; // Mimic efs-utils's port test which checks whether efs-proxy is alive. - let _ = port_health_check.stream.shutdown().await.unwrap(); + port_health_check.stream.shutdown().await.unwrap(); let mut client = TestClient::new(proxy.listen_port).await; client.send_message_with_size(10).await.unwrap(); client.send_message_with_size(1024).await.unwrap(); diff --git a/src/proxy/src/efs_rpc.rs b/src/proxy/src/efs_rpc.rs index 5e464734..199fb9e7 100644 --- a/src/proxy/src/efs_rpc.rs +++ b/src/proxy/src/efs_rpc.rs @@ -135,8 +135,8 @@ pub mod tests { let mut payload_buf = Vec::new(); let response = BindClientResponse { - bind_response: bind_response, - scale_up_config: scale_up_config, + bind_response, + scale_up_config, }; xdr_codec::pack(&response, &mut payload_buf)?; @@ -224,7 +224,7 @@ pub mod tests { #[test] fn test_response_serde() -> Result<(), RpcError> { let partition_id = generate_partition_id(); - let partition_id_copy = efs_prot::PartitionId(partition_id.0.clone()); + let partition_id_copy = efs_prot::PartitionId(partition_id.0); let response = create_bind_client_to_partition_response( XID, diff --git a/src/proxy/src/main.rs b/src/proxy/src/main.rs index a4b90e37..a7c1e54b 100644 --- a/src/proxy/src/main.rs +++ b/src/proxy/src/main.rs @@ -49,7 +49,7 @@ async fn main() { info!("Running with configuration: {:?}", proxy_config); let pid_file_path = Path::new(&proxy_config.pid_file_path); - let _ = write_pid_file(&pid_file_path).await; + let _ = write_pid_file(pid_file_path).await; // This "status reporter" is currently only used in tests let (_status_requester, status_reporter) = 
status_reporter::create_status_channel(); @@ -111,7 +111,7 @@ async fn write_pid_file(pid_file_path: &Path) -> Result<(), anyhow::Error> { .open(pid_file_path) .await?; pid_file - .write_all(&std::process::id().to_string().as_bytes()) + .write_all(std::process::id().to_string().as_bytes()) .await?; pid_file.write_u8(b'\x0A').await?; pid_file.flush().await?; diff --git a/src/proxy/src/proxy_identifier.rs b/src/proxy/src/proxy_identifier.rs index e8e08e06..0e986864 100644 --- a/src/proxy/src/proxy_identifier.rs +++ b/src/proxy/src/proxy_identifier.rs @@ -33,7 +33,7 @@ mod tests { #[test] fn test_increment() { let mut proxy_id = ProxyIdentifier::new(); - let proxy_id_original = proxy_id.clone(); + let proxy_id_original = proxy_id; for i in 0..5 { assert_eq!(i, proxy_id.incarnation); proxy_id.increment(); @@ -45,7 +45,7 @@ mod tests { #[test] fn test_wrap_around() { let mut proxy_id = ProxyIdentifier::new(); - let proxy_id_original = proxy_id.clone(); + let proxy_id_original = proxy_id; proxy_id.incarnation = i64::MAX; proxy_id.increment(); assert_eq!(proxy_id_original.uuid, proxy_id.uuid); diff --git a/src/proxy/src/status_reporter.rs b/src/proxy/src/status_reporter.rs index ac9f6a9c..4aa77432 100644 --- a/src/proxy/src/status_reporter.rs +++ b/src/proxy/src/status_reporter.rs @@ -5,6 +5,7 @@ use anyhow::{Error, Result}; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::Instant; +#[allow(dead_code)] pub struct Report { pub proxy_id: ProxyIdentifier, pub partition_id: Option, @@ -88,7 +89,7 @@ mod tests { .await .expect("Request channel closed"); let report = Report { - proxy_id: proxy_id.clone(), + proxy_id, partition_id: None, connection_state: ConnectionSearchState::Idle, num_connections: 1, @@ -101,9 +102,9 @@ mod tests { let r = status_requester._request_status().await?; assert_eq!(proxy_id, r.proxy_id); - assert!(matches!(r.partition_id, None)); + assert!(r.partition_id.is_none()); assert_eq!(r.connection_state, ConnectionSearchState::Idle); - 
assert!(matches!(r.last_proxy_update, None)); + assert!(r.last_proxy_update.is_none()); assert_eq!(1, r.num_connections); Ok(()) } diff --git a/src/proxy/src/tls.rs b/src/proxy/src/tls.rs index 1ccefb3e..c91fe1fd 100644 --- a/src/proxy/src/tls.rs +++ b/src/proxy/src/tls.rs @@ -121,9 +121,9 @@ impl TlsConfig { } TlsConfig::new( config.fips, - &ca_file, - &ca_cert_pem, - &private_key_pem, + ca_file, + ca_cert_pem, + private_key_pem, efs_config.mount_target_addr.as_str(), efs_config.expected_server_hostname_tls.as_str(), ) diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 5201423d..3247a380 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.0.3" +VERSION = "2.0.4" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" diff --git a/test/mount_efs_test/test_get_aws_security_credentials.py b/test/mount_efs_test/test_get_aws_security_credentials.py index 0bdd291b..e931f32e 100644 --- a/test/mount_efs_test/test_get_aws_security_credentials.py +++ b/test/mount_efs_test/test_get_aws_security_credentials.py @@ -211,7 +211,14 @@ def test_get_aws_security_credentials_get_instance_metadata_role_name_str_with_t Exception("Unknown Error"), ]: _test_get_aws_security_credentials_get_instance_metadata_role_name( - mocker, is_name_str=True, token_effects=[token_effect] + mocker, + is_name_str=True, + token_effects=[ + token_effect + for _ in range( + 0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT + ) + ], ) @@ -230,7 +237,14 @@ def test_get_aws_security_credentials_get_instance_metadata_role_name_bytes_with Exception("Unknown Error"), ]: _test_get_aws_security_credentials_get_instance_metadata_role_name( - mocker, is_name_str=False, token_effects=[token_effect] + mocker, + is_name_str=False, + token_effects=[ + token_effect + for _ in range( + 0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT + ) + ], ) diff --git 
a/test/mount_efs_test/test_get_instance_az_id.py b/test/mount_efs_test/test_get_instance_az_id.py index a72a280f..ac76eb75 100644 --- a/test/mount_efs_test/test_get_instance_az_id.py +++ b/test/mount_efs_test/test_get_instance_az_id.py @@ -165,9 +165,12 @@ def test_get_instance_az_id_without_token(mocker): # Reproduce https://github.com/aws/efs-utils/issues/46 def test_get_instance_az_id_token_fetch_time_out(mocker): # get_aws_ec2_metadata_token timeout, fallback to call without session token - mocker.patch( - "mount_efs.urlopen", side_effect=[socket.timeout, MockUrlLibResponse()] - ) + side_effect = [ + socket.timeout + for _ in range(0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT) + ] + side_effect.append(MockUrlLibResponse()) + mocker.patch("mount_efs.urlopen", side_effect=side_effect) assert INSTANCE_AZ_ID == test_get_instance_az_id_helper() diff --git a/test/mount_efs_test/test_get_instance_id.py b/test/mount_efs_test/test_get_instance_id.py index 1f531c6d..a97f55cc 100644 --- a/test/mount_efs_test/test_get_instance_id.py +++ b/test/mount_efs_test/test_get_instance_id.py @@ -99,9 +99,12 @@ def test_get_instance_id_without_token(mocker): # Reproduce https://github.com/aws/efs-utils/issues/46 def test_get_instance_id_token_fetch_time_out(mocker): # get_aws_ec2_metadata_token timeout, fallback to call without session token - mocker.patch( - "mount_efs.urlopen", side_effect=[socket.timeout, MockUrlLibResponse()] - ) + side_effect = [ + socket.timeout + for _ in range(0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT) + ] + side_effect.append(MockUrlLibResponse()) + mocker.patch("mount_efs.urlopen", side_effect=side_effect) assert INSTANCE_ID == test_get_instance_id_helper() diff --git a/test/mount_efs_test/test_get_target_instance_identity.py b/test/mount_efs_test/test_get_target_instance_identity.py index 21c86247..6700fbea 100644 --- a/test/mount_efs_test/test_get_target_instance_identity.py +++ 
b/test/mount_efs_test/test_get_target_instance_identity.py @@ -113,28 +113,32 @@ def test_get_target_region_without_token(mocker): # Reproduce https://github.com/aws/efs-utils/issues/46 def test_get_target_region_token_endpoint_fetching_timeout(mocker): # get_aws_ec2_metadata_token timeout, fallback to call without session token - mocker.patch( - "mount_efs.urlopen", side_effect=[socket.timeout, MockUrlLibResponse()] - ) + side_effect = [ + socket.timeout + for _ in range(0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT) + ] + side_effect.append(MockUrlLibResponse()) + mocker.patch("mount_efs.urlopen", side_effect=side_effect) assert "us-east-1" == get_target_region_helper() def test_get_target_region_token_fetch_httperror(mocker): - mocker.patch( - "mount_efs.urlopen", - side_effect=[ - HTTPError("url", 405, "Now Allowed", None, None), - MockUrlLibResponse(), - ], - ) + side_effect = [ + HTTPError("url", 405, "Now Allowed", None, None) + for _ in range(0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT) + ] + side_effect.append(MockUrlLibResponse()) + mocker.patch("mount_efs.urlopen", side_effect=side_effect) assert "us-east-1" == get_target_region_helper() def test_get_target_region_token_fetch_unknownerror(mocker): - mocker.patch( - "mount_efs.urlopen", - side_effect=[Exception("Unknown Exception"), MockUrlLibResponse()], - ) + side_effect = [ + Exception("Unknown Exception") + for _ in range(0, mount_efs.DEFAULT_GET_AWS_EC2_METADATA_TOKEN_RETRY_COUNT) + ] + side_effect.append(MockUrlLibResponse()) + mocker.patch("mount_efs.urlopen", side_effect=side_effect) assert "us-east-1" == get_target_region_helper() diff --git a/test/watchdog_test/test_start_tls_tunnel.py b/test/watchdog_test/test_start_tls_tunnel.py index 3b546fee..57e22821 100644 --- a/test/watchdog_test/test_start_tls_tunnel.py +++ b/test/watchdog_test/test_start_tls_tunnel.py @@ -36,12 +36,14 @@ def _initiate_state_file(tmpdir, cmd=None, efs_proxy_enabled=False): state = { "pid": 
PID - 1, - "cmd": cmd - if cmd - else [ - tunnel_executable, - "/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007", - ], + "cmd": ( + cmd + if cmd + else [ + tunnel_executable, + "/var/run/efs/stunnel-config.fs-deadbeef.mnt.21007", + ] + ), } state_file = tempfile.mkstemp(prefix="state", dir=str(tmpdir))[1] with open(state_file, "w") as f: From b6721edc7a924da72c158973bb5eba1a0441284b Mon Sep 17 00:00:00 2001 From: JD Davis Date: Wed, 7 Aug 2024 16:52:25 +0000 Subject: [PATCH 13/51] use dns suffix based on region --- src/mount_efs/__init__.py | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 083e3fa1..29889dbd 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -195,7 +195,7 @@ CREDENTIALS_KEYS = ["AccessKeyId", "SecretAccessKey", "Token"] ECS_TASK_METADATA_API = "http://169.254.170.2" -STS_ENDPOINT_URL_FORMAT = "https://sts.{}.amazonaws.com/" +STS_ENDPOINT_URL_FORMAT = "https://sts.{}.{}/" INSTANCE_METADATA_TOKEN_URL = "http://169.254.169.254/latest/api/token" INSTANCE_METADATA_SERVICE_URL = ( "http://169.254.169.254/latest/dynamic/instance-identity/document/" @@ -404,6 +404,22 @@ def _fatal_error(message): _fatal_error(metadata_exception) +def get_target_domain_suffix(config): + def _fatal_error(): + fatal_error( + 'Error retrieving region. Please set the "dns_name_suffix" parameter ' + "in the efs-utils configuration file." 
+ ) + region = get_target_region(config) + config_section = get_config_section(config, region) + + try: + return config.get(config_section, "dns_name_suffix") + except NoOptionError: + pass + + _fatal_error() + def get_target_az(config, options): if "az" in options: @@ -686,6 +702,7 @@ def get_aws_security_credentials( config, use_iam, region, + dns_name_suffix, awsprofile=None, aws_creds_uri=None, jwt_path=None, @@ -730,6 +747,7 @@ def get_aws_security_credentials( role_arn, jwt_path, region, + dns_name_suffix, False, ) if credentials and credentials_source: @@ -744,6 +762,7 @@ def get_aws_security_credentials( os.environ[WEB_IDENTITY_ROLE_ARN_ENV], os.environ[WEB_IDENTITY_TOKEN_FILE_ENV], region, + dns_name_suffix, False, ) if credentials and credentials_source: @@ -817,7 +836,7 @@ def get_aws_security_credentials_from_ecs(config, aws_creds_uri, is_fatal=False) def get_aws_security_credentials_from_webidentity( - config, role_arn, token_file, region, is_fatal=False + config, role_arn, token_file, region, dns_name_suffix, is_fatal=False ): try: with open(token_file, "r") as f: @@ -829,7 +848,7 @@ def get_aws_security_credentials_from_webidentity( else: return None, None - STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region) + STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region,dns_name_suffix) webidentity_url = ( STS_ENDPOINT_URL + "?" 
@@ -1748,6 +1767,7 @@ def bootstrap_proxy( security_credentials = None client_info = get_client_info(config) region = get_target_region(config) + dns_name_suffix = get_target_domain_suffix(config) if tls_enabled(options): cert_details = {} @@ -1764,7 +1784,7 @@ def bootstrap_proxy( kwargs = {"awsprofile": get_aws_profile(options, use_iam)} security_credentials, credentials_source = get_aws_security_credentials( - config, use_iam, region, **kwargs + config, use_iam, region, dns_name_suffix, **kwargs ) if credentials_source: @@ -2663,7 +2683,8 @@ def _validate_replacement_field_count(format_str, expected_ct): try: az_id = get_az_id_from_instance_metadata(config, options) region = get_target_region(config) - dns_name = "%s.%s.efs.%s.amazonaws.com" % (az_id, fs_id, region) + dns_name_suffix = get_target_domain_suffix(config) + dns_name = "%s.%s.efs.%s.%s" % (az_id, fs_id, region, dns_name_suffix) except RuntimeError: err_msg = "Cannot retrieve AZ-ID from metadata service. This is required for the crossaccount mount option." 
fatal_error(err_msg) From 769637c637b40816edbda87fa6e7c1304f67b9fd Mon Sep 17 00:00:00 2001 From: JD Davis Date: Thu, 8 Aug 2024 19:59:54 +0000 Subject: [PATCH 14/51] adding dns name suffixes for new regions --- dist/efs-utils.conf | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/dist/efs-utils.conf b/dist/efs-utils.conf index 1b6d849e..82bf90ec 100644 --- a/dist/efs-utils.conf +++ b/dist/efs-utils.conf @@ -57,11 +57,9 @@ retry_nfs_mount_command_timeout_sec = 15 [mount.cn-north-1] dns_name_suffix = amazonaws.com.cn - [mount.cn-northwest-1] dns_name_suffix = amazonaws.com.cn - [mount.us-iso-east-1] dns_name_suffix = c2s.ic.gov stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem @@ -74,6 +72,22 @@ stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem dns_name_suffix = sc2s.sgov.gov stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem +[mount.us-isob-west-1] +dns_name_suffix = sc2s.sgov.gov +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + +[mount.us-isof-east-1] +dns_name_suffix = csp.hci.ic.gov +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + +[mount.us-isof-south-1] +dns_name_suffix = csp.hci.ic.gov +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + +[mount.eu-isoe-west-1] +dns_name_suffix = cloud.adc-e.uk +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + [mount-watchdog] enabled = true poll_interval_sec = 1 From 5addd5051d8a9eda20e583ce8a00bb2d9b59596a Mon Sep 17 00:00:00 2001 From: JD Davis Date: Fri, 9 Aug 2024 21:57:39 +0000 Subject: [PATCH 15/51] use region-specific dns suffix for sts --- src/watchdog/__init__.py | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 3247a380..56811073 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -146,7 +146,7 @@ AP_ID_RE = 
re.compile("^fsap-[0-9a-f]{17}$") ECS_TASK_METADATA_API = "http://169.254.170.2" -STS_ENDPOINT_URL_FORMAT = "https://sts.{}.amazonaws.com/" +STS_ENDPOINT_URL_FORMAT = "https://sts.{}.{}/" INSTANCE_IAM_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" INSTANCE_METADATA_TOKEN_URL = "http://169.254.169.254/latest/api/token" SECURITY_CREDS_ECS_URI_HELP_URL = ( @@ -383,8 +383,10 @@ def get_aws_security_credentials_from_webidentity(config, role_arn, token_file, except Exception as e: logging.error("Error reading token file %s: %s", token_file, e) return None + + dns_name_suffix = get_target_domain_suffix(config, region) - STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region) + STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region, dns_name_suffix) webidentity_url = ( STS_ENDPOINT_URL + "?" @@ -497,6 +499,29 @@ def credentials_file_helper(file_path, awsprofile): return credentials +def get_target_domain_suffix(config, region): + def _fatal_error(): + fatal_error( + 'Error retrieving DNS domain suffix for region. Please set the "dns_name_suffix" parameter ' + "in the efs-utils configuration file." 
+ ) + + config_section = get_config_section(config, region) + + try: + return config.get(config_section, "dns_name_suffix") + except NoOptionError: + pass + + _fatal_error() + +def get_config_section(config, region): + region_specific_config_section = "%s.%s" % (MOUNT_CONFIG_SECTION, region) + if config.has_section(region_specific_config_section): + config_section = region_specific_config_section + else: + config_section = MOUNT_CONFIG_SECTION + return config_section def is_instance_metadata_url(url): return url.startswith("http://169.254.169.254") From c25f1f7dbeaa1c273203ac4e167435c6acfff5a6 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Mon, 12 Aug 2024 16:16:12 +0000 Subject: [PATCH 16/51] efs-utils v2.0.4-2 release --- amazon-efs-utils.spec | 15 +++++++++------ build-deb.sh | 2 +- config.ini | 2 +- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index b9672b63..a00768b9 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -42,7 +42,7 @@ Name : amazon-efs-utils Version : 2.0.4 -Release : 1%{platform} +Release : 2%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems Group : Amazon/Tools @@ -73,15 +73,18 @@ Requires(postun) : /sbin/service %endif # RHEL 7 doesn't provide a Rust or Cargo package, -# so users are expected to install it through rustup. -%if ! 0%{?rhel} == 7 +# so users are expected to install it through rustup. +# The conditional here checks for amzn2 because on amzn2, +# '0%{?rhel}' also evaluates to 7. 
+%if 0%{?amzn2} || 0%{?rhel} != 7 BuildRequires : cargo rust %endif + BuildRequires: openssl-devel Source0 : %{name}.tar.gz %if "%{include_vendor_tarball}" == "true" -Source1 : %{proxy_name}-%{$Version}-vendor.tar.xz +Source1 : %{proxy_name}-%{version}-vendor.tar.xz Source2 : config.toml %endif @@ -179,7 +182,7 @@ fi - Check for efs-proxy PIDs when cleaning tunnel state files - Add PID to log entries -* Mon Apr 23 2024 Ryan Stankiewicz - 2.0.1 +* Tue Apr 23 2024 Ryan Stankiewicz - 2.0.1 - Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies * Mon Apr 08 2024 Ryan Stankiewicz - 2.0.0 @@ -203,7 +206,7 @@ fi - Add debug statement for size of state file write - Add parameters in mount options for assume web role with web identity -* Wed Jan 1 2023 Ryan Stankiewicz - 1.34.5 +* Wed Jan 4 2023 Ryan Stankiewicz - 1.34.5 - Watchdog detect empty private key and regenerate - Update man page - Avoid redundant get_target_region call diff --git a/build-deb.sh b/build-deb.sh index b5e64432..278f2505 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -12,7 +12,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild VERSION=2.0.4 -RELEASE=1 +RELEASE=2 DEB_SYSTEM_RELEASE_PATH=/etc/os-release echo 'Cleaning deb build workspace' diff --git a/config.ini b/config.ini index 79723cdb..a57dd738 100644 --- a/config.ini +++ b/config.ini @@ -8,4 +8,4 @@ [global] version=2.0.4 -release=1 +release=2 From 1477f557f9f0e1a8897c82d05a58f37e4f920cb5 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Mon, 12 Aug 2024 20:21:57 +0000 Subject: [PATCH 17/51] Update rust requirements --- .circleci/config.yml | 10 +++++----- README.md | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 64cf3d45..04c22ea8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -99,7 +99,7 @@ commands: - run: name: Install dependencies command: | - yum install -y curl + yum install --skip-broken -y curl - run: name: 
Install latest Rust command: | @@ -265,10 +265,10 @@ workflows: - build-rpm-package: name: rocky8 image: rockylinux/rockylinux:8 - - build-rpm-package: + - build-rpm-package-rustup: name: amazon-linux-latest image: amazonlinux:latest - - build-rpm-package: + - build-rpm-package-rustup: name: amazon-linux-2 image: amazonlinux:2 - build-rpm-package: @@ -292,10 +292,10 @@ workflows: - build-rpm-package-rustup: name: fedora34 image: fedora:34 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora35 image: fedora:35 - - build-rpm-package: + - build-rpm-package-rustup: name: fedora36 image: fedora:36 - build-suse-rpm-package: diff --git a/README.md b/README.md index 9824a0c7..c9ef9e63 100644 --- a/README.md +++ b/README.md @@ -82,7 +82,7 @@ The `efs-utils` package has been verified against the following MacOS distributi * OpenSSL-devel 1.0.2+ * Python 3.7/3.8 * `stunnel` 4.56+ -- `rust` 1.68+ +- `rust` 1.70+ - `cargo` ## Optional @@ -117,7 +117,7 @@ for more guidance.) Other distributions require building the package from source and installing it. If your distribution doesn't provide a rust or cargo package, or it provides versions -that are older than 1.68, then you can install rust and cargo through rustup: +that are older than 1.70, then you can install rust and cargo through rustup: ```bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh . "$HOME/.cargo/env" @@ -167,7 +167,7 @@ $ sudo apt-get -y install ./build/amazon-efs-utils*deb ``` If your Debian distribution doesn't provide a rust or cargo package, or your distribution provides versions -that are older than 1.68, then you can install rust and cargo through rustup: +that are older than 1.70, then you can install rust and cargo through rustup: ```bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh . 
"$HOME/.cargo/env" @@ -177,7 +177,7 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh **`make rpm` fails due to "feature `edition2021` is required"**: Update to a version of rust and cargo -that is newer than 1.68. To install a new version of rust and cargo, run +that is newer than 1.70. To install a new version of rust and cargo, run ```bash curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh . "$HOME/.cargo/env" @@ -372,7 +372,7 @@ To utilize the improved performance benefits of efs-proxy, you must re-mount any Efs-proxy is not compatible with OCSP or Mac clients. In these cases, efs-utils will automatically revert back to using stunnel. -If you are building efs-utils v2.0.0 from source, then you need Rust and Cargo >= 1.68. +If you are building efs-utils v2.0.0 from source, then you need Rust and Cargo >= 1.70. ## Upgrading stunnel for RHEL/CentOS From d41437168b7e001d0b23ff2325452e9dae540256 Mon Sep 17 00:00:00 2001 From: retornam Date: Wed, 21 Aug 2024 19:14:37 -0700 Subject: [PATCH 18/51] Add rpm-without-system-rust to rpmspec Updated the rpmspec with the following conditions - with system_rust - without system_rust [1] is the default and builds the rpmspec with rustc and cargo provided they are installed with yum [2] builds the rpmspecfile with rustc and cargo installed outside of yum (in cases where rustup was used to install both rustc and cargo) [1] and [2] makes it possible to build both instances using the Makefile. 
The old makefile command make rpm stays the same but assumes [1] and the new command make rpm-without-system-rust assumes [2] --- Makefile | 7 ++++++- amazon-efs-utils.spec | 39 ++++++++++++++++++++++++++++++--------- 2 files changed, 36 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 3cc4e47f..bc39b058 100644 --- a/Makefile +++ b/Makefile @@ -11,6 +11,7 @@ SOURCE_TARBALL = $(PACKAGE_NAME).tar.gz SPECFILE = $(PACKAGE_NAME).spec BUILD_DIR = build/rpmbuild PROXY_VERSION = 2.0.0 +RPM_BUILD_FLAGS ?= --with system_rust export PYTHONPATH := $(shell pwd)/src .PHONY: clean @@ -48,12 +49,16 @@ rpm-only: cp $(SPECFILE) $(BUILD_DIR)/SPECS cp $(SOURCE_TARBALL) $(BUILD_DIR)/SOURCES cp config.toml $(BUILD_DIR)/SOURCES - rpmbuild -ba --define "_topdir `pwd`/$(BUILD_DIR)" --define "include_vendor_tarball false" $(BUILD_DIR)/SPECS/$(SPECFILE) + rpmbuild -ba --define "_topdir `pwd`/$(BUILD_DIR)" --define "include_vendor_tarball false" $(BUILD_DIR)/SPECS/$(SPECFILE) $(RPM_BUILD_FLAGS) cp $(BUILD_DIR)/RPMS/*/*rpm build .PHONY: rpm rpm: sources rpm-only +.PHONY: rpm-without-system-rust +rpm-without-system-rust: sources + $(MAKE) rpm-only RPM_BUILD_FLAGS="--without system_rust" + .PHONY: deb deb: ./build-deb.sh diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index a00768b9..68e96fe3 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -72,12 +72,14 @@ Requires(preun) : /sbin/service /sbin/chkconfig Requires(postun) : /sbin/service %endif -# RHEL 7 doesn't provide a Rust or Cargo package, -# so users are expected to install it through rustup. -# The conditional here checks for amzn2 because on amzn2, -# '0%{?rhel}' also evaluates to 7. 
-%if 0%{?amzn2} || 0%{?rhel} != 7 -BuildRequires : cargo rust +# Conditional to allow building without +# rust installed with yum +%bcond_without system_rust + +# If yum provides rust and cargo +# use rpmbuild --with system_rust +%if %{with system_rust} +BuildRequires: cargo rust %endif BuildRequires: openssl-devel @@ -94,6 +96,25 @@ This package provides utilities for simplifying the use of EFS file systems %global debug_package %{nil} %prep + +# If yum doesn't provides rust and cargo +# use rpmbuild --without system_rust +%if %{without system_rust} +source $HOME/.cargo/env || true +%endif + +# Ensure cargo is installed +if ! command -v cargo &> /dev/null; then + echo "Error: cargo is not in PATH. Please install cargo." + exit 1 +fi + +# Ensure rustc is installed +if ! command -v rustc &> /dev/null; then + echo "Error: rustc is not PATH. Please install rustc." + exit 1 +fi + %setup -n %{name} mkdir -p %{_builddir}/%{name}/src/proxy/.cargo %if "%{include_vendor_tarball}" == "true" @@ -176,17 +197,17 @@ fi * Tue Jun 18 2024 Arnav Gupta - 2.0.3 - Upgrade py version -- Replace deprecated usage of datetime +- Replace deprecated usage of datetime * Mon May 20 2024 Anthony Tse - 2.0.2 - Check for efs-proxy PIDs when cleaning tunnel state files - Add PID to log entries * Tue Apr 23 2024 Ryan Stankiewicz - 2.0.1 -- Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies +- Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies * Mon Apr 08 2024 Ryan Stankiewicz - 2.0.0 -- Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. +- Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. 
* Mon Mar 18 2024 Sean Zatz - 1.36.0 - Support new mount option: crossaccount, conduct cross account mounts via ip address. Use client AZ-ID to choose mount target. From 2911904e3d1a2e6a34669d7cf09e730218ed58d8 Mon Sep 17 00:00:00 2001 From: JD Davis Date: Tue, 17 Sep 2024 13:37:10 +0000 Subject: [PATCH 19/51] adding unit tests --- test/mount_efs_test/test_bootstrap_proxy.py | 4 ++++ .../test_get_aws_security_credentials.py | 23 ++++++++++--------- ...me_and_fallback_mount_target_ip_address.py | 7 +++++- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/test/mount_efs_test/test_bootstrap_proxy.py b/test/mount_efs_test/test_bootstrap_proxy.py index d56f3e53..739c416d 100644 --- a/test/mount_efs_test/test_bootstrap_proxy.py +++ b/test/mount_efs_test/test_bootstrap_proxy.py @@ -16,6 +16,7 @@ DNS_NAME = "%s.efs.us-east-1.amazonaws.com" % FS_ID MOUNT_POINT = "/mnt" REGION = "us-east-1" +DOMAIN_SUFFIX = "amazonaws.com" DEFAULT_TLS_PORT = 20049 @@ -40,6 +41,7 @@ def setup_mocks(mocker): return_value=(DNS_NAME, None), ) mocker.patch("mount_efs.get_target_region", return_value=REGION) + mocker.patch("mount_efs.get_target_domain_suffix", return_value=DOMAIN_SUFFIX) mocker.patch("mount_efs.write_tunnel_state_file", return_value="~mocktempfile") mocker.patch("mount_efs.create_certificate") mocker.patch("os.rename") @@ -139,6 +141,7 @@ def test_bootstrap_proxy_cert_created_tls_mount(mocker, tmpdir): setup_mocks_without_popen(mocker) mocker.patch("mount_efs.get_mount_specific_filename", return_value=DNS_NAME) mocker.patch("mount_efs.get_target_region", return_value=REGION) + mocker.patch("mount_efs.get_target_domain_suffix", return_value=DOMAIN_SUFFIX) state_file_dir = str(tmpdir) tls_dict = mount_efs.tls_paths_dictionary(DNS_NAME + "+", state_file_dir) mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) @@ -184,6 +187,7 @@ def test_bootstrap_proxy_cert_not_created_non_tls_mount(mocker, tmpdir): setup_mocks_without_popen(mocker) 
mocker.patch("mount_efs.get_mount_specific_filename", return_value=DNS_NAME) mocker.patch("mount_efs.get_target_region", return_value=REGION) + mocker.patch("mount_efs.get_target_domain_suffix", return_value=DOMAIN_SUFFIX) state_file_dir = str(tmpdir) tls_dict = mount_efs.tls_paths_dictionary(DNS_NAME + "+", state_file_dir) diff --git a/test/mount_efs_test/test_get_aws_security_credentials.py b/test/mount_efs_test/test_get_aws_security_credentials.py index e931f32e..a0798dc7 100644 --- a/test/mount_efs_test/test_get_aws_security_credentials.py +++ b/test/mount_efs_test/test_get_aws_security_credentials.py @@ -102,7 +102,7 @@ def test_get_aws_security_credentials_config_or_creds_file_found_creds_found_wit mocker.patch("mount_efs.credentials_file_helper", return_value=file_helper_resp) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "test_profile" + config, True, "us-east-1", "amazonaws.com", "test_profile" ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -126,7 +126,7 @@ def test_get_aws_security_credentials_config_or_creds_file_found_creds_found_wit mocker.patch("mount_efs.credentials_file_helper", return_value=file_helper_resp) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "test_profile" + config, True, "us-east-1", "amazonaws.com", "test_profile" ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -138,7 +138,7 @@ def test_get_aws_security_credentials_config_or_creds_file_found_creds_found_wit def test_get_aws_security_credentials_do_not_use_iam(): config = get_fake_config() credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, False, "us-east-1", "test_profile" + config, False, "us-east-1", "amazonaws.com", "test_profile" ) assert not credentials @@ -165,7 +165,7 @@ def _test_get_aws_security_credentials_get_ecs_from_env_url(mocker): mocker.patch("mount_efs.urlopen", 
return_value=MockUrlLibResponse(data=response)) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", None + config, True, "us-east-1", "amazonaws.com", None ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -187,7 +187,7 @@ def test_get_aws_security_credentials_get_ecs_from_option_url(mocker): ) mocker.patch("mount_efs.urlopen", return_value=MockUrlLibResponse(data=response)) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", None, AWSCREDSURI + config, True, "us-east-1", "amazonaws.com", None, AWSCREDSURI ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -279,7 +279,7 @@ def _test_get_aws_security_credentials_get_instance_metadata_role_name( mocker.patch("mount_efs.urlopen", side_effect=side_effects) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", None + config, True, "us-east-1", "amazonaws.com", None ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -295,7 +295,7 @@ def test_get_aws_security_credentials_no_credentials_found(mocker, capsys): mocker.patch("mount_efs.urlopen") with pytest.raises(SystemExit) as ex: - mount_efs.get_aws_security_credentials(config, True, "us-east-1", None) + mount_efs.get_aws_security_credentials(config, True, "us-east-1", "amazonaws.com", None) assert 0 != ex.value.code @@ -320,7 +320,7 @@ def test_get_aws_security_credentials_credentials_not_found_in_files_and_botocor mount_efs.BOTOCORE_PRESENT = False with pytest.raises(SystemExit) as ex: - mount_efs.get_aws_security_credentials(config, True, "us-east-1", "default") + mount_efs.get_aws_security_credentials(config, True, "us-east-1", "amazonaws.com", "default") assert 0 != ex.value.code @@ -348,7 +348,7 @@ def test_get_aws_security_credentials_botocore_present_get_assumed_profile_crede ) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", 
awsprofile="test-profile" + config, True, "us-east-1", "amazonaws.com", awsprofile="test-profile" ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL assert credentials["SecretAccessKey"] == SECRET_ACCESS_KEY_VAL @@ -365,7 +365,7 @@ def test_get_aws_security_credentials_credentials_not_found_in_aws_creds_uri( with pytest.raises(SystemExit) as ex: mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "default", AWSCREDSURI + config, True, "us-east-1", "amazonaws.com", "default", AWSCREDSURI ) assert 0 != ex.value.code @@ -474,6 +474,7 @@ def test_get_aws_security_credentials_from_webidentity_passed_in_both_params(moc config, True, "us-east-1", + "amazonaws.com", jwt_path=WEB_IDENTITY_TOKEN_FILE, role_arn=WEB_IDENTITY_ROLE_ARN, ) @@ -506,7 +507,7 @@ def test_get_aws_security_credentials_from_webidentity_passed_in_one_param( with pytest.raises(SystemExit) as ex: mount_efs.get_aws_security_credentials( - config, True, "us-east-1", jwt_path=WEB_IDENTITY_TOKEN_FILE + config, True, "us-east-1", "amazonaws.com", jwt_path=WEB_IDENTITY_TOKEN_FILE ) assert 0 != ex.value.code diff --git a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py index 59a185e7..7de5e02b 100644 --- a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py +++ b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py @@ -28,9 +28,14 @@ "cn-north-1": "amazonaws.com.cn", "cn-northwest-1": "amazonaws.com.cn", "us-iso-east-1": "c2s.ic.gov", + "us-iso-west-1": "c2s.ic.gov", "us-isob-east-1": "sc2s.sgov.gov", + "us-isob-west-1": "sc2s.sgov.gov", + "us-isof-south-1": "csp.hci.ic.gov", + "us-isof-east-1": "csp.hci.ic.gov", + "eu-isoe-west-1": "cloud.adc-e.uk" } -SPECIAL_REGIONS = ["cn-north-1", "cn-northwest-1", "us-iso-east-1", "us-isob-east-1"] +SPECIAL_REGIONS = ["cn-north-1", "cn-northwest-1", "us-iso-east-1", "us-iso-west-1", 
"us-isob-east-1", "us-isob-west-1", "us-isof-south-1", "us-isof-east-1", "eu-isoe-west-1"] DEFAULT_NFS_OPTIONS = {} OPTIONS_WITH_AZ = {"az": DEFAULT_AZ} OPTIONS_WITH_IP = {"mounttargetip": IP_ADDRESS} From c08abb18a69f075d1074e48edefa5ed8bc081fbd Mon Sep 17 00:00:00 2001 From: Julie Rakas Date: Mon, 23 Sep 2024 17:07:10 +0000 Subject: [PATCH 20/51] efs-utils v2.1.0-1 release - Support region as a mount option (PR-171) - Add new regions to efs-utils.conf file (PR-241) --- .circleci/config.yml | 9 ++++++ README.md | 8 ++++- amazon-efs-utils.spec | 14 +++++---- build-deb.sh | 4 +-- config.ini | 4 +-- dist/amazon-efs-utils.control | 2 +- dist/efs-utils.conf | 16 ++++++++++ man/mount.efs.8 | 3 ++ requirements.txt | 30 +++++++------------ src/mount_efs/__init__.py | 19 +++++++----- src/proxy/Cargo.toml | 4 +-- src/watchdog/__init__.py | 2 +- .../test_get_target_instance_identity.py | 16 ++++++---- 13 files changed, 84 insertions(+), 47 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 04c22ea8..e7179b56 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -232,6 +232,15 @@ jobs: workflows: workflow: jobs: + - test: + name: python3_12 + image: python:3.12.4 + - test: + name: python3_11 + image: python:3.11.9 + - test: + name: python3_10 + image: python:3.10.13 - test: name: python3_9 image: python:3.9.13 diff --git a/README.md b/README.md index c9ef9e63..dbe31e2e 100644 --- a/README.md +++ b/README.md @@ -260,12 +260,18 @@ To mount file system within a given network namespace, run: $ sudo mount -t efs -o netns=netns-path file-system-id efs-mount-point/ ``` -To mount file system to the mount target in specific availability zone (e.g. us-east-1a), run: +To mount file system to the mount target in a specific availability zone (e.g. us-east-1a), run: ```bash $ sudo mount -t efs -o az=az-name file-system-id efs-mount-point/ ``` +To mount file system to the mount target in a specific region (e.g. 
us-east-1), run: + +```bash +$ sudo mount -t efs -o region=region-name file-system-id efs-mount-point/ +``` + **Note: The [prequisites in the crossaccount section below](#crossaccount-option-prerequisites) must be completed before using the crossaccount option.** To mount the filesystem mount target in the same physical availability zone ID (e.g. use1-az1) as the client instance over cross-AWS-account mounts, run: diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index 68e96fe3..bbd16d3c 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,8 +41,8 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.0.4 -Release : 2%{platform} +Version : 2.1.0 +Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems Group : Amazon/Tools @@ -192,22 +192,26 @@ fi %clean %changelog +* Wed Sep 18 2024 Julie Rakas - 2.1.0 +- Add mount option for specifying region +- Add new ISO regions to config file + * Tue Jun 25 2024 Anthony Tse - 2.0.4 - Add retry logic to and increase timeout for EC2 metadata token retrieval requests * Tue Jun 18 2024 Arnav Gupta - 2.0.3 - Upgrade py version -- Replace deprecated usage of datetime +- Replace deprecated usage of datetime * Mon May 20 2024 Anthony Tse - 2.0.2 - Check for efs-proxy PIDs when cleaning tunnel state files - Add PID to log entries * Tue Apr 23 2024 Ryan Stankiewicz - 2.0.1 -- Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies +- Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies * Mon Apr 08 2024 Ryan Stankiewicz - 2.0.0 -- Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. +- Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. 
Efs-proxy lays the foundation for upcoming feature launches at EFS. * Mon Mar 18 2024 Sean Zatz - 1.36.0 - Support new mount option: crossaccount, conduct cross account mounts via ip address. Use client AZ-ID to choose mount target. diff --git a/build-deb.sh b/build-deb.sh index 278f2505..76be9aff 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,8 +11,8 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.0.4 -RELEASE=2 +VERSION=2.1.0 +RELEASE=1 DEB_SYSTEM_RELEASE_PATH=/etc/os-release echo 'Cleaning deb build workspace' diff --git a/config.ini b/config.ini index a57dd738..86a460dd 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.0.4 -release=2 +version=2.1.0 +release=1 diff --git a/dist/amazon-efs-utils.control b/dist/amazon-efs-utils.control index 7734f8e6..d3f7b0c0 100644 --- a/dist/amazon-efs-utils.control +++ b/dist/amazon-efs-utils.control @@ -1,6 +1,6 @@ Package: amazon-efs-utils Architecture: all -Version: 2.0.4 +Version: 2.1.0 Section: utils Depends: python3, nfs-common, stunnel4 (>= 4.56), openssl (>= 1.0.2), util-linux Priority: optional diff --git a/dist/efs-utils.conf b/dist/efs-utils.conf index 1b6d849e..5d9482fa 100644 --- a/dist/efs-utils.conf +++ b/dist/efs-utils.conf @@ -74,6 +74,22 @@ stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem dns_name_suffix = sc2s.sgov.gov stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem +[mount.us-isob-west-1] +dns_name_suffix = sc2s.sgov.gov +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + +[mount.us-isof-east-1] +dns_name_suffix = csp.hci.ic.gov +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + +[mount.us-isof-south-1] +dns_name_suffix = csp.hci.ic.gov +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + +[mount.eu-isoe-west-1] +dns_name_suffix = cloud.adc-e.uk +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + [mount-watchdog] enabled = true 
poll_interval_sec = 1 diff --git a/man/mount.efs.8 b/man/mount.efs.8 index f962fd97..a3a807a7 100644 --- a/man/mount.efs.8 +++ b/man/mount.efs.8 @@ -79,6 +79,9 @@ this option is by default passed and the EFS file system is mounted over TLS\&. \fBnotls\fR Mounts the EFS file system without TLS, applies for Mac distributions only\&. .TP +\fBregion\fR +Mounts the EFS file system from the specified region, overriding any config file value\&. +.TP \fBtlsport=\fR\fIn\fR Configures the proxy process to listen for connections from the NFS client on the specified port\&. This is applicable to both non-tls and tls mounts. By default, the \ diff --git a/requirements.txt b/requirements.txt index 05dffd55..ccc3de50 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,20 +1,10 @@ -attrs==17.4.0 -botocore==1.17.53 -configparser==3.5.0 -coverage==4.5.4 -enum34==1.1.6 -flake8==3.7.9 -funcsigs==1.0.2 -mccabe==0.6.1 -mock==2.0.0 -pbr==3.1.1 -pluggy==0.13.0 -py==1.11.0 -pycodestyle==2.5.0 -pyflakes==2.1.1 -pytest==4.6.7 -pytest-cov==2.8.1 -pytest-html==1.19.0 -pytest-metadata==1.7.0 -pytest-mock==1.11.2 -six==1.11.0 +botocore == 1.34.140 +configparser == 7.0.0 +coverage == 7.6.0 +flake8 == 7.1.0 +pytest == 8.2.2 +pytest-cov == 5.0.0 +pytest-html == 4.1.1 +pytest-metadata == 3.1.1 +pytest-mock == 3.14.0 +mock == 5.1.0 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 083e3fa1..675a18ca 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.0.4" +VERSION = "2.1.0" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -242,6 +242,7 @@ "noocsp", "notls", "ocsp", + "region", "tls", "tlsport", "verify", @@ -370,14 +371,18 @@ def fatal_error(user_message, log_message=None, exit_code=1): sys.exit(exit_code) -def get_target_region(config): +def get_target_region(config, options): def _fatal_error(message): fatal_error( 'Error retrieving region. 
Please set the "region" parameter ' - "in the efs-utils configuration file.", + "in the efs-utils configuration file or specify it as a " + "mount option.", message, ) + if "region" in options: + return options.get("region") + try: return config.get(CONFIG_SECTION, "region") except NoOptionError: @@ -1747,7 +1752,7 @@ def bootstrap_proxy( cert_details = None security_credentials = None client_info = get_client_info(config) - region = get_target_region(config) + region = get_target_region(config, options) if tls_enabled(options): cert_details = {} @@ -2662,7 +2667,7 @@ def _validate_replacement_field_count(format_str, expected_ct): if options and "crossaccount" in options: try: az_id = get_az_id_from_instance_metadata(config, options) - region = get_target_region(config) + region = get_target_region(config, options) dns_name = "%s.%s.efs.%s.amazonaws.com" % (az_id, fs_id, region) except RuntimeError: err_msg = "Cannot retrieve AZ-ID from metadata service. This is required for the crossaccount mount option." @@ -2687,7 +2692,7 @@ def _validate_replacement_field_count(format_str, expected_ct): if "{region}" in dns_name_format: expected_replacement_field_ct += 1 - format_args["region"] = get_target_region(config) + format_args["region"] = get_target_region(config, options) if "{dns_name_suffix}" in dns_name_format: expected_replacement_field_ct += 1 @@ -3380,7 +3385,7 @@ def get_botocore_client(config, service, options): botocore_config = botocore.config.Config(use_fips_endpoint=True) session = botocore.session.get_session() - region = get_target_region(config) + region = get_target_region(config, options) if options and options.get("awsprofile"): profile = options.get("awsprofile") diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index b5f1d721..602677c5 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.0.4" +version = "2.1.0" publish = false [dependencies] @@ -25,7 +25,7 @@ s2n-tls-sys = "0.0" serde = {version="1.0.175",features=["derive"]} serde_ini = "0.2.0" thiserror = "1.0.44" -tokio = { version = "1.29.0", features = ["full"] } +tokio = { version = "1.29.0, <1.39", features = ["full"] } tokio-util = "0.7.8" uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics"]} xdr-codec = "0.4.4" diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 3247a380..d9f620aa 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.0.4" +VERSION = "2.1.0" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" diff --git a/test/mount_efs_test/test_get_target_instance_identity.py b/test/mount_efs_test/test_get_target_instance_identity.py index 6700fbea..e3672793 100644 --- a/test/mount_efs_test/test_get_target_instance_identity.py +++ b/test/mount_efs_test/test_get_target_instance_identity.py @@ -84,9 +84,9 @@ def get_config(dns_name_format, region=None): return config -def get_target_region_helper(): +def get_target_region_helper(options={}): config = get_config(DEFAULT_DNS_NAME_FORMAT) - return mount_efs.get_target_region(config) + return mount_efs.get_target_region(config, options) def get_target_az_helper(options={}): @@ -166,7 +166,7 @@ def test_get_target_region_from_metadata(mocker): mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value=None) mocker.patch("mount_efs.urlopen", return_value=MockUrlLibResponse()) config = get_config("{fs_id}.efs.{region}.{dns_name_suffix}", None) - assert TARGET_REGION == mount_efs.get_target_region(config) + assert TARGET_REGION == mount_efs.get_target_region(config, {}) def test_get_target_region_config_metadata_unavailable(mocker, capsys): @@ -174,7 +174,7 @@ def test_get_target_region_config_metadata_unavailable(mocker, capsys): 
mocker.patch("mount_efs.urlopen", side_effect=URLError("test error")) config = get_config("{fs_id}.efs.{region}.{dns_name_suffix}") with pytest.raises(SystemExit) as ex: - mount_efs.get_target_region(config) + mount_efs.get_target_region(config, {}) assert 0 != ex.value.code out, err = capsys.readouterr() @@ -232,13 +232,13 @@ def test_get_target_region_missing_region(mocker, capsys): def test_get_target_region_from_config_variable(mocker): config = get_config("{az}.{fs_id}.efs.us-east-2.{dns_name_suffix}", TARGET_REGION) - assert TARGET_REGION == mount_efs.get_target_region(config) + assert TARGET_REGION == mount_efs.get_target_region(config, {}) def _test_get_target_region_from_dns_format(mocker, config): mocker.patch("mount_efs.get_aws_ec2_metadata_token", return_value=None) mocker.patch("mount_efs.urlopen", side_effect=URLError("test error")) - assert TARGET_REGION == mount_efs.get_target_region(config) + assert TARGET_REGION == mount_efs.get_target_region(config, {}) def test_get_target_region_from_legacy_dns_name_format(mocker): @@ -277,3 +277,7 @@ def test_get_target_az_not_present_in_options_and_instance_metadata(mocker): def test_get_target_az_from_options(mocker): assert TARGET_AZ == get_target_az_helper(options={"az": TARGET_AZ}) + + +def test_get_target_region_from_options(mocker): + assert TARGET_REGION == get_target_region_helper(options={"region": TARGET_REGION}) From 460841be3635390108e87e01ef00bc2b97301a7d Mon Sep 17 00:00:00 2001 From: JD Davis Date: Mon, 30 Sep 2024 16:03:11 +0000 Subject: [PATCH 21/51] fixing conflicts --- src/mount_efs/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 29889dbd..cdf3e68d 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -1766,7 +1766,7 @@ def bootstrap_proxy( cert_details = None security_credentials = None client_info = get_client_info(config) - region = get_target_region(config) + region = 
get_target_region(config, options) dns_name_suffix = get_target_domain_suffix(config) if tls_enabled(options): From 564763711c4da6551f21a914c8ede677a3da8988 Mon Sep 17 00:00:00 2001 From: Sean Zatz <141778948+seanzatzdev-amazon@users.noreply.github.com> Date: Tue, 1 Oct 2024 15:02:52 -0400 Subject: [PATCH 22/51] Revert "Adding region-specifc domain suffix for sts endpoints and adding new regions and domain suffixes" --- dist/efs-utils.conf | 2 + src/mount_efs/__init__.py | 37 ++----------------- src/watchdog/__init__.py | 29 +-------------- test/mount_efs_test/test_bootstrap_proxy.py | 4 -- .../test_get_aws_security_credentials.py | 23 ++++++------ ...me_and_fallback_mount_target_ip_address.py | 7 +--- 6 files changed, 20 insertions(+), 82 deletions(-) diff --git a/dist/efs-utils.conf b/dist/efs-utils.conf index 82bf90ec..5d9482fa 100644 --- a/dist/efs-utils.conf +++ b/dist/efs-utils.conf @@ -57,9 +57,11 @@ retry_nfs_mount_command_timeout_sec = 15 [mount.cn-north-1] dns_name_suffix = amazonaws.com.cn + [mount.cn-northwest-1] dns_name_suffix = amazonaws.com.cn + [mount.us-iso-east-1] dns_name_suffix = c2s.ic.gov stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index db89d19a..675a18ca 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -195,7 +195,7 @@ CREDENTIALS_KEYS = ["AccessKeyId", "SecretAccessKey", "Token"] ECS_TASK_METADATA_API = "http://169.254.170.2" -STS_ENDPOINT_URL_FORMAT = "https://sts.{}.{}/" +STS_ENDPOINT_URL_FORMAT = "https://sts.{}.amazonaws.com/" INSTANCE_METADATA_TOKEN_URL = "http://169.254.169.254/latest/api/token" INSTANCE_METADATA_SERVICE_URL = ( "http://169.254.169.254/latest/dynamic/instance-identity/document/" @@ -409,22 +409,6 @@ def _fatal_error(message): _fatal_error(metadata_exception) -def get_target_domain_suffix(config): - def _fatal_error(): - fatal_error( - 'Error retrieving region. 
Please set the "dns_name_suffix" parameter ' - "in the efs-utils configuration file." - ) - region = get_target_region(config) - config_section = get_config_section(config, region) - - try: - return config.get(config_section, "dns_name_suffix") - except NoOptionError: - pass - - _fatal_error() - def get_target_az(config, options): if "az" in options: @@ -707,7 +691,6 @@ def get_aws_security_credentials( config, use_iam, region, - dns_name_suffix, awsprofile=None, aws_creds_uri=None, jwt_path=None, @@ -752,7 +735,6 @@ def get_aws_security_credentials( role_arn, jwt_path, region, - dns_name_suffix, False, ) if credentials and credentials_source: @@ -767,7 +749,6 @@ def get_aws_security_credentials( os.environ[WEB_IDENTITY_ROLE_ARN_ENV], os.environ[WEB_IDENTITY_TOKEN_FILE_ENV], region, - dns_name_suffix, False, ) if credentials and credentials_source: @@ -841,7 +822,7 @@ def get_aws_security_credentials_from_ecs(config, aws_creds_uri, is_fatal=False) def get_aws_security_credentials_from_webidentity( - config, role_arn, token_file, region, dns_name_suffix, is_fatal=False + config, role_arn, token_file, region, is_fatal=False ): try: with open(token_file, "r") as f: @@ -853,7 +834,7 @@ def get_aws_security_credentials_from_webidentity( else: return None, None - STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region,dns_name_suffix) + STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region) webidentity_url = ( STS_ENDPOINT_URL + "?" 
@@ -1772,10 +1753,6 @@ def bootstrap_proxy( security_credentials = None client_info = get_client_info(config) region = get_target_region(config, options) -<<<<<<< HEAD - dns_name_suffix = get_target_domain_suffix(config) -======= ->>>>>>> upstream/master if tls_enabled(options): cert_details = {} @@ -1792,7 +1769,7 @@ def bootstrap_proxy( kwargs = {"awsprofile": get_aws_profile(options, use_iam)} security_credentials, credentials_source = get_aws_security_credentials( - config, use_iam, region, dns_name_suffix, **kwargs + config, use_iam, region, **kwargs ) if credentials_source: @@ -2690,14 +2667,8 @@ def _validate_replacement_field_count(format_str, expected_ct): if options and "crossaccount" in options: try: az_id = get_az_id_from_instance_metadata(config, options) -<<<<<<< HEAD - region = get_target_region(config) - dns_name_suffix = get_target_domain_suffix(config) - dns_name = "%s.%s.efs.%s.%s" % (az_id, fs_id, region, dns_name_suffix) -======= region = get_target_region(config, options) dns_name = "%s.%s.efs.%s.amazonaws.com" % (az_id, fs_id, region) ->>>>>>> upstream/master except RuntimeError: err_msg = "Cannot retrieve AZ-ID from metadata service. This is required for the crossaccount mount option." 
fatal_error(err_msg) diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 4d14de3c..d9f620aa 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -146,7 +146,7 @@ AP_ID_RE = re.compile("^fsap-[0-9a-f]{17}$") ECS_TASK_METADATA_API = "http://169.254.170.2" -STS_ENDPOINT_URL_FORMAT = "https://sts.{}.{}/" +STS_ENDPOINT_URL_FORMAT = "https://sts.{}.amazonaws.com/" INSTANCE_IAM_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" INSTANCE_METADATA_TOKEN_URL = "http://169.254.169.254/latest/api/token" SECURITY_CREDS_ECS_URI_HELP_URL = ( @@ -383,10 +383,8 @@ def get_aws_security_credentials_from_webidentity(config, role_arn, token_file, except Exception as e: logging.error("Error reading token file %s: %s", token_file, e) return None - - dns_name_suffix = get_target_domain_suffix(config, region) - STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region, dns_name_suffix) + STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region) webidentity_url = ( STS_ENDPOINT_URL + "?" @@ -499,29 +497,6 @@ def credentials_file_helper(file_path, awsprofile): return credentials -def get_target_domain_suffix(config, region): - def _fatal_error(): - fatal_error( - 'Error retrieving DNS domain suffix for region. Please set the "dns_name_suffix" parameter ' - "in the efs-utils configuration file." 
- ) - - config_section = get_config_section(config, region) - - try: - return config.get(config_section, "dns_name_suffix") - except NoOptionError: - pass - - _fatal_error() - -def get_config_section(config, region): - region_specific_config_section = "%s.%s" % (MOUNT_CONFIG_SECTION, region) - if config.has_section(region_specific_config_section): - config_section = region_specific_config_section - else: - config_section = MOUNT_CONFIG_SECTION - return config_section def is_instance_metadata_url(url): return url.startswith("http://169.254.169.254") diff --git a/test/mount_efs_test/test_bootstrap_proxy.py b/test/mount_efs_test/test_bootstrap_proxy.py index 739c416d..d56f3e53 100644 --- a/test/mount_efs_test/test_bootstrap_proxy.py +++ b/test/mount_efs_test/test_bootstrap_proxy.py @@ -16,7 +16,6 @@ DNS_NAME = "%s.efs.us-east-1.amazonaws.com" % FS_ID MOUNT_POINT = "/mnt" REGION = "us-east-1" -DOMAIN_SUFFIX = "amazonaws.com" DEFAULT_TLS_PORT = 20049 @@ -41,7 +40,6 @@ def setup_mocks(mocker): return_value=(DNS_NAME, None), ) mocker.patch("mount_efs.get_target_region", return_value=REGION) - mocker.patch("mount_efs.get_target_domain_suffix", return_value=DOMAIN_SUFFIX) mocker.patch("mount_efs.write_tunnel_state_file", return_value="~mocktempfile") mocker.patch("mount_efs.create_certificate") mocker.patch("os.rename") @@ -141,7 +139,6 @@ def test_bootstrap_proxy_cert_created_tls_mount(mocker, tmpdir): setup_mocks_without_popen(mocker) mocker.patch("mount_efs.get_mount_specific_filename", return_value=DNS_NAME) mocker.patch("mount_efs.get_target_region", return_value=REGION) - mocker.patch("mount_efs.get_target_domain_suffix", return_value=DOMAIN_SUFFIX) state_file_dir = str(tmpdir) tls_dict = mount_efs.tls_paths_dictionary(DNS_NAME + "+", state_file_dir) mocker.patch("mount_efs.is_ocsp_enabled", return_value=False) @@ -187,7 +184,6 @@ def test_bootstrap_proxy_cert_not_created_non_tls_mount(mocker, tmpdir): setup_mocks_without_popen(mocker) 
mocker.patch("mount_efs.get_mount_specific_filename", return_value=DNS_NAME) mocker.patch("mount_efs.get_target_region", return_value=REGION) - mocker.patch("mount_efs.get_target_domain_suffix", return_value=DOMAIN_SUFFIX) state_file_dir = str(tmpdir) tls_dict = mount_efs.tls_paths_dictionary(DNS_NAME + "+", state_file_dir) diff --git a/test/mount_efs_test/test_get_aws_security_credentials.py b/test/mount_efs_test/test_get_aws_security_credentials.py index a0798dc7..e931f32e 100644 --- a/test/mount_efs_test/test_get_aws_security_credentials.py +++ b/test/mount_efs_test/test_get_aws_security_credentials.py @@ -102,7 +102,7 @@ def test_get_aws_security_credentials_config_or_creds_file_found_creds_found_wit mocker.patch("mount_efs.credentials_file_helper", return_value=file_helper_resp) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "amazonaws.com", "test_profile" + config, True, "us-east-1", "test_profile" ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -126,7 +126,7 @@ def test_get_aws_security_credentials_config_or_creds_file_found_creds_found_wit mocker.patch("mount_efs.credentials_file_helper", return_value=file_helper_resp) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "amazonaws.com", "test_profile" + config, True, "us-east-1", "test_profile" ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -138,7 +138,7 @@ def test_get_aws_security_credentials_config_or_creds_file_found_creds_found_wit def test_get_aws_security_credentials_do_not_use_iam(): config = get_fake_config() credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, False, "us-east-1", "amazonaws.com", "test_profile" + config, False, "us-east-1", "test_profile" ) assert not credentials @@ -165,7 +165,7 @@ def _test_get_aws_security_credentials_get_ecs_from_env_url(mocker): mocker.patch("mount_efs.urlopen", 
return_value=MockUrlLibResponse(data=response)) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "amazonaws.com", None + config, True, "us-east-1", None ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -187,7 +187,7 @@ def test_get_aws_security_credentials_get_ecs_from_option_url(mocker): ) mocker.patch("mount_efs.urlopen", return_value=MockUrlLibResponse(data=response)) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "amazonaws.com", None, AWSCREDSURI + config, True, "us-east-1", None, AWSCREDSURI ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -279,7 +279,7 @@ def _test_get_aws_security_credentials_get_instance_metadata_role_name( mocker.patch("mount_efs.urlopen", side_effect=side_effects) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "amazonaws.com", None + config, True, "us-east-1", None ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL @@ -295,7 +295,7 @@ def test_get_aws_security_credentials_no_credentials_found(mocker, capsys): mocker.patch("mount_efs.urlopen") with pytest.raises(SystemExit) as ex: - mount_efs.get_aws_security_credentials(config, True, "us-east-1", "amazonaws.com", None) + mount_efs.get_aws_security_credentials(config, True, "us-east-1", None) assert 0 != ex.value.code @@ -320,7 +320,7 @@ def test_get_aws_security_credentials_credentials_not_found_in_files_and_botocor mount_efs.BOTOCORE_PRESENT = False with pytest.raises(SystemExit) as ex: - mount_efs.get_aws_security_credentials(config, True, "us-east-1", "amazonaws.com", "default") + mount_efs.get_aws_security_credentials(config, True, "us-east-1", "default") assert 0 != ex.value.code @@ -348,7 +348,7 @@ def test_get_aws_security_credentials_botocore_present_get_assumed_profile_crede ) credentials, credentials_source = mount_efs.get_aws_security_credentials( - config, True, "us-east-1", 
"amazonaws.com", awsprofile="test-profile" + config, True, "us-east-1", awsprofile="test-profile" ) assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL assert credentials["SecretAccessKey"] == SECRET_ACCESS_KEY_VAL @@ -365,7 +365,7 @@ def test_get_aws_security_credentials_credentials_not_found_in_aws_creds_uri( with pytest.raises(SystemExit) as ex: mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "amazonaws.com", "default", AWSCREDSURI + config, True, "us-east-1", "default", AWSCREDSURI ) assert 0 != ex.value.code @@ -474,7 +474,6 @@ def test_get_aws_security_credentials_from_webidentity_passed_in_both_params(moc config, True, "us-east-1", - "amazonaws.com", jwt_path=WEB_IDENTITY_TOKEN_FILE, role_arn=WEB_IDENTITY_ROLE_ARN, ) @@ -507,7 +506,7 @@ def test_get_aws_security_credentials_from_webidentity_passed_in_one_param( with pytest.raises(SystemExit) as ex: mount_efs.get_aws_security_credentials( - config, True, "us-east-1", "amazonaws.com", jwt_path=WEB_IDENTITY_TOKEN_FILE + config, True, "us-east-1", jwt_path=WEB_IDENTITY_TOKEN_FILE ) assert 0 != ex.value.code diff --git a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py index 7de5e02b..59a185e7 100644 --- a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py +++ b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py @@ -28,14 +28,9 @@ "cn-north-1": "amazonaws.com.cn", "cn-northwest-1": "amazonaws.com.cn", "us-iso-east-1": "c2s.ic.gov", - "us-iso-west-1": "c2s.ic.gov", "us-isob-east-1": "sc2s.sgov.gov", - "us-isob-west-1": "sc2s.sgov.gov", - "us-isof-south-1": "csp.hci.ic.gov", - "us-isof-east-1": "csp.hci.ic.gov", - "eu-isoe-west-1": "cloud.adc-e.uk" } -SPECIAL_REGIONS = ["cn-north-1", "cn-northwest-1", "us-iso-east-1", "us-iso-west-1", "us-isob-east-1", "us-isob-west-1", "us-isof-south-1", "us-isof-east-1", "eu-isoe-west-1"] 
+SPECIAL_REGIONS = ["cn-north-1", "cn-northwest-1", "us-iso-east-1", "us-isob-east-1"] DEFAULT_NFS_OPTIONS = {} OPTIONS_WITH_AZ = {"az": DEFAULT_AZ} OPTIONS_WITH_IP = {"mounttargetip": IP_ADDRESS} From 97fc737b794bf62491dd7522dd5f1816bf035b97 Mon Sep 17 00:00:00 2001 From: jrakas-dev Date: Tue, 8 Oct 2024 15:29:18 +0000 Subject: [PATCH 23/51] Enable support for EC2 Mac instances running macOS Sequoia --- README.md | 7 ++++--- src/mount_efs/__init__.py | 6 ++++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index dbe31e2e..5ccabc1c 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,7 @@ The `efs-utils` package has been verified against the following MacOS distributi | MacOS Monterey | `launchd` | | MacOS Ventura | `launchd` | | MacOS Sonoma | `launchd` | +| MacOS Sequoia | `launchd` | ## README contents - [Prerequisites](#prerequisites) @@ -202,11 +203,11 @@ Make sure that you have a linker installed on your system. For example, on Amazo yum install gcc ``` -### On MacOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura distribution +### On macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura distribution -For EC2 Mac instances running macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, you can install amazon-efs-utils from the +For EC2 Mac instances running macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, you can install amazon-efs-utils from the [homebrew-aws](https://github.com/aws/homebrew-aws) respository. 
**Note that this will ONLY work on EC2 instances -running macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, not local Mac computers.** +running macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, not local Mac computers.** ```bash brew install amazon-efs-utils ``` diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 675a18ca..61f09b0f 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -285,6 +285,7 @@ MACOS_MONTEREY_RELEASE = "macOS-12" MACOS_VENTURA_RELEASE = "macOS-13" MACOS_SONOMA_RELEASE = "macOS-14" +MACOS_SEQUOIA_RELEASE = "macOS-15" # Multiplier for max read ahead buffer size @@ -299,11 +300,12 @@ MACOS_MONTEREY_RELEASE, MACOS_VENTURA_RELEASE, MACOS_SONOMA_RELEASE, + MACOS_SEQUOIA_RELEASE, ] MAC_OS_PLATFORM_LIST = ["darwin"] -# MacOS Versions : Sonoma - 23.*, Ventura - 22.*, Monterey - 21.*, Big Sur - 20.*, Catalina - 19.*, Mojave - 18.*. Catalina and Mojave are not supported for now -MAC_OS_SUPPORTED_VERSION_LIST = ["20", "21", "22", "23"] +# MacOS Versions : Sequoia - 24.*, Sonoma - 23.*, Ventura - 22.*, Monterey - 21.*, Big Sur - 20.*, Catalina - 19.*, Mojave - 18.*. 
Catalina and Mojave are not supported for now +MAC_OS_SUPPORTED_VERSION_LIST = ["20", "21", "22", "23", "24"] AWS_FIPS_ENDPOINT_CONFIG_ENV = "AWS_USE_FIPS_ENDPOINT" ECS_URI_ENV = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" From 15f6f88521cfeca3f008740645604e4d29765094 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Wed, 20 Nov 2024 14:37:02 +0000 Subject: [PATCH 24/51] efs-utils v2.2.0-1 release --- .circleci/config.yml | 5 +- README.md | 2 +- amazon-efs-utils.spec | 14 ++-- build-deb.sh | 4 +- config.ini | 2 +- src/mount_efs/__init__.py | 64 ++++++++++++------- src/proxy/Cargo.toml | 1 + src/watchdog/__init__.py | 45 +++++++++++-- test/global_test/test_global_version_match.py | 1 - ...me_and_fallback_mount_target_ip_address.py | 15 +++-- ...st_get_fallback_mount_target_ip_address.py | 7 +- 11 files changed, 111 insertions(+), 49 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e7179b56..9b386754 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -50,7 +50,7 @@ commands: name: Install dependencies command: | DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata - apt-get -y install binutils git rustc cargo pkg-config libssl-dev + apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext - run: name: Add local build repo as safe git directory command: | @@ -280,9 +280,6 @@ workflows: - build-rpm-package-rustup: name: amazon-linux-2 image: amazonlinux:2 - - build-rpm-package: - name: fedora-latest - image: fedora:latest - build-rpm-package-rustup: name: fedora29 image: fedora:29 diff --git a/README.md b/README.md index 5ccabc1c..cd2d65cc 100644 --- a/README.md +++ b/README.md @@ -160,7 +160,7 @@ sudo zypper refresh ```bash $ sudo apt-get update -$ sudo apt-get -y install git binutils rustc cargo pkg-config libssl-dev +$ sudo apt-get -y install git binutils rustc cargo pkg-config libssl-dev gettext $ git clone https://github.com/aws/efs-utils $ cd efs-utils $ ./build-deb.sh diff --git 
a/amazon-efs-utils.spec b/amazon-efs-utils.spec index bbd16d3c..ccf62344 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.1.0 +Version : 2.2.0 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -192,6 +192,10 @@ fi %clean %changelog +* Wed Nov 13 2024 Anthony Tse - 2.2.0 +- Use region-specific domain suffixes for dns endpoints where missing +- Merge PR #211 - Amend Debian control to use binary architecture + * Wed Sep 18 2024 Julie Rakas - 2.1.0 - Add mount option for specifying region - Add new ISO regions to config file @@ -201,17 +205,17 @@ fi * Tue Jun 18 2024 Arnav Gupta - 2.0.3 - Upgrade py version -- Replace deprecated usage of datetime +- Replace deprecated usage of datetime * Mon May 20 2024 Anthony Tse - 2.0.2 - Check for efs-proxy PIDs when cleaning tunnel state files - Add PID to log entries * Tue Apr 23 2024 Ryan Stankiewicz - 2.0.1 -- Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies +- Disable Nagle's algorithm for efs-proxy TLS mounts to improve latencies * Mon Apr 08 2024 Ryan Stankiewicz - 2.0.0 -- Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. +- Replace stunnel, which provides TLS encryptions for mounts, with efs-proxy, a component built in-house at AWS. Efs-proxy lays the foundation for upcoming feature launches at EFS. * Mon Mar 18 2024 Sean Zatz - 1.36.0 - Support new mount option: crossaccount, conduct cross account mounts via ip address. Use client AZ-ID to choose mount target. 
@@ -343,4 +347,4 @@ fi * Tue Mar 03 2020 Yuan Gao - 1.23.2 - Support new option: netns, enable file system to mount in given network namespace - Support new option: awscredsuri, enable sourcing iam authorization from aws credentials relative uri -- List openssl and util-linux as package dependency for IAM/AP authorization and command nsenter to mount file system to given network namespace +- List openssl and util-linux as package dependency for IAM/AP authorization and command nsenter to mount file system to given network namespace \ No newline at end of file diff --git a/build-deb.sh b/build-deb.sh index a349d18e..720def25 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,9 +11,9 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.1.0 +VERSION=2.2.0 RELEASE=1 -ARCH=$(dpkg-architecture -qDEB_BUILD_ARCH) +ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release export VERSION RELEASE ARCH diff --git a/config.ini b/config.ini index 86a460dd..1c9f2ad0 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.1.0 +version=2.2.0 release=1 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 61f09b0f..394374b1 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.1.0" +VERSION = "2.2.0" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -188,14 +188,14 @@ FS_ID_RE = re.compile("^(?Pfs-[0-9a-f]+)$") EFS_FQDN_RE = re.compile( - r"^((?P[a-z0-9-]+)\.)?(?Pfs-[0-9a-f]+)\.efs\." 
+ r"^((?P[a-z0-9-]+)\.)?(?Pfs-[0-9a-f]+)\.(?:[a-z-]+\.)+" r"(?P[a-z0-9-]+)\.(?P[a-z0-9.]+)$" ) AP_ID_RE = re.compile("^fsap-[0-9a-f]{17}$") CREDENTIALS_KEYS = ["AccessKeyId", "SecretAccessKey", "Token"] ECS_TASK_METADATA_API = "http://169.254.170.2" -STS_ENDPOINT_URL_FORMAT = "https://sts.{}.amazonaws.com/" +STS_ENDPOINT_URL_FORMAT = "https://sts.{}.{}/" INSTANCE_METADATA_TOKEN_URL = "http://169.254.169.254/latest/api/token" INSTANCE_METADATA_SERVICE_URL = ( "http://169.254.169.254/latest/dynamic/instance-identity/document/" @@ -836,9 +836,9 @@ def get_aws_security_credentials_from_webidentity( else: return None, None - STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region) + sts_endpoint_url = get_sts_endpoint_url(config, region) webidentity_url = ( - STS_ENDPOINT_URL + sts_endpoint_url + "?" + urlencode( { @@ -852,11 +852,11 @@ def get_aws_security_credentials_from_webidentity( ) unsuccessful_resp = ( - "Unsuccessful retrieval of AWS security credentials at %s." % STS_ENDPOINT_URL + "Unsuccessful retrieval of AWS security credentials at %s." % sts_endpoint_url ) url_error_msg = ( "Unable to reach %s to retrieve AWS security credentials. See %s for more info." 
- % (STS_ENDPOINT_URL, SECURITY_CREDS_WEBIDENTITY_HELP_URL) + % (sts_endpoint_url, SECURITY_CREDS_WEBIDENTITY_HELP_URL) ) resp = url_request_helper( config, @@ -886,6 +886,30 @@ def get_aws_security_credentials_from_webidentity( return None, None +def get_sts_endpoint_url(config, region): + dns_name_suffix = get_dns_name_suffix(config, region) + return STS_ENDPOINT_URL_FORMAT.format(region, dns_name_suffix) + + +def get_dns_name_suffix(config, region): + return get_mount_config(config, region, "dns_name_suffix") + + +def get_mount_config(config, region, config_name): + try: + config_section = get_config_section(config, region) + return config.get(config_section, config_name) + except NoOptionError: + pass + + try: + return config.get(CONFIG_SECTION, config_name) + except NoOptionError: + fatal_error( + "Error retrieving config. Please set the {} configuration in efs-utils.conf".format(config_name) + ) + + def get_aws_security_credentials_from_instance_metadata(config, iam_role_name): security_creds_lookup_url = INSTANCE_IAM_URL + iam_role_name unsuccessful_resp = ( @@ -2670,7 +2694,8 @@ def _validate_replacement_field_count(format_str, expected_ct): try: az_id = get_az_id_from_instance_metadata(config, options) region = get_target_region(config, options) - dns_name = "%s.%s.efs.%s.amazonaws.com" % (az_id, fs_id, region) + dns_name_suffix = get_dns_name_suffix(config, region) + dns_name = "%s.%s.efs.%s.%s" % (az_id, fs_id, region, dns_name_suffix) except RuntimeError: err_msg = "Cannot retrieve AZ-ID from metadata service. This is required for the crossaccount mount option." 
fatal_error(err_msg) @@ -2692,27 +2717,18 @@ def _validate_replacement_field_count(format_str, expected_ct): else: dns_name_format = dns_name_format.replace("{az}.", "") + region = None if "{region}" in dns_name_format: + region = get_target_region(config, options) expected_replacement_field_ct += 1 - format_args["region"] = get_target_region(config, options) + format_args["region"] = region if "{dns_name_suffix}" in dns_name_format: expected_replacement_field_ct += 1 - config_section = CONFIG_SECTION - region = format_args.get("region") - - if region: - config_section = get_config_section(config, region) - - format_args["dns_name_suffix"] = config.get( - config_section, "dns_name_suffix" - ) - - logging.debug( - "Using dns_name_suffix %s in config section [%s]", - format_args.get("dns_name_suffix"), - config_section, - ) + region = region or get_target_region(config, options) + dns_name_suffix = get_dns_name_suffix(config, region) + format_args["dns_name_suffix"] = dns_name_suffix + logging.debug("Using dns_name_suffix %s", dns_name_suffix) _validate_replacement_field_count( dns_name_format, expected_replacement_field_ct diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 602677c5..3f35f0bb 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -5,6 +5,7 @@ build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
version = "2.1.0" publish = false +license = "MIT" [dependencies] anyhow = "1.0.72" diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index d9f620aa..f8b4e3c1 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.1.0" +VERSION = "2.2.0" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" @@ -146,7 +146,7 @@ AP_ID_RE = re.compile("^fsap-[0-9a-f]{17}$") ECS_TASK_METADATA_API = "http://169.254.170.2" -STS_ENDPOINT_URL_FORMAT = "https://sts.{}.amazonaws.com/" +STS_ENDPOINT_URL_FORMAT = "https://sts.{}.{}/" INSTANCE_IAM_URL = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" INSTANCE_METADATA_TOKEN_URL = "http://169.254.169.254/latest/api/token" SECURITY_CREDS_ECS_URI_HELP_URL = ( @@ -384,9 +384,9 @@ def get_aws_security_credentials_from_webidentity(config, role_arn, token_file, logging.error("Error reading token file %s: %s", token_file, e) return None - STS_ENDPOINT_URL = STS_ENDPOINT_URL_FORMAT.format(region) + sts_endpoint_url = get_sts_endpoint_url(config, region) webidentity_url = ( - STS_ENDPOINT_URL + sts_endpoint_url + "?" + urlencode( { @@ -400,11 +400,11 @@ def get_aws_security_credentials_from_webidentity(config, role_arn, token_file, ) unsuccessful_resp = ( - "Unsuccessful retrieval of AWS security credentials at %s." % STS_ENDPOINT_URL + "Unsuccessful retrieval of AWS security credentials at %s." % sts_endpoint_url ) url_error_msg = ( "Unable to reach %s to retrieve AWS security credentials. See %s for more info." 
- % (STS_ENDPOINT_URL, SECURITY_CREDS_WEBIDENTITY_HELP_URL) + % (sts_endpoint_url, SECURITY_CREDS_WEBIDENTITY_HELP_URL) ) resp = url_request_helper( config, @@ -430,6 +430,39 @@ def get_aws_security_credentials_from_webidentity(config, role_arn, token_file, return None +def get_sts_endpoint_url(config, region): + dns_name_suffix = get_dns_name_suffix(config, region) + return STS_ENDPOINT_URL_FORMAT.format(region, dns_name_suffix) + + +def get_dns_name_suffix(config, region): + return get_mount_config(config, region, "dns_name_suffix") + + +def get_mount_config(config, region, config_name): + try: + config_section = get_mount_config_section(config, region) + return config.get(config_section, config_name) + except NoOptionError: + pass + + try: + return config.get(MOUNT_CONFIG_SECTION, config_name) + except NoOptionError: + fatal_error( + "Error retrieving config. Please set the {} configuration in efs-utils.conf".format(config_name) + ) + + +def get_mount_config_section(config, region): + region_specific_config_section = "%s.%s" % (MOUNT_CONFIG_SECTION, region) + if config.has_section(region_specific_config_section): + config_section = region_specific_config_section + else: + config_section = MOUNT_CONFIG_SECTION + return config_section + + def get_aws_security_credentials_from_instance_metadata(config): # through IAM role name security credentials lookup uri (after lookup for IAM role name attached to instance) dict_keys = ["AccessKeyId", "SecretAccessKey", "Token"] diff --git a/test/global_test/test_global_version_match.py b/test/global_test/test_global_version_match.py index f1c34639..4cb53d6b 100644 --- a/test/global_test/test_global_version_match.py +++ b/test/global_test/test_global_version_match.py @@ -20,7 +20,6 @@ "build-deb.sh", "src/watchdog/__init__.py", "src/mount_efs/__init__.py", - "dist/amazon-efs-utils.control", "build-deb.sh", "amazon-efs-utils.spec", ] diff --git a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py 
b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py index 59a185e7..c49dc6dd 100644 --- a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py +++ b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py @@ -28,9 +28,14 @@ "cn-north-1": "amazonaws.com.cn", "cn-northwest-1": "amazonaws.com.cn", "us-iso-east-1": "c2s.ic.gov", + "us-iso-west-1": "c2s.ic.gov", "us-isob-east-1": "sc2s.sgov.gov", + "us-isob-west-1": "sc2s.sgov.gov", + "us-isof-south-1": "csp.hci.ic.gov", + "us-isof-east-1": "csp.hci.ic.gov", + "eu-isoe-west-1": "cloud.adc-e.uk", } -SPECIAL_REGIONS = ["cn-north-1", "cn-northwest-1", "us-iso-east-1", "us-isob-east-1"] +SPECIAL_REGIONS = SPECIAL_REGION_DNS_DICT.keys() DEFAULT_NFS_OPTIONS = {} OPTIONS_WITH_AZ = {"az": DEFAULT_AZ} OPTIONS_WITH_IP = {"mounttargetip": IP_ADDRESS} @@ -158,7 +163,8 @@ def test_get_dns_name_region_hardcoded(mocker): config, FS_ID, DEFAULT_NFS_OPTIONS ) - utils.assert_not_called(get_target_region_mock) + # get_target_region will be called 1 time to get dns_name_suffix + utils.assert_called_n_times(get_target_region_mock, 1) assert "%s.efs.%s.amazonaws.com" % (FS_ID, DEFAULT_REGION) == dns_name assert None == ip_address @@ -271,14 +277,15 @@ def test_get_dns_name_region_in_suffix(mocker): config, FS_ID, DEFAULT_NFS_OPTIONS ) - utils.assert_not_called(get_target_region_mock) - assert ( "%s.efs.%s.%s" % (FS_ID, special_region, special_dns_name_suffix) == dns_name ) assert None == ip_address + # get_target_region will be called 1 time for each region to get dns_name_suffix + utils.assert_called_n_times(get_target_region_mock, len(SPECIAL_REGIONS)) + def test_dns_name_can_be_resolved_dns_resolve_failure(mocker): dns_mock = mocker.patch("socket.gethostbyname", side_effect=socket.gaierror) diff --git a/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py b/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py index 5ced0793..1b7bd8a7 100644 
--- a/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py +++ b/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py @@ -28,9 +28,14 @@ "cn-north-1": "amazonaws.com.cn", "cn-northwest-1": "amazonaws.com.cn", "us-iso-east-1": "c2s.ic.gov", + "us-iso-west-1": "c2s.ic.gov", "us-isob-east-1": "sc2s.sgov.gov", + "us-isob-west-1": "sc2s.sgov.gov", + "us-isof-south-1": "csp.hci.ic.gov", + "us-isof-east-1": "csp.hci.ic.gov", + "eu-isoe-west-1": "cloud.adc-e.uk", } -SPECIAL_REGIONS = ["cn-north-1", "cn-northwest-1", "us-iso-east-1", "us-isob-east-1"] +SPECIAL_REGIONS = SPECIAL_REGION_DNS_DICT.keys() DEFAULT_NFS_OPTIONS = {} OPTIONS_WITH_AZ = {"az": DEFAULT_AZ} OPTIONS_WITH_CROSSACCOUNT = {"crossaccount": None} From b160f0286ed28d4fd3adf9436e5a43ab7933885c Mon Sep 17 00:00:00 2001 From: Mihir Thakur Date: Tue, 4 Feb 2025 18:52:18 +0000 Subject: [PATCH 25/51] Add installation instruction for make rpm edge case --- README.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/README.md b/README.md index cd2d65cc..68b55fb5 100644 --- a/README.md +++ b/README.md @@ -203,6 +203,18 @@ Make sure that you have a linker installed on your system. For example, on Amazo yum install gcc ``` +**Installation Issue - Failed Build Dependencies** + +If rust dependencies was installed using rustup and the package manager does not have a rust and/or cargo package installed, you may see an error like this. + +``` +error: Failed build dependencies: + cargo is needed by amazon-efs-utils-2.1.0-1.el7_9.x86_64 + rust is needed by amazon-efs-utils-2.1.0-1.el7_9.x86_64 +``` + +In this case, the 'make rpm' command in the installation script above should be replaced by 'make rpm-without-system-rust' to remove the rpmbuild dependency check. 
+ ### On macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura distribution For EC2 Mac instances running macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, you can install amazon-efs-utils from the From ca29621bc3d3309ab872f2d8561cbce792713a8a Mon Sep 17 00:00:00 2001 From: jrakas-dev Date: Mon, 10 Feb 2025 15:06:34 +0000 Subject: [PATCH 26/51] Upgrade log4rs version to mitigate security vulnerabilities --- src/proxy/Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 3f35f0bb..89f328c7 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -16,7 +16,7 @@ clap = { version = "=4.0.0", features = ["derive"] } fern = "0.6" futures = "0.3" log = "0.4" -log4rs = { version = "0", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"]} +log4rs = { version = "1.2.0", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"]} nix = { version = "0.26.2", features = ["signal"]} onc-rpc = "0.2.3" rand = "0.8.5" @@ -37,4 +37,4 @@ tokio = { version = "1.29.0", features = ["test-util"] } tempfile = "3.10.1" [build-dependencies] -xdrgen = "0.4.4" \ No newline at end of file +xdrgen = "0.4.4" From 7ef52bed23b213363bf1b021e5d3d1600117f83f Mon Sep 17 00:00:00 2001 From: mskanth972 Date: Tue, 4 Mar 2025 10:49:04 -0500 Subject: [PATCH 27/51] Remove RHEL 7 support from efs-utils README as its no longer supported. 
--- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 68b55fb5..36e0fafc 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,6 @@ The `efs-utils` package has been verified against the following Linux distributi | Amazon Linux 2 | `rpm` | `systemd` | | Amazon Linux 2023 | `rpm` | `systemd` | | CentOS 8 | `rpm` | `systemd` | -| RHEL 7 | `rpm` | `systemd` | | RHEL 8 | `rpm` | `systemd` | | RHEL 9 | `rpm` | `systemd` | | Fedora 29 | `rpm` | `systemd` | From d4684043afe80a97a2a7b2dd2fe3a41610ad482d Mon Sep 17 00:00:00 2001 From: mskanth972 Date: Tue, 4 Mar 2025 10:52:03 -0500 Subject: [PATCH 28/51] Revert "Remove RHEL 7 support from efs-utils README as its no longer supported." This reverts commit 7ef52bed23b213363bf1b021e5d3d1600117f83f. --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 36e0fafc..68b55fb5 100644 --- a/README.md +++ b/README.md @@ -11,6 +11,7 @@ The `efs-utils` package has been verified against the following Linux distributi | Amazon Linux 2 | `rpm` | `systemd` | | Amazon Linux 2023 | `rpm` | `systemd` | | CentOS 8 | `rpm` | `systemd` | +| RHEL 7 | `rpm` | `systemd` | | RHEL 8 | `rpm` | `systemd` | | RHEL 9 | `rpm` | `systemd` | | Fedora 29 | `rpm` | `systemd` | From e28dafd06aa2fcfb1e959836ee8518695a4130ea Mon Sep 17 00:00:00 2001 From: mskanth972 Date: Tue, 4 Mar 2025 10:53:53 -0500 Subject: [PATCH 29/51] Remove RHEL 7 support from efs-utils README as its no longer supported. 
--- README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/README.md b/README.md index 68b55fb5..36e0fafc 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,6 @@ The `efs-utils` package has been verified against the following Linux distributi | Amazon Linux 2 | `rpm` | `systemd` | | Amazon Linux 2023 | `rpm` | `systemd` | | CentOS 8 | `rpm` | `systemd` | -| RHEL 7 | `rpm` | `systemd` | | RHEL 8 | `rpm` | `systemd` | | RHEL 9 | `rpm` | `systemd` | | Fedora 29 | `rpm` | `systemd` | From c0b60d9c9698c60fb8aef4732681364a68bd3dd3 Mon Sep 17 00:00:00 2001 From: Daniel Luthcke Date: Tue, 11 Mar 2025 17:22:32 +0000 Subject: [PATCH 30/51] efs-utils v2.2.1-1 release --- .circleci/config.yml | 18 +++++++++--------- amazon-efs-utils.spec | 8 ++++++-- build-deb.sh | 2 +- config.ini | 2 +- src/mount_efs/__init__.py | 2 +- src/proxy/Cargo.toml | 2 +- src/watchdog/__init__.py | 2 +- 7 files changed, 20 insertions(+), 16 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9b386754..13a80447 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -233,20 +233,20 @@ workflows: workflow: jobs: - test: - name: python3_12 - image: python:3.12.4 + name: python3_8 + image: python:3.8.13 - test: - name: python3_11 - image: python:3.11.9 + name: python3_9 + image: python:3.9.13 - test: name: python3_10 image: python:3.10.13 - test: - name: python3_9 - image: python:3.9.13 + name: python3_11 + image: python:3.11.9 - test: - name: python3_8 - image: python:3.8.13 + name: python3_12 + image: python:3.12.4 - build-deb-package: name: ubuntu-latest image: ubuntu:latest @@ -318,4 +318,4 @@ workflows: image: opensuse/leap:15.4 - build-suse-rpm-package: name: opensuse-leap-latest - image: opensuse/leap:latest + image: opensuse/leap:latest \ No newline at end of file diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index ccf62344..c0346390 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define 
include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.2.0 +Version : 2.2.1 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -192,6 +192,10 @@ fi %clean %changelog +* Thu Mar 06 2025 Daniel Luthcke - 2.2.1 +- Readme Updates +- Update log4rs to mitigate CVE-2020-35881 + * Wed Nov 13 2024 Anthony Tse - 2.2.0 - Use region-specific domain suffixes for dns endpoints where missing - Merge PR #211 - Amend Debian control to use binary architecture @@ -347,4 +351,4 @@ fi * Tue Mar 03 2020 Yuan Gao - 1.23.2 - Support new option: netns, enable file system to mount in given network namespace - Support new option: awscredsuri, enable sourcing iam authorization from aws credentials relative uri -- List openssl and util-linux as package dependency for IAM/AP authorization and command nsenter to mount file system to given network namespace \ No newline at end of file +- List openssl and util-linux as package dependency for IAM/AP authorization and command nsenter to mount file system to given network namespace diff --git a/build-deb.sh b/build-deb.sh index 720def25..7a77a6e5 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.2.0 +VERSION=2.2.1 RELEASE=1 ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index 1c9f2ad0..900ba7ad 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.2.0 +version=2.2.1 release=1 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 394374b1..24ab61e2 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -85,7 +85,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.2.0" +VERSION = "2.2.1" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 89f328c7..e03248d5 100644 --- 
a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. -version = "2.1.0" +version = "2.2.1" publish = false license = "MIT" diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index f8b4e3c1..465a396c 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.2.0" +VERSION = "2.2.1" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" From 14e73becda1bf3835dd17f0a93de6f3ae820a467 Mon Sep 17 00:00:00 2001 From: Tillman Jex Date: Wed, 26 Mar 2025 10:07:30 +0100 Subject: [PATCH 31/51] remove $ from all codeblocks Having `$` at the beginning of lines in bash codeblocks makes copy and pasting a pain. We regularly spin up new ec2 instances and need to manually clone and build efs-utils from this repo. If the `$` were included to safeguard against users running pasted code without thinking, then there were many code blocks (also with sudo) where the lines did not begin with `$`. So I figured it was no a convention of the repository. 
--- README.md | 80 +++++++++++++++++++++++++++---------------------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/README.md b/README.md index 36e0fafc..ba1dfa2a 100644 --- a/README.md +++ b/README.md @@ -128,22 +128,22 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh If the distribution is not OpenSUSE or SLES ```bash -$ sudo yum -y install git rpm-build make rust cargo openssl-devel -$ git clone https://github.com/aws/efs-utils -$ cd efs-utils -$ make rpm -$ sudo yum -y install build/amazon-efs-utils*rpm +sudo yum -y install git rpm-build make rust cargo openssl-devel +git clone https://github.com/aws/efs-utils +cd efs-utils +make rpm +sudo yum -y install build/amazon-efs-utils*rpm ``` Otherwise ```bash -$ sudo zypper refresh -$ sudo zypper install -y git rpm-build make rust cargo openssl-devel -$ git clone https://github.com/aws/efs-utils -$ cd efs-utils -$ make rpm -$ sudo zypper --no-gpg-checks install -y build/amazon-efs-utils*rpm +sudo zypper refresh +sudo zypper install -y git rpm-build make rust cargo openssl-devel +git clone https://github.com/aws/efs-utils +cd efs-utils +make rpm +sudo zypper --no-gpg-checks install -y build/amazon-efs-utils*rpm ``` On OpenSUSE, if you see error like `File './suse/noarch/bash-completion-2.11-2.1.noarch.rpm' not found on medium 'http://download.opensuse.org/tumbleweed/repo/oss/'` @@ -158,12 +158,12 @@ sudo zypper refresh - To build and install a Debian package: ```bash -$ sudo apt-get update -$ sudo apt-get -y install git binutils rustc cargo pkg-config libssl-dev gettext -$ git clone https://github.com/aws/efs-utils -$ cd efs-utils -$ ./build-deb.sh -$ sudo apt-get -y install ./build/amazon-efs-utils*deb +sudo apt-get update +sudo apt-get -y install git binutils rustc cargo pkg-config libssl-dev gettext +git clone https://github.com/aws/efs-utils +cd efs-utils +./build-deb.sh +sudo apt-get -y install ./build/amazon-efs-utils*deb ``` If your Debian distribution doesn't provide a 
rust or cargo package, or your distribution provides versions @@ -235,15 +235,15 @@ brew info amazon-efs-utils - [Set up a virtualenv](http://libzx.so/main/learning/2016/03/13/best-practice-for-virtualenv-and-git-repos.html) for efs-utils ```bash -$ virtualenv ~/.envs/efs-utils -$ source ~/.envs/efs-utils/bin/activate -$ pip install -r requirements.txt +virtualenv ~/.envs/efs-utils +source ~/.envs/efs-utils/bin/activate +pip install -r requirements.txt ``` - Run tests ```bash -$ make test +make test ``` ## Usage @@ -257,57 +257,57 @@ This proxy is responsible for TLS encryption, and for providing improved through To mount with the recommended default options, simply run: ```bash -$ sudo mount -t efs file-system-id efs-mount-point/ +sudo mount -t efs file-system-id efs-mount-point/ ``` To mount file system to a specific mount target of the file system, run: ```bash -$ sudo mount -t efs -o mounttargetip=mount-target-ip-address file-system-id efs-mount-point/ +sudo mount -t efs -o mounttargetip=mount-target-ip-address file-system-id efs-mount-point/ ``` To mount file system within a given network namespace, run: ```bash -$ sudo mount -t efs -o netns=netns-path file-system-id efs-mount-point/ +sudo mount -t efs -o netns=netns-path file-system-id efs-mount-point/ ``` To mount file system to the mount target in a specific availability zone (e.g. us-east-1a), run: ```bash -$ sudo mount -t efs -o az=az-name file-system-id efs-mount-point/ +sudo mount -t efs -o az=az-name file-system-id efs-mount-point/ ``` To mount file system to the mount target in a specific region (e.g. 
us-east-1), run: ```bash -$ sudo mount -t efs -o region=region-name file-system-id efs-mount-point/ +sudo mount -t efs -o region=region-name file-system-id efs-mount-point/ ``` **Note: The [prequisites in the crossaccount section below](#crossaccount-option-prerequisites) must be completed before using the crossaccount option.** To mount the filesystem mount target in the same physical availability zone ID (e.g. use1-az1) as the client instance over cross-AWS-account mounts, run: ``` -$ sudo mount -t efs -o crossaccount file-system-id efs-mount-point/ +sudo mount -t efs -o crossaccount file-system-id efs-mount-point/ ``` To mount over TLS, simply add the `tls` option: ```bash -$ sudo mount -t efs -o tls file-system-id efs-mount-point/ +sudo mount -t efs -o tls file-system-id efs-mount-point/ ``` To authenticate with EFS using the system’s IAM identity, add the `iam` option. This option requires the `tls` option. ```bash -$ sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ +sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ ``` To mount using an access point, use the `accesspoint=` option. This option requires the `tls` option. The access point must be in the "available" state before it can be used to mount EFS. ```bash -$ sudo mount -t efs -o tls,accesspoint=access-point-id file-system-id efs-mount-point/ +sudo mount -t efs -o tls,accesspoint=access-point-id file-system-id efs-mount-point/ ``` To mount your file system automatically with any of the options above, you can add entries to `/efs/fstab` like: @@ -342,7 +342,7 @@ Given a client instance in Account A/VPC A and an EFS instance in Account B/VPC Once the above steps have been completed, to mount the filesystem mount target in the same physical availability zone ID (e.g. 
use1-az1) as the client instance over cross-AWS-account mounts, run: ``` -$ sudo mount -t efs -o crossaccount file-system-id efs-mount-point/ +sudo mount -t efs -o crossaccount file-system-id efs-mount-point/ ``` @@ -351,17 +351,17 @@ $ sudo mount -t efs -o crossaccount file-system-id efs-mount-point/ For EC2 instances using Mac distribution, the recommended default options will perform a tls mount: ```bash -$ sudo mount -t efs file-system-id efs-mount-point/ +sudo mount -t efs file-system-id efs-mount-point/ ``` or ```bash -$ sudo mount -t efs -o tls file-system-id efs-mount-point/ +sudo mount -t efs -o tls file-system-id efs-mount-point/ ``` To mount without TLS, simply add the `notls` option: ```bash -$ sudo mount -t efs -o notls file-system-id efs-mount-point/ +sudo mount -t efs -o notls file-system-id efs-mount-point/ ``` @@ -541,13 +541,13 @@ sed -i "s/optimize_readahead = true/optimize_readahead = false/" /etc/amazon/efs You can mount file system with a given rsize, run: ```bash -$ sudo mount -t efs -o rsize=rsize-value-in-bytes file-system-id efs-mount-point/ +sudo mount -t efs -o rsize=rsize-value-in-bytes file-system-id efs-mount-point/ ``` You can also manually chose a value of read_ahead_kb to optimize read throughput on Linux 5.4+ after mount. ```bash -$ sudo bash -c "echo read-ahead-value-in-kb > /sys/class/bdi/0:$(stat -c '%d' efs-mount-point)/read_ahead_kb" +sudo bash -c "echo read-ahead-value-in-kb > /sys/class/bdi/0:$(stat -c '%d' efs-mount-point)/read_ahead_kb" ``` ## Using botocore to retrieve mount target ip address when dns name cannot be resolved @@ -585,7 +585,7 @@ To authenticate with EFS using the system’s IAM identity of an awsprofile, add `awsprofile` option. These options require the `tls` option. 
```bash -$ sudo mount -t efs -o tls,iam,awsprofile=test-profile file-system-id efs-mount-point/ +sudo mount -t efs -o tls,iam,awsprofile=test-profile file-system-id efs-mount-point/ ``` To configure the named profile, see the [Named Profiles doc](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html) @@ -636,13 +636,13 @@ You can use [web identity to assume a role](https://docs.aws.amazon.com/STS/late 1) By setting environment variable the path to the file containing the JWT token in `AWS_WEB_IDENTITY_TOKEN_FILE` and by setting `ROLE_ARN` environment variable. The command below shows an example of to leverage it. ```bash -$ sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ +sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ ``` 2) By passing the JWT token file path and the role arn as parameters to the mount command. The command below shows an example of to leverage it. ```bash -$ sudo mount -t efs -o tls,iam,rolearn="ROLE_ARN",jwtpath="PATH/JWT_TOKEN_FILE" file-system-id efs-mount-point/ +sudo mount -t efs -o tls,iam,rolearn="ROLE_ARN",jwtpath="PATH/JWT_TOKEN_FILE" file-system-id efs-mount-point/ ``` ## Enabling FIPS Mode @@ -656,7 +656,7 @@ Note: FIPS mode requires that the installed version of OpenSSL is compiled with To verify that the installed version is compiled with FIPS, look for `OpenSSL X.X.Xx-fips` in the `stunnel -version` command output e.g. 
```bash -$ stunnel -version +stunnel -version stunnel 4.56 on x86_64-koji-linux-gnu platform Compiled/running with OpenSSL 1.0.2k-fips 26 Jan 2017 Threading:PTHREAD Sockets:POLL,IPv6 SSL:ENGINE,OCSP,FIPS Auth:LIBWRAP From c65af2f6ec014853e21045745a598758c93e3903 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Tue, 29 Apr 2025 14:30:12 +0000 Subject: [PATCH 32/51] efs-utils v2.3.0-1 release --- README.md | 4 + amazon-efs-utils.spec | 6 +- build-deb.sh | 2 +- config.ini | 2 +- dist/efs-utils.conf | 12 +- src/mount_efs/__init__.py | 107 +- src/proxy/Cargo.toml | 2 +- src/proxy/src/config_parser.rs | 7 +- src/proxy/src/connection_task.rs | 159 ++ src/proxy/src/connections.rs | 150 +- src/proxy/src/controller.rs | 1327 +---------------- src/proxy/src/efs_rpc.rs | 131 +- src/proxy/src/lib.rs | 34 +- src/proxy/src/main.rs | 18 +- src/proxy/src/proxy.rs | 448 +----- src/proxy/src/proxy_task.rs | 310 ++++ src/proxy/src/rpc.rs | 50 +- src/proxy/src/status_reporter.rs | 2 +- src/proxy/src/test_utils.rs | 174 +++ src/proxy/src/tls.rs | 42 +- src/watchdog/__init__.py | 47 +- .../test_get_aws_security_credentials.py | 66 + ...me_and_fallback_mount_target_ip_address.py | 45 +- ...st_get_fallback_mount_target_ip_address.py | 24 + test/mount_efs_test/test_helper_function.py | 25 +- test/mount_efs_test/test_match_device.py | 8 +- .../test_write_stunnel_config_file.py | 38 + .../test_get_aws_security_credentials.py | 30 + 28 files changed, 1131 insertions(+), 2139 deletions(-) create mode 100644 src/proxy/src/connection_task.rs create mode 100644 src/proxy/src/proxy_task.rs create mode 100644 src/proxy/src/test_utils.rs diff --git a/README.md b/README.md index ba1dfa2a..38964e7e 100644 --- a/README.md +++ b/README.md @@ -657,6 +657,10 @@ Note: FIPS mode requires that the installed version of OpenSSL is compiled with To verify that the installed version is compiled with FIPS, look for `OpenSSL X.X.Xx-fips` in the `stunnel -version` command output e.g. 
```bash stunnel -version +``` + +Example output for FIPS compiled stunnel +``` stunnel 4.56 on x86_64-koji-linux-gnu platform Compiled/running with OpenSSL 1.0.2k-fips 26 Jan 2017 Threading:PTHREAD Sockets:POLL,IPv6 SSL:ENGINE,OCSP,FIPS Auth:LIBWRAP diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index c0346390..a0983b79 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.2.1 +Version : 2.3.0 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -192,6 +192,10 @@ fi %clean %changelog +* Thu Apr 17 2025 Anthony Tse - 2.3.0 +- Add support for pod-identity credentials in the credentials chain +- Enable mounting with IPv6 when using with the 'stunnel' mount option + * Thu Mar 06 2025 Daniel Luthcke - 2.2.1 - Readme Updates - Update log4rs to mitigate CVE-2020-35881 diff --git a/build-deb.sh b/build-deb.sh index 7a77a6e5..1d523c1e 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.2.1 +VERSION=2.3.0 RELEASE=1 ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index 900ba7ad..6dde4b66 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.2.1 +version=2.3.0 release=1 diff --git a/dist/efs-utils.conf b/dist/efs-utils.conf index 5d9482fa..62382885 100644 --- a/dist/efs-utils.conf +++ b/dist/efs-utils.conf @@ -62,6 +62,14 @@ dns_name_suffix = amazonaws.com.cn dns_name_suffix = amazonaws.com.cn +[mount.eu-isoe-west-1] +dns_name_suffix = cloud.adc-e.uk +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + +[mount.eusc-de-east-1] +dns_name_suffix = amazonaws.eu + + [mount.us-iso-east-1] dns_name_suffix = c2s.ic.gov stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem @@ -86,10 +94,6 @@ 
stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem dns_name_suffix = csp.hci.ic.gov stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem -[mount.eu-isoe-west-1] -dns_name_suffix = cloud.adc-e.uk -stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem - [mount-watchdog] enabled = true poll_interval_sec = 1 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 24ab61e2..43d9b662 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -34,6 +34,7 @@ import errno import hashlib import hmac +import ipaddress import json import logging import os @@ -85,7 +86,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.2.1" +VERSION = "2.3.0" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -316,6 +317,16 @@ ECS_FARGATE_TASK_METADATA_ENDPOINT_URL_EXTENSION = "/task" ECS_FARGATE_CLIENT_IDENTIFIER = "ecs.fargate" +AWS_CONTAINER_CREDS_FULL_URI_ENV = "AWS_CONTAINER_CREDENTIALS_FULL_URI" +AWS_CONTAINER_AUTH_TOKEN_FILE_ENV = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" + + +def is_ipv6_address(ip_address): + try: + return isinstance(ipaddress.ip_address(ip_address), ipaddress.IPv6Address) + except ValueError: + return False + def errcheck(ret, func, args): from ctypes import get_errno @@ -729,6 +740,13 @@ def get_aws_security_credentials( if credentials and credentials_source: return credentials, credentials_source + # attempt to lookup AWS security credentials through Pod Identity + credentials, credentials_source = get_aws_security_credentials_from_pod_identity( + config, False + ) + if credentials and credentials_source: + return credentials, credentials_source + # attempt to lookup AWS security credentials through AssumeRoleWithWebIdentity # (e.g. 
for IAM Role for Service Accounts (IRSA) approach on EKS) if jwt_path and role_arn: @@ -886,6 +904,55 @@ def get_aws_security_credentials_from_webidentity( return None, None +def get_aws_security_credentials_from_pod_identity(config, is_fatal=False): + if ( + AWS_CONTAINER_CREDS_FULL_URI_ENV not in os.environ + or AWS_CONTAINER_AUTH_TOKEN_FILE_ENV not in os.environ + ): + return None, None + + creds_uri = os.environ[AWS_CONTAINER_CREDS_FULL_URI_ENV] + token_file = os.environ[AWS_CONTAINER_AUTH_TOKEN_FILE_ENV] + + try: + with open(token_file, "r") as f: + token = f.read().strip() + if "\r" in token or "\n" in token: + if is_fatal: + unsuccessful_resp = ( + "AWS Container Auth Token contains invalid characters" + ) + fatal_error(unsuccessful_resp, unsuccessful_resp) + return None, None + except Exception as e: + if is_fatal: + unsuccessful_resp = ( + f"Error reading Aws Container Auth Token file {token_file}: {e}" + ) + fatal_error(unsuccessful_resp, unsuccessful_resp) + return None, None + + unsuccessful_resp = f"Unsuccessful retrieval of AWS security credentials from Container Credentials URI at {creds_uri}" + url_error_msg = f"Unable to reach Container Credentials URI at {creds_uri}" + + pod_identity_security_dict = url_request_helper( + config, + creds_uri, + unsuccessful_resp, + url_error_msg, + headers={"Authorization": token}, + ) + + if pod_identity_security_dict and all( + k in pod_identity_security_dict for k in CREDENTIALS_KEYS + ): + return pod_identity_security_dict, f"podidentity:{creds_uri},{token_file}" + + if is_fatal: + fatal_error(unsuccessful_resp, unsuccessful_resp) + return None, None + + def get_sts_endpoint_url(config, region): dns_name_suffix = get_dns_name_suffix(config, region) return STS_ENDPOINT_URL_FORMAT.format(region, dns_name_suffix) @@ -906,7 +973,8 @@ def get_mount_config(config, region, config_name): return config.get(CONFIG_SECTION, config_name) except NoOptionError: fatal_error( - "Error retrieving config. 
Please set the {} configuration in efs-utils.conf".format(config_name) + f"Error retrieving config. Please set the {config_name} configuration " + "in efs-utils.conf" ) @@ -1479,6 +1547,9 @@ def write_stunnel_config_file( else: efs_config["checkHost"] = dns_name[dns_name.index(fs_id) :] + if not efs_proxy_enabled and is_ipv6_address(fallback_ip_address): + efs_config["sni"] = dns_name[dns_name.index(fs_id) :] + # Only use the config setting if the override is not set if not efs_proxy_enabled and ocsp_enabled: if is_stunnel_option_supported(stunnel_options, b"OCSPaia"): @@ -1801,7 +1872,7 @@ def bootstrap_proxy( if credentials_source: cert_details["awsCredentialsMethod"] = credentials_source logging.debug( - "AWS credentials source used for IAM authentication: ", + "AWS credentials source used for IAM authentication: %s", credentials_source, ) @@ -2021,30 +2092,43 @@ def to_nfs_option(k, v): return ",".join(nfs_options) +def get_ipv6_addresses(hostname): + try: + addrinfo = socket.getaddrinfo(hostname, None, socket.AF_INET6) + return [addr[4][0] for addr in addrinfo] + except socket.gaierror: + return [] + + def mount_nfs(config, dns_name, path, mountpoint, options, fallback_ip_address=None): if legacy_stunnel_mode_enabled(options, config): if "tls" in options: mount_path = "127.0.0.1:%s" % path elif fallback_ip_address: - mount_path = "%s:%s" % (fallback_ip_address, path) + if is_ipv6_address(fallback_ip_address): + mount_path = f"[{fallback_ip_address}]:{path}" + else: + mount_path = "%s:%s" % (fallback_ip_address, path) else: mount_path = "%s:%s" % (dns_name, path) else: mount_path = "127.0.0.1:%s" % path + nfs_options = get_nfs_mount_options(options, config) + if not check_if_platform_is_mac(): command = [ "/sbin/mount.nfs4", mount_path, mountpoint, "-o", - get_nfs_mount_options(options, config), + nfs_options, ] else: command = [ "/sbin/mount_nfs", "-o", - get_nfs_mount_options(options, config), + nfs_options, mount_path, mountpoint, ] @@ -2847,8 +2931,8 @@ def 
check_and_remove_lock_file(path, file): def dns_name_can_be_resolved(dns_name): try: - socket.gethostbyname(dns_name) - return True + addr_info = socket.getaddrinfo(dns_name, None, socket.AF_UNSPEC) + return len(addr_info) > 0 except socket.gaierror: return False @@ -2904,10 +2988,11 @@ def get_fallback_mount_target_ip_address_helper(config, options, fs_id): efs_client = get_botocore_client(config, "efs", options) mount_target = get_mount_target_in_az(efs_client, ec2_client, fs_id, az_name) - mount_target_ip = mount_target.get("IpAddress") - logging.debug("Found mount target ip address %s in AZ %s", mount_target_ip, az_name) - return mount_target_ip + if "IpAddress" in mount_target: + return mount_target.get("IpAddress") + elif "Ipv6Address" in mount_target: + return mount_target.get("Ipv6Address") def throw_dns_resolve_failure_with_fallback_message(dns_name, fallback_message=None): diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index e03248d5..3836ca94 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.2.1" +version = "2.3.0" publish = false license = "MIT" diff --git a/src/proxy/src/config_parser.rs b/src/proxy/src/config_parser.rs index badcac3e..0a49fb14 100644 --- a/src/proxy/src/config_parser.rs +++ b/src/proxy/src/config_parser.rs @@ -90,14 +90,9 @@ pub struct EfsConfig { #[cfg(test)] pub mod tests { use super::*; + use crate::test_utils::TEST_CONFIG_PATH; use std::{path::Path, string::String}; - pub static TEST_CONFIG_PATH: &str = "tests/certs/test_config.ini"; - - pub fn get_test_config() -> ProxyConfig { - ProxyConfig::from_path(Path::new(TEST_CONFIG_PATH)).expect("Could not parse test config.") - } - #[test] fn test_read_config_from_file() { assert!(ProxyConfig::from_path(Path::new(TEST_CONFIG_PATH)).is_ok()); diff --git a/src/proxy/src/connection_task.rs b/src/proxy/src/connection_task.rs new file mode 100644 index 00000000..bb759900 --- /dev/null +++ b/src/proxy/src/connection_task.rs @@ -0,0 +1,159 @@ +use bytes::BytesMut; +use log::{debug, error, trace}; +use tokio::{ + io::{split, AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf}, + sync::mpsc::{self}, +}; +use tokio_util::sync::CancellationToken; + +use crate::{ + connections::ProxyStream, + rpc::RpcBatch, + shutdown::{ShutdownHandle, ShutdownReason}, +}; +use crate::{ + proxy_task::{ConnectionMessage, BUFFER_SIZE}, + rpc::RpcFragmentParseError, +}; + +pub struct ConnectionTask { + stream: S, + proxy_receiver: mpsc::Receiver, + proxy_sender: mpsc::Sender, +} + +impl ConnectionTask { + pub fn new( + stream: S, + proxy_receiver: mpsc::Receiver, + proxy_sender: mpsc::Sender, + ) -> Self { + Self { + stream, + proxy_receiver, + proxy_sender, + } + } + + pub async fn run(self, shutdown_handle: ShutdownHandle) { + let (r, w) = split(self.stream); + + let shutdown = shutdown_handle.clone(); + + // This CancellationToken facilitates graceful TLS connection closures by ensuring that + // that the ReadHalf is dropped only after the WriteHalf.shutdown() has returned + let 
connection_cancellation_token = CancellationToken::new(); + + // ConnectionTask Writer receives messages from NFSClient's Reader (ProxyTask reader) and writes them to connection socket + let writer = Self::run_writer( + w, + self.proxy_receiver, + shutdown_handle.clone(), + connection_cancellation_token.clone(), + ); + tokio::spawn(async move { + tokio::select! { + _ = shutdown.cancellation_token.cancelled() => trace!("Cancelled"), + _ = writer => {}, + } + }); + + // ConnectionTask Reader reads messages from NFSServer's socket and sends to NFSClient Writer (ProxyTask writer) + let reader = Self::run_reader(r, self.proxy_sender, shutdown_handle.clone()); + tokio::spawn(async move { + tokio::select! { + _ = connection_cancellation_token.cancelled() => trace!("Cancelled"), + _ = reader => {}, + } + }); + } + + // EFS to Proxy + async fn run_reader( + mut server_read_half: ReadHalf, + sender: mpsc::Sender, + shutdown: ShutdownHandle, + ) { + let reason; + let mut buffer = BytesMut::with_capacity(BUFFER_SIZE); + loop { + match server_read_half.read_buf(&mut buffer).await { + Ok(n_read) => { + if n_read == 0 { + reason = Option::Some(ShutdownReason::NeedsRestart); + break; + } + } + Err(e) => { + debug!("Error reading from server: {:?}", e); + reason = Option::Some(ShutdownReason::NeedsRestart); + break; + } + }; + + match RpcBatch::parse_batch(&mut buffer) { + Ok(Some(batch)) => { + if let Err(e) = sender.send(ConnectionMessage::Response(batch)).await { + debug!("Error sending result back: {:?}", e); + reason = Some(ShutdownReason::UnexpectedError); + break; + } + } + Err(RpcFragmentParseError::InvalidSizeTooSmall) => { + drop(server_read_half); + error!("Server Error: invalid RPC size - size too small"); + reason = Some(ShutdownReason::UnexpectedError); + break; + } + Err(RpcFragmentParseError::SizeLimitExceeded) => { + drop(server_read_half); + error!("Server Error: invalid RPC size - size limit exceeded"); + reason = Some(ShutdownReason::UnexpectedError); + break; + 
} + Ok(None) | Err(RpcFragmentParseError::Incomplete) => (), + } + + if buffer.capacity() == 0 { + buffer.reserve(BUFFER_SIZE) + } + } + shutdown.exit(reason).await; + } + + // Proxy to EFS + async fn run_writer( + mut server_write_half: WriteHalf, + mut receiver: mpsc::Receiver, + shutdown: ShutdownHandle, + connection_cancellation_token: CancellationToken, + ) { + let mut reason = Option::None; + loop { + let Some(batch) = receiver.recv().await else { + debug!("sender dropped"); + break; + }; + + for b in &batch.rpcs { + match server_write_half.write_all(b).await { + Ok(_) => (), + Err(e) => { + debug!("Error writing to server: {:?}", e); + reason = Option::Some(ShutdownReason::NeedsRestart); + break; + } + }; + } + } + + tokio::spawn(async move { + match server_write_half.shutdown().await { + Ok(_) => (), + Err(e) => debug!("Failed to gracefully shutdown connection: {}", e), + }; + connection_cancellation_token.cancel(); + }); + shutdown.exit(reason).await; + } +} diff --git a/src/proxy/src/connections.rs b/src/proxy/src/connections.rs index 67d4651e..aca91c39 100644 --- a/src/proxy/src/connections.rs +++ b/src/proxy/src/connections.rs @@ -390,11 +390,10 @@ impl PartitionFinder> for TlsPartitionFinder { #[cfg(test)] mod tests { use super::*; - use crate::config_parser::tests::get_test_config; + use crate::config_parser::ProxyConfig; use crate::connections::PartitionFinder; - use crate::controller::tests::{find_available_port, ServiceAction, TestService}; - use crate::controller::DEFAULT_SCALE_UP_CONFIG; - use crate::ProxyConfig; + use crate::test_utils::find_available_port; + use crate::tls::get_tls_config; use nix::sys::signal::kill; use nix::sys::signal::Signal; use std::path::Path; @@ -409,120 +408,6 @@ mod tests { incarnation: 0, }; - struct MultiplexTest { - service: TestService, - partition_finder: TlsPartitionFinder, - initial_partition_id: PartitionId, - } - - impl MultiplexTest { - async fn new() -> Self { - let service = TestService::new(true).await; - 
MultiplexTest::new_with_service(service).await - } - - async fn new_with_service(service: TestService) -> Self { - let mut tls_config = TlsConfig::new_from_config(&get_test_config()) - .await - .expect("Failed to acquire TlsConfig."); - tls_config.remote_addr = format!("127.0.0.1:{}", service.listen_port); - - let partition_finder = TlsPartitionFinder::new(Arc::new(Mutex::new(tls_config))); - - let (_s, id, _) = partition_finder - .establish_connection(PROXY_ID) - .await - .expect("Failed to connect to server"); - - let Some(initial_partition_id) = id else { - panic!("Partition Id not found for initial connection.") - }; - - MultiplexTest { - service, - partition_finder, - initial_partition_id, - } - } - } - - #[tokio::test] - async fn test_establish_multiplex_same_partition_found() { - let test = MultiplexTest::new().await; - - let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); - - let (new_connnection_id, connections, _) = test - .partition_finder - .inner_establish_multiplex_connection( - PROXY_ID, - Some(test.initial_partition_id), - shutdown_handle, - ) - .await - .expect("Could not establish a multiplex connection"); - - assert_eq!(test.initial_partition_id, new_connnection_id); - assert_eq!( - DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections - 1, - connections.len() as i32 - ); - - test.service.shutdown().await; - } - - #[tokio::test] - async fn test_establish_multiplex_new_partition_found() { - let test = MultiplexTest::new().await; - - let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); - - test.service - .post_action(ServiceAction::StopPartitionAcceptor( - test.initial_partition_id, - )) - .await; - - let (new_connnection_id, connections, _) = test - .partition_finder - .inner_establish_multiplex_connection( - PROXY_ID, - Some(test.initial_partition_id), - shutdown_handle, - ) - .await - .expect("Could not establish a multiplex connection"); - - assert_eq!( - 
DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections, - connections.len() as i32 - ); - assert_ne!(test.initial_partition_id, new_connnection_id); - - test.service.shutdown().await; - } - - #[tokio::test] - async fn test_establish_multiplex_no_target() { - let test = MultiplexTest::new().await; - - let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); - - let (new_connnection_id, connections, _) = test - .partition_finder - .inner_establish_multiplex_connection(PROXY_ID, None, shutdown_handle) - .await - .expect("Could not establish a multiplex connection"); - - assert_eq!( - DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections, - connections.len() as i32 - ); - assert_ne!(test.initial_partition_id, new_connnection_id); - - test.service.shutdown().await; - } - #[tokio::test] async fn test_establish_connection_timeout() { let (_listener, port) = find_available_port().await; @@ -581,33 +466,6 @@ mod tests { assert!(matches!(error, Err((ConnectError::Cancelled, None)))); } - #[tokio::test] - async fn test_scale_up_max_attempts() { - // Create a service in which the all calls of bind_client_to_partition will return a - // different value. 
Our "TestService" returns these PartitionIds in a round robin fashion, - // and this service will have more PartitionId than MAX_ATTEMPT_COUNT - let service = - TestService::new_with_partition_count((MAX_ATTEMPT_COUNT + 2) as usize, true).await; - - let test = MultiplexTest::new_with_service(service).await; - - let (shutdown_handle, _waiter) = ShutdownHandle::new(CancellationToken::new()); - - let error = test - .partition_finder - .inner_establish_multiplex_connection( - PROXY_ID, - Some(test.initial_partition_id), - shutdown_handle.clone(), - ) - .await; - - assert!(matches!( - error, - Err((ConnectError::MaxAttemptsExceeded, None)) - )); - } - #[allow(clippy::enum_variant_names)] enum BrokenPartitionFinderType { _ConnectIoError, @@ -690,7 +548,7 @@ mod tests { if (sigs_hangup_listener.recv().await).is_some() { //Reloading the TLS configuration let mut locked_config = cloned_tls_config_ptr.lock().await; - *locked_config = crate::get_tls_config(&proxy_config).await.unwrap(); + *locked_config = get_tls_config(&proxy_config).await.unwrap(); tx.send(()).unwrap(); break; } diff --git a/src/proxy/src/controller.rs b/src/proxy/src/controller.rs index 4891b769..1b00756c 100644 --- a/src/proxy/src/controller.rs +++ b/src/proxy/src/controller.rs @@ -5,8 +5,9 @@ use crate::shutdown::ShutdownReason; use crate::status_reporter::{self, StatusReporter}; use crate::{ connections::{PartitionFinder, ProxyStream}, - proxy::{PerformanceStats, Proxy}, + proxy::Proxy, proxy_identifier::ProxyIdentifier, + proxy_task::PerformanceStats, shutdown::ShutdownHandle, }; use log::{debug, error, info, warn}; @@ -69,13 +70,13 @@ impl IncarnationState { } pub struct Controller { - listener: TcpListener, - partition_finder: Arc + Sync + Send>, - proxy_id: ProxyIdentifier, - scale_up_attempt_count: u64, - restart_count: u64, - scale_up_config: ScaleUpConfig, - status_reporter: StatusReporter, + pub listener: TcpListener, + pub partition_finder: Arc + Sync + Send>, + pub proxy_id: ProxyIdentifier, + 
pub scale_up_attempt_count: u64, + pub restart_count: u64, + pub scale_up_config: ScaleUpConfig, + pub status_reporter: StatusReporter, } impl Controller { @@ -101,6 +102,7 @@ impl Controller { pub async fn run(mut self, token: CancellationToken) -> Option { let mut ready_connections = None; + // Main Proxy incarnation management loop loop { info!("Starting new incarnation of proxy"); let nfs_client = match self.listener.accept().await { @@ -131,7 +133,8 @@ impl Controller { return Some(ShutdownReason::UnexpectedError); } - let (events_tx, mut events_rx) = mpsc::channel(1024); + // Create Status Notifications channel, to be used by Proxy's status_reporter for notifying controller about Proxy status + let (status_events_tx, mut status_events_rx) = mpsc::channel(1024); let (shutdown, mut waiter) = ShutdownHandle::new(token.child_token()); let (partition_id, partition_servers, scale_up_config) = match ready_connections { @@ -167,12 +170,18 @@ impl Controller { let mut state = IncarnationState::new( self.proxy_id, partition_id, - events_tx.clone(), + status_events_tx.clone(), partition_servers.len() as u16, ); - let mut proxy = Proxy::new(nfs_client, partition_servers, events_tx, shutdown.clone()); + let mut proxy = Proxy::new( + nfs_client, + partition_servers, + status_events_tx, + shutdown.clone(), + ); + // Proxy status loop loop { let mut err = Ok(()); tokio::select! 
{ @@ -188,7 +197,7 @@ impl Controller { }; self.status_reporter.publish_status(report).await; } - event = events_rx.recv() => { + event = status_events_rx.recv() => { if let Some(next_event) = event { match self.handle_event(next_event, &mut proxy, &mut state, shutdown.clone()).await { Ok(EventResult::Restart(connections)) => { @@ -320,1297 +329,3 @@ impl Controller { Ok(EventResult::Ok) } } - -#[cfg(test)] -pub mod tests { - use crate::config_parser::tests::get_test_config; - use crate::connections::PlainTextPartitionFinder; - use crate::connections::ProxyStream; - use crate::connections::MULTIPLEX_CONNECTION_TIMEOUT_SEC; - use crate::controller::ConnectionSearchState; - use crate::controller::DEFAULT_SCALE_UP_BACKOFF; - use crate::efs_prot; - use crate::efs_prot::BindResponse; - use crate::efs_prot::ScaleUpConfig; - use crate::efs_rpc; - use crate::efs_rpc::PartitionId; - use crate::proxy; - use crate::proxy_identifier::ProxyIdentifier; - use crate::proxy_identifier::INITIAL_INCARNATION; - use crate::rpc; - use crate::rpc::RPC_HEADER_SIZE; - use crate::shutdown::ShutdownReason; - use crate::status_reporter; - use crate::status_reporter::Report; - use crate::status_reporter::StatusRequester; - use crate::tls::tests::get_server_config; - use crate::tls::TlsConfig; - use crate::{connections::TlsPartitionFinder, controller::Controller}; - - use bytes::BytesMut; - use log::debug; - use onc_rpc::RpcMessage; - use rand::Rng; - use std::collections::HashMap; - use std::collections::HashSet; - use std::io::ErrorKind; - use std::sync::atomic::AtomicU32; - use std::time::Duration; - use std::{self, io::Error, sync::Arc}; - use test_case::test_case; - use tokio::time::error::Elapsed; - use tokio::time::timeout; - use tokio::{ - io::AsyncWriteExt, - net::{TcpListener, TcpStream}, - sync::oneshot, - sync::Mutex, - task::JoinHandle, - }; - use tokio_util::sync::CancellationToken; - - use super::DEFAULT_SCALE_UP_CONFIG; - - #[derive(Copy, Clone, Debug, PartialEq)] - pub enum 
ServiceAction { - // Server will reject the next incoming TCP connection. Further attempts will succeed. - // - RejectNextNewConnectionRequest, - - // The server will close the next connection that receives a request from the proxy. - // - CloseOnNextRequest, - - // The server will close a random connection without waiting for any incoming request. - // - CloseRandomConnection, - - // This service will restart accepting connections to the given PartitionId - // - _RestartPartitionAcceptor(PartitionId), - - // This service will not accept connections to the given PartitionId - // - StopPartitionAcceptor(PartitionId), - - // This service will close the connection if a bind_client_to_partition request is received - // - CloseOnNextBindClientToPartitionRequest, - - // The service will send BindResponse::RETRY_LATER on subsequent bind_client_to_partition requests - // - DisableScaleUp, - - // The service will allow re-enabling scale up after the DisableScaleUp action is posted. - // - EnableScaleUp, - - // The service will respond with BindResponse::RETRY on the next n bind_client_to_partition requests - SendRetries(u32), - } - - const PARTITION_COUNT: usize = 3; - - pub struct TestService { - pub listen_port: u16, - posted_action: Arc>>, - shutdown_tx: oneshot::Sender<()>, - join_handle: JoinHandle<()>, - pub partition_ids: Vec, - pub stopped_partitions: Arc>>, - pub request_counter: Arc>>>>, - } - - impl TestService { - const ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC: i32 = 0; - const NEVER_SCALE_UP_THRESHOLD_BYTES_PER_SEC: i32 = i32::MAX; - - pub async fn new(tls: bool) -> Self { - TestService::new_with_partition_count(PARTITION_COUNT, tls).await - } - - pub async fn new_with_partition_count(count: usize, tls: bool) -> Self { - TestService::new_with_partition_count_and_scale_up_config( - count, - super::DEFAULT_SCALE_UP_CONFIG, - tls, - ) - .await - } - - pub async fn new_with_throughput_scale_up_threshold(threshold: i32, tls: bool) -> Self { - let mut config = 
super::DEFAULT_SCALE_UP_CONFIG; - config.scale_up_bytes_per_sec_threshold = threshold; - TestService::new_with_partition_count_and_scale_up_config(PARTITION_COUNT, config, tls) - .await - } - - pub async fn new_with_partition_count_and_scale_up_threshold( - count: usize, - threshold: i32, - tls: bool, - ) -> Self { - let mut config = super::DEFAULT_SCALE_UP_CONFIG; - config.scale_up_bytes_per_sec_threshold = threshold; - TestService::new_with_partition_count_and_scale_up_config(count, config, tls).await - } - - pub async fn new_with_partition_count_and_scale_up_config( - count: usize, - scale_up_config: ScaleUpConfig, - tls: bool, - ) -> Self { - let (tcp_listener, listen_port) = find_available_port().await; - - let partition_ids = (0..count) - .map(|_| PartitionId { - id: efs_rpc::tests::generate_partition_id().0, - }) - .collect::>(); - - let stopped_partitions = Arc::new(Mutex::new(HashSet::new())); - - let mut counter = HashMap::new(); - for id in partition_ids.iter() { - counter.insert(*id, Vec::new()); - } - let request_counter = Arc::new(Mutex::new(counter)); - - let posted_action = Arc::new(Mutex::new(Option::None)); - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - - let service_handle = TestService::run( - tcp_listener, - scale_up_config, - partition_ids.clone(), - stopped_partitions.clone(), - request_counter.clone(), - posted_action.clone(), - tls, - shutdown_rx, - ); - - TestService { - listen_port, - posted_action, - shutdown_tx, - join_handle: service_handle, - partition_ids, - stopped_partitions, - request_counter, - } - } - - pub async fn post_action(&self, new_action: ServiceAction) { - match new_action { - ServiceAction::_RestartPartitionAcceptor(id) => { - let mut stopped = self.stopped_partitions.lock().await; - assert!(stopped.remove(&id), "Partition is not stopped"); - return; - } - ServiceAction::StopPartitionAcceptor(id) => { - let mut stopped = self.stopped_partitions.lock().await; - stopped.insert(id); - return; - } - 
ServiceAction::EnableScaleUp => { - TestService::check_and_consume_action( - &self.posted_action, - ServiceAction::DisableScaleUp, - ) - .await; - return; - } - _ => (), - }; - - let mut consumable_action = self.posted_action.lock().await; - if consumable_action.is_some() { - panic!("Previous action was not consumed"); - } - *consumable_action = Some(new_action); - } - - #[allow(clippy::too_many_arguments)] - fn run( - listener: TcpListener, - scale_up_config: ScaleUpConfig, - partition_ids: Vec, - stopped_partitions: Arc>>, - request_counter: Arc>>>>, - posted_action: Arc>>, - tls: bool, - mut shutdown_rx: oneshot::Receiver<()>, - ) -> JoinHandle<()> { - tokio::spawn(async move { - let mut partition_idx = 0; - loop { - tokio::select! { - socket = listener.accept() => { - let Ok((tcp_stream, _socket_addr)) = socket else { - panic!("Failed to establish connection to client"); - }; - - if tls { - let tls_acceptor = s2n_tls_tokio::TlsAcceptor::new(get_server_config().await.expect("Could not get config")); - let tls_stream = match tls_acceptor.accept(tcp_stream).await { - Ok(conn) => conn, - Err(e) => { - panic!("Failed to establish TLS connection: {}", e); - } - }; - Self::inner_run(tls_stream, scale_up_config, &mut partition_idx, &partition_ids, stopped_partitions.clone(), request_counter.clone(), posted_action.clone()).await; - } else { - Self::inner_run(tcp_stream, scale_up_config, &mut partition_idx, &partition_ids, stopped_partitions.clone(), request_counter.clone(), posted_action.clone()).await; - } - }, - _ = &mut shutdown_rx => { - break; - } - }; - } - }) - } - - async fn inner_run( - stream: S, - scale_up_config: ScaleUpConfig, - partition_idx: &mut usize, - partition_ids: &[PartitionId], - stopped_partitions: Arc>>, - request_counter: Arc>>>>, - posted_action: Arc>>, - ) { - if TestService::check_and_consume_action( - &posted_action, - ServiceAction::RejectNextNewConnectionRequest, - ) - .await - || TestService::check_and_consume_action( - &posted_action, - 
ServiceAction::CloseRandomConnection, - ) - .await - { - debug!("RejectNextNewConnectionRequest processed"); - drop(stream); - } else { - let stopped = stopped_partitions.lock().await; - let mut next_id = None; - for i in 0..partition_ids.len() { - *partition_idx = (*partition_idx + i + 1) % partition_ids.len(); - if !stopped.contains(&partition_ids[*partition_idx]) { - next_id = Some(partition_ids[*partition_idx]); - break; - } - } - let Some(id) = next_id else { - panic!("No available PartitionIds") - }; - - let request_count = Arc::new(AtomicU32::new(0)); - request_counter - .lock() - .await - .get_mut(&id) - .expect("Counter for partition not found") - .push(request_count.clone()); - - tokio::spawn(TestService::new_connection( - stream, - scale_up_config, - posted_action.clone(), - id, - request_count.clone(), - )); - } - } - - async fn check_and_consume_action( - posted_action: &Arc>>, - to_check: ServiceAction, - ) -> bool { - let mut action = posted_action.lock().await; - if *action == Some(to_check) { - *action = Option::None; - true - } else { - false - } - } - - async fn check_action( - posted_action: &Arc>>, - to_check: ServiceAction, - ) -> bool { - let action = posted_action.lock().await; - *action == Some(to_check) - } - - async fn new_connection( - mut stream: S, - scale_up_config: ScaleUpConfig, - posted_action: Arc>>, - partition_id: PartitionId, - request_count: Arc, - ) { - loop { - let Ok(message) = rpc::read_rpc_bytes(&mut stream).await else { - break; - }; - - request_count.fetch_add(1, std::sync::atomic::Ordering::AcqRel); - - if TestService::check_and_consume_action( - &posted_action, - ServiceAction::CloseOnNextRequest, - ) - .await - { - debug!("CloseOnNextRequest processed"); - break; - } - - let response = match TestService::parse_bind_client_to_partition_request(&message) { - Ok(rpc_message) => { - if TestService::check_and_consume_action( - &posted_action, - ServiceAction::CloseOnNextBindClientToPartitionRequest, - ) - .await - { - 
debug!("CloseOnNextBindClientToPartitionRequest processed"); - break; - } - - let mut bind_response = - BindResponse::READY(efs_prot::PartitionId(partition_id.id)); - - if TestService::check_action(&posted_action, ServiceAction::DisableScaleUp) - .await - { - bind_response = BindResponse::RETRY_LATER( - "Returning BindResponse::RETRY_LATER".into(), - ); - } - - let mut action = posted_action.lock().await; - if let Some(ServiceAction::SendRetries(count)) = *action { - bind_response = - BindResponse::RETRY("Returning BindResponse::RETRY".into()); - if count > 1 { - *action = Some(ServiceAction::SendRetries(count - 1)); - } else { - *action = None; - } - } - - efs_rpc::tests::create_bind_client_to_partition_response( - rpc_message.xid(), - bind_response, - scale_up_config, - ) - .expect("Could not create response") - } - Err(_) => { - // If the test server doesn't parse a `bind_client_to_partition` request, - // then echo request back to the client - message - } - }; - - stream - .write_all(&response) - .await - .expect("Could not write to stream"); - } - } - - #[allow(clippy::type_complexity)] - fn parse_bind_client_to_partition_request( - request: &Vec, - ) -> Result, Box> { - let rpc_message = onc_rpc::RpcMessage::try_from(request.as_slice())?; - efs_rpc::tests::parse_bind_client_to_partition_request(&rpc_message)?; - Ok(rpc_message) - } - - pub async fn shutdown(self) { - drop(self.shutdown_tx); - self.join_handle.await.unwrap(); - } - } - - struct TestClient { - stream: TcpStream, - next_xid: u32, - } - - impl TestClient { - async fn new(proxy_port: u16) -> Self { - let stream = TcpStream::connect(("127.0.0.1", proxy_port)).await.unwrap(); - Self { - stream, - next_xid: 0, - } - } - - async fn send_message_with_size(&mut self, size: usize) -> Result<(), Error> { - self.next_xid += 1; - let (request, expected_data) = rpc::test::generate_msg_fragments(size, 1); - self.stream.write_all(&request).await?; - - let response = rpc::read_rpc_bytes(&mut 
self.stream).await?; - - let payload_result = - rpc::RpcBatch::parse_batch(&mut BytesMut::from(response.as_slice())) - .expect("No message found") - .expect("failed to parse"); - - let rpc = payload_result.rpcs.first().expect("No RPCs found"); - assert_eq!(expected_data, rpc.to_vec()[RPC_HEADER_SIZE..]); - Ok(()) - } - - async fn send_partial_message_with_size(&mut self, size: usize) -> Result<(), Error> { - self.next_xid += 1; - let (_, m1) = rpc::test::generate_msg_fragments(size, 1); - let mut rng = rand::thread_rng(); - self.stream - .write_all(&m1[0..rng.gen_range(1..size - 1)]) - .await?; - Ok(()) - } - } - - pub struct ProxyUnderTest { - listen_port: u16, - handle: JoinHandle>, - status_requester: StatusRequester, - scale_up_config: ScaleUpConfig, - } - - impl ProxyUnderTest { - pub async fn new(tls: bool, server_port: u16) -> Self { - let scale_up_config = DEFAULT_SCALE_UP_CONFIG; - let (tcp_listener, listen_port) = find_available_port().await; - - let (status_requester, status_reporter) = status_reporter::create_status_channel(); - - let handle = if tls { - let mut tls_config = TlsConfig::new_from_config(&get_test_config()) - .await - .expect("Failed to acquire TlsConfig."); - tls_config.remote_addr = format!("127.0.0.1:{}", server_port); - - let partition_finder = - Arc::new(TlsPartitionFinder::new(Arc::new(Mutex::new(tls_config)))); - - let controller = Controller { - listener: tcp_listener, - partition_finder, - proxy_id: ProxyIdentifier::new(), - scale_up_attempt_count: 0, - restart_count: 0, - scale_up_config, - status_reporter, - }; - - let token = CancellationToken::new(); - tokio::spawn(controller.run(token)) - } else { - let partition_finder = Arc::new(PlainTextPartitionFinder { - mount_target_addr: format!("127.0.0.1:{}", server_port), - }); - - let controller = Controller { - listener: tcp_listener, - partition_finder, - proxy_id: ProxyIdentifier::new(), - scale_up_attempt_count: 0, - restart_count: 0, - scale_up_config, - status_reporter, - }; 
- - let token = CancellationToken::new(); - tokio::spawn(controller.run(token)) - }; - - Self { - listen_port, - handle, - status_requester, - scale_up_config, - } - } - - pub async fn poll_scale_up(&mut self) -> Result<(), Elapsed> { - timeout(Duration::from_secs(5), async { - loop { - let num_connections = self.get_num_connections().await; - if num_connections == self.scale_up_config.max_multiplexed_connections as usize - { - break; - } else { - tokio::time::sleep(Duration::from_millis(500)).await; - } - } - }) - .await - } - - pub async fn get_report(&mut self) -> Report { - self.status_requester - ._request_status() - .await - .expect("Could not get report") - } - - pub async fn get_proxy_id(&mut self) -> ProxyIdentifier { - let report = self.get_report().await; - report.proxy_id - } - - async fn get_num_connections(&mut self) -> usize { - let report = self.get_report().await; - report.num_connections - } - } - - pub async fn find_available_port() -> (TcpListener, u16) { - for port in 10000..15000 { - match TcpListener::bind(("127.0.0.1", port)).await { - Ok(v) => { - return (v, port); - } - Err(_) => continue, - } - } - panic!("Failed to find port"); - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_basic(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(10).await.unwrap(); - client.send_message_with_size(1024).await.unwrap(); - - let report = proxy.get_report().await; - assert!(report.partition_id.is_some()); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_success_after_connection_closed_on_bind_client_to_partition_request( - tls_enabled: bool, - ) { - let service = TestService::new(tls_enabled).await; - let mut 
proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut client = TestClient::new(proxy.listen_port).await; - - service - .post_action(ServiceAction::CloseOnNextBindClientToPartitionRequest) - .await; - - client.send_message_with_size(10).await.unwrap(); - client.send_message_with_size(1024).await.unwrap(); - - let report = proxy.get_report().await; - assert!(report.partition_id.is_none()); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_success_after_bind_client_to_partition_stop_response_on_initial_connection( - tls_enabled: bool, - ) { - let service = TestService::new(tls_enabled).await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut client = TestClient::new(proxy.listen_port).await; - - service.post_action(ServiceAction::DisableScaleUp).await; - - client.send_message_with_size(10).await.unwrap(); - client.send_message_with_size(1024).await.unwrap(); - - let report = proxy.get_report().await; - assert!(report.partition_id.is_none()); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_closed_connection(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(10).await.unwrap(); - service.post_action(ServiceAction::CloseOnNextRequest).await; - let result = client.send_message_with_size(10).await; - assert!(result.is_err()); - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_closed_connection_after_scale_up(tls_enabled: bool) { - // Use a single partition so that the same PartitionId is return on each - // bind_client_to_partition request. 
This prevents a controller "reset", which simplifies - // testing that the proxy will retry scale up after the backoff time as elapsed. - // - let scale_up_threshold = 10; - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 1, - scale_up_threshold, - tls_enabled, - ) - .await; - - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(100).await.unwrap(); - - // Expect that scale up does not occur - proxy.poll_scale_up().await.expect("Scale up did not occur"); - - // Close one proxy connection. The subsequent requests should fail. - service.post_action(ServiceAction::CloseOnNextRequest).await; - client.send_message_with_size(100).await.unwrap_err(); - - // Wait some time for proxy to reset - tokio::time::sleep(Duration::from_secs(5)).await; - - for _ in 0..5 { - client.send_message_with_size(100).await.unwrap_err(); - } - - // Reconnecting with the client should result in successful requests - let mut new_client = TestClient::new(proxy.listen_port).await; - new_client.send_message_with_size(5).await.unwrap(); - - let num_connections = proxy.get_report().await.num_connections; - assert_eq!(1, num_connections); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_closed_connection_when_big_frame_sent(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut client = TestClient::new(proxy.listen_port).await; - let result = client.send_message_with_size(22222220).await; - assert!(result.is_err()); - let error = result.unwrap_err(); - assert!( - error.kind() == ErrorKind::BrokenPipe || error.kind() == ErrorKind::ConnectionReset - ); - let reason_opt = proxy.handle.await.unwrap(); - assert_eq!(reason_opt, 
Some(ShutdownReason::FrameSizeExceeded)); - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_message_too_small(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut client = TestClient::new(proxy.listen_port).await; - let _ = client.send_message_with_size(1).await; - let reason_opt = proxy.handle.await.unwrap(); - assert_eq!(reason_opt, Some(ShutdownReason::FrameSizeTooSmall)); - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_client_disconnects(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut initial_client = TestClient::new(proxy.listen_port).await; - let _ = initial_client.send_partial_message_with_size(1000).await; - // Drop has been implemented to simulate client disconnection - drop(initial_client); - - // After initial_client is disconnects, the proxy should still accept new connection - let mut client = TestClient::new(proxy.listen_port).await; - assert!(matches!( - client.send_partial_message_with_size(1000).await, - Ok(()) - )); - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_client_disconnects_without_send(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - // Drop this client to simulate a connection to the proxy port that immediately closes - let disconnecting_client = TestClient::new(proxy.listen_port).await; - drop(disconnecting_client); - - // After the connection to the disconnecting_client is dropped, the proxy should still accept new connection - let mut client = TestClient::new(proxy.listen_port).await; - assert!(matches!( - 
client.send_partial_message_with_size(1000).await, - Ok(()) - )); - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_handle_server_disconnect(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - let mut client = TestClient::new(proxy.listen_port).await; - assert!(client.send_message_with_size(10).await.is_ok()); - - // Incarnation is incremented when connection with NFS client is established - assert_eq!( - INITIAL_INCARNATION + 1, - proxy.get_proxy_id().await.incarnation - ); - - service.post_action(ServiceAction::CloseOnNextRequest).await; - - assert!(client.send_message_with_size(10).await.is_err()); - - // Reconnect - client = TestClient::new(proxy.listen_port).await; - assert!(client.send_message_with_size(10).await.is_ok()); - - // Incarnation is incremented when connection with NFS client is reestablished - assert_eq!( - INITIAL_INCARNATION + 2, - proxy.get_proxy_id().await.incarnation - ); - - proxy.handle.abort(); - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_scale_up_same_partition(tls_enabled: bool) { - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 1, - TestService::ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC, - tls_enabled, - ) - .await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - // A request from the client will cause the proxy to establish an addition connection to the NFS server - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(10).await.unwrap(); - - proxy - .poll_scale_up() - .await - .expect("Timeout exceeded while awaiting scale up"); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn 
test_scale_up_periodic_workload(tls_enabled: bool) { - // Requests of 15 bytes every 100 milliseconds should result in 300 bytes of traffic (150 - // bytes sent, 150 bytes received) every second. This exceeds the scale_up_threshold of 299 - // bytes/s. - let scale_up_threshold = 299; - let num_requests = 60; - let request_size = 30; - let request_interval_millis = 100; - - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 1, - scale_up_threshold, - tls_enabled, - ) - .await; - - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - let mut client = TestClient::new(proxy.listen_port).await; - for _ in 0..num_requests { - client.send_message_with_size(request_size).await.unwrap(); - tokio::time::sleep(Duration::from_millis(request_interval_millis)).await; - } - - proxy - .poll_scale_up() - .await - .expect("Timeout exceeded while awaiting scale up"); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_no_scale_up_periodic_workload(tls_enabled: bool) { - // Requests of 10 bytes every 100 milliseconds should result in 200 bytes of traffic (100 - // bytes sent, 100 bytes received) every seconds. This does not exceeds the - // scale_up_threshold of 300 bytes/s. - // - let scale_up_threshold = 300; - let num_requests = 60; - let request_size = 10; - let request_interval_millis = 100; - - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 1, - scale_up_threshold, - tls_enabled, - ) - .await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - // Only requests proxied within the monitoring window will be considered when determining - // when to scale up. The following requests should not result in a scale up attempt. 
- // - let mut client = TestClient::new(proxy.listen_port).await; - for _ in 0..num_requests { - client.send_message_with_size(request_size).await.unwrap(); - tokio::time::sleep(Duration::from_millis(request_interval_millis)).await; - } - - proxy - .poll_scale_up() - .await - .expect_err("Unexpected Scale Up"); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_scale_up_new_partition(tls_enabled: bool) { - let service = TestService::new_with_throughput_scale_up_threshold( - TestService::ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC, - tls_enabled, - ) - .await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - // A request from the client will cause the proxy to establish an addition connection to - // the NFS server - // - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(10).await.unwrap(); - - let report = proxy.get_report().await; - let initial_partition_id = report.partition_id.expect("No PartitionId"); - - service - .post_action(ServiceAction::StopPartitionAcceptor(initial_partition_id)) - .await; - - // After scale up, we need to wait for the controller to reset and to listen to a new - // connection from the client - // - tokio::time::sleep(Duration::from_secs(5)).await; - - let mut new_client = TestClient::new(proxy.listen_port).await; - new_client.send_message_with_size(10).await.unwrap(); - - proxy - .poll_scale_up() - .await - .expect("Timeout exceeded while awaiting scale up"); - - let connection_state = proxy.get_report().await.connection_state; - assert_eq!(ConnectionSearchState::Idle, connection_state); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_successful_scale_up_with_retries(tls_enabled: bool) { - let scale_up_threshold = 10; - let service = 
TestService::new_with_partition_count_and_scale_up_threshold( - 1, - scale_up_threshold, - tls_enabled, - ) - .await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - // A request from the client will cause the proxy to establish an addition connection to the NFS server - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(5).await.unwrap(); - - service - .post_action(ServiceAction::SendRetries(std::cmp::min( - 5, - crate::connections::MAX_ATTEMPT_COUNT - 5, - ))) - .await; - - client.send_message_with_size(100).await.unwrap(); - - proxy - .poll_scale_up() - .await - .expect("Timeout exceeded while awaiting scale up"); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_no_scale_up_threshold_not_exceed(tls_enabled: bool) { - let service = TestService::new_with_throughput_scale_up_threshold( - TestService::NEVER_SCALE_UP_THRESHOLD_BYTES_PER_SEC, - tls_enabled, - ) - .await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - // Requests from the client below the throughput threshold should not cause new connections - // to the NFS server to be established - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(10).await.unwrap(); - - proxy - .poll_scale_up() - .await - .expect_err("Unexpected scale up occured"); - - let connection_state = proxy.get_report().await.connection_state; - assert_eq!(ConnectionSearchState::Idle, connection_state); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_no_scale_up_if_already_scaled_up(tls_enabled: bool) { - let scale_up_threshold = 10; - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 5, - scale_up_threshold, - tls_enabled, - ) - .await; - - let mut proxy = 
ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - // Requests from the client below the throughput threshold should not cause scale up - let mut client = TestClient::new(proxy.listen_port).await; - client - .send_message_with_size((scale_up_threshold - 1) as usize) - .await - .unwrap(); - - // Stop initial partition so that the proxy resets after scale up - let initial_report = proxy.get_report().await; - let initial_partition_id = initial_report.partition_id.expect("No PartitionId"); - assert_eq!(0, initial_report.scale_up_attempt_count); - assert_eq!(0, initial_report.restart_count); - - service - .post_action(ServiceAction::StopPartitionAcceptor(initial_partition_id)) - .await; - - // This requests should cause scale up to be attempted - client - .send_message_with_size((scale_up_threshold + 10) as usize) - .await - .unwrap(); - - tokio::time::sleep(Duration::from_secs(5)).await; - let mut client = TestClient::new(proxy.listen_port).await; - client - .send_message_with_size((scale_up_threshold - 1) as usize) - .await - .unwrap(); - - proxy - .poll_scale_up() - .await - .expect("Timeout exceeded while awaiting scale up"); - - let second_report = proxy.get_report().await; - assert_eq!(ConnectionSearchState::Idle, second_report.connection_state); - assert_eq!( - DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections as usize, - second_report.num_connections - ); - assert_eq!(1, second_report.scale_up_attempt_count); - assert_eq!(1, second_report.restart_count); - - // Additional requests from the client should not cause additional scale up attempts - for _ in 0..5 { - client - .send_message_with_size((scale_up_threshold + 10) as usize) - .await - .unwrap(); - tokio::time::sleep(Duration::from_secs(1)).await; - } - - let third_report = proxy.get_report().await; - assert_eq!(ConnectionSearchState::Idle, third_report.connection_state); - assert_eq!( - DEFAULT_SCALE_UP_CONFIG.max_multiplexed_connections as usize, - third_report.num_connections - ); - 
assert_eq!(1, third_report.scale_up_attempt_count); - assert_eq!(1, third_report.restart_count); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_scale_up_failed_too_many_retries(tls_enabled: bool) { - // Use a single partition so that the same PartitionId is return on each - // bind_client_to_partition request. This prevents a controller "reset", which simplifies - // testing that the proxy will retry scale up after the backoff time as elapsed. - // - let scale_up_threshold = 10; - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 1, - scale_up_threshold, - tls_enabled, - ) - .await; - - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - let mut client = TestClient::new(proxy.listen_port).await; - - // Send an initial request in which the bind_client_to_partition request succeeds, and the - // main controller loop starts, but scale up is not requested - // - client - .send_message_with_size((scale_up_threshold - 1) as usize) - .await - .unwrap(); - - // Update the server to return BindResponse::RETRY until scale up attempt fails - service - .post_action(ServiceAction::SendRetries( - crate::connections::MAX_ATTEMPT_COUNT + 1, - )) - .await; - - // This request will cause the proxy to attempt scale up, in which bind_client_to_partition - // requests will fail - // - client.send_message_with_size(100).await.unwrap(); - - // Wait for scale up to fail - tokio::time::sleep(Duration::from_secs(5)).await; - - // Expect that scale up does not occur - proxy - .poll_scale_up() - .await - .expect_err("Unexpected scale up occured"); - - let report = proxy.get_report().await; - assert!(matches!( - report.connection_state, - ConnectionSearchState::Stop(_) - )); - - // Advance time and assert that scale up occurs after backoff duration elapsed - tokio::time::pause(); - tokio::time::advance( - DEFAULT_SCALE_UP_BACKOFF + 
Duration::from_secs(MULTIPLEX_CONNECTION_TIMEOUT_SEC), - ) - .await; - tokio::time::resume(); - - service.post_action(ServiceAction::EnableScaleUp).await; - client.send_message_with_size(100).await.unwrap(); - - proxy.poll_scale_up().await.expect("Scale up failed"); - - let connection_state = proxy.get_report().await.connection_state; - assert_eq!(ConnectionSearchState::Idle, connection_state); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_scale_up_failed_retry_later(tls_enabled: bool) { - // Use a single partition so that the same PartitionId is return on each - // bind_client_to_partition request. This prevents a controller "reset", which simplifies - // testing that the proxy will retry scale up after the backoff time as elapsed. - // - let scale_up_threshold = 10; - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 1, - scale_up_threshold, - tls_enabled, - ) - .await; - - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - let mut client = TestClient::new(proxy.listen_port).await; - - // Send an initial request in which the bind_client_to_partition request succeeds, and the - // main controller loop starts, but scale up is not requested - // - client - .send_message_with_size((scale_up_threshold - 1) as usize) - .await - .unwrap(); - - // Update the server to return BindResponse::RETRY_LATER on the next bind_client_to_partition rpc - // request - // - service.post_action(ServiceAction::DisableScaleUp).await; - - // This request will cause the proxy to attempt scale up, in which bind_client_to_partition - // requests will fail - // - client - .send_message_with_size((scale_up_threshold) as usize) - .await - .unwrap(); - - // Expect that scale up does not occur - proxy - .poll_scale_up() - .await - .expect_err("Unexpected scale up occured"); - - let report = proxy.get_report().await; - assert!(matches!( - 
report.connection_state, - ConnectionSearchState::Stop(_) - )); - - // Advance time and assert that scale up occurs after backoff duration elapsed - tokio::time::pause(); - tokio::time::advance( - DEFAULT_SCALE_UP_BACKOFF + Duration::from_secs(MULTIPLEX_CONNECTION_TIMEOUT_SEC), - ) - .await; - tokio::time::resume(); - - service.post_action(ServiceAction::EnableScaleUp).await; - client - .send_message_with_size( - (scale_up_threshold * proxy::REPORT_INTERVAL_SECS as i32) as usize, - ) - .await - .unwrap(); - - proxy.poll_scale_up().await.expect("Scale up failed"); - - let connection_state = proxy.get_report().await.connection_state; - assert_eq!(ConnectionSearchState::Idle, connection_state); - - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[test_case(false; "tls disabled")] - #[tokio::test] - async fn test_scale_up_connection_usage(tls_enabled: bool) { - // Prevent controller reset after scale up by using existing partition - let service = TestService::new_with_partition_count_and_scale_up_threshold( - 1, - TestService::ALWAYS_SCALE_UP_THRESHOLD_BYTES_PER_SEC, - tls_enabled, - ) - .await; - - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(10).await.unwrap(); - - proxy - .poll_scale_up() - .await - .expect("Timeout exceeded while awaiting scale up"); - - let request_to_send_per_connection = 10; - for _ in - 0..(request_to_send_per_connection * proxy.scale_up_config.max_multiplexed_connections) - { - client.send_message_with_size(10).await.unwrap(); - } - - // Check that requests are routed over multiple connections - let partition_id = proxy - .get_report() - .await - .partition_id - .expect("Missing PartitionId"); - - let request_counter = service.request_counter.lock().await; - let counts = request_counter - .get(&partition_id) - .expect("Missing request counts"); - - assert!(counts.len() >= 
proxy.scale_up_config.max_multiplexed_connections as usize); - for count in counts { - let operation_count = count.load(std::sync::atomic::Ordering::Acquire); - // Unused connections to a partition can be established during connection search. For - // this connections, the operation count will be 1 - // - assert!( - operation_count >= request_to_send_per_connection as u32 || operation_count == 1 - ); - } - - drop(request_counter); - service.shutdown().await; - } - - #[test_case(true; "tls enabled")] - #[tokio::test] - async fn test_efs_utils_port_test(tls_enabled: bool) { - let service = TestService::new(tls_enabled).await; - let mut proxy = ProxyUnderTest::new(tls_enabled, service.listen_port).await; - let mut port_health_check = TestClient::new(proxy.listen_port).await; - // Mimic efs-utils's port test which checks whether efs-proxy is alive. - port_health_check.stream.shutdown().await.unwrap(); - let mut client = TestClient::new(proxy.listen_port).await; - client.send_message_with_size(10).await.unwrap(); - client.send_message_with_size(1024).await.unwrap(); - - let report = proxy.get_report().await; - assert!(report.partition_id.is_some()); - - service.shutdown().await; - } -} diff --git a/src/proxy/src/efs_rpc.rs b/src/proxy/src/efs_rpc.rs index 199fb9e7..72fa789f 100644 --- a/src/proxy/src/efs_rpc.rs +++ b/src/proxy/src/efs_rpc.rs @@ -3,14 +3,13 @@ use tokio::io::AsyncWriteExt; use crate::connections::ProxyStream; use crate::efs_prot; -use crate::efs_prot::BindClientResponse; -use crate::efs_prot::OperationType; +use crate::efs_prot::{BindClientResponse, OperationType}; use crate::error::RpcError; use crate::proxy_identifier::ProxyIdentifier; use crate::rpc; -const PROGRAM_NUMBER: u32 = 100200; -const PROGRAM_VERSION: u32 = 1; +pub const EFS_PROGRAM_NUMBER: u32 = 100200; +pub const EFS_PROGRAM_VERSION: u32 = 1; #[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)] pub struct PartitionId { @@ -42,8 +41,8 @@ pub fn create_bind_client_to_partition_request( 
xdr_codec::pack(&payload, &mut payload_buf)?; let call_body = onc_rpc::CallBody::new( - PROGRAM_NUMBER, - PROGRAM_VERSION, + EFS_PROGRAM_NUMBER, + EFS_PROGRAM_VERSION, OperationType::OP_BIND_CLIENT_TO_PARTITION as u32, onc_rpc::auth::AuthFlavor::AuthNone::>(None), onc_rpc::auth::AuthFlavor::AuthNone::>(None), @@ -86,128 +85,14 @@ pub fn parse_bind_client_to_partition_response( #[cfg(test)] pub mod tests { use super::*; - use crate::controller::tests::TestService; - use crate::controller::DEFAULT_SCALE_UP_CONFIG; use crate::efs_prot::BindResponse; - use crate::efs_prot::ScaleUpConfig; - use crate::tls::tests::get_client_config; + + use crate::controller::DEFAULT_SCALE_UP_CONFIG; + use crate::test_utils::*; use onc_rpc::{AuthError, RejectedReply}; - use rand::RngCore; - use s2n_tls_tokio::TlsConnector; - use tokio::net::TcpStream; const XID: u32 = 1; - pub fn parse_bind_client_to_partition_request( - request: &onc_rpc::RpcMessage<&[u8], &[u8]>, - ) -> Result { - let call_body = request.call_body().expect("not a call rpc"); - - if PROGRAM_NUMBER != call_body.program() || PROGRAM_VERSION != call_body.program_version() { - return Err(RpcError::GarbageArgs); - } - - let mut payload = Cursor::new(call_body.payload()); - let raw_proxy_id = xdr_codec::unpack::<_, efs_prot::ProxyIdentifier>(&mut payload)?; - - Ok(ProxyIdentifier { - uuid: uuid::Builder::from_bytes( - raw_proxy_id - .identifier - .try_into() - .expect("Failed not convert vec to sized array"), - ) - .into_uuid(), - incarnation: i64::from_be_bytes( - raw_proxy_id - .incarnation - .try_into() - .expect("Failed to convert vec to sized array"), - ), - }) - } - - pub fn create_bind_client_to_partition_response( - xid: u32, - bind_response: BindResponse, - scale_up_config: ScaleUpConfig, - ) -> Result, RpcError> { - let mut payload_buf = Vec::new(); - - let response = BindClientResponse { - bind_response, - scale_up_config, - }; - xdr_codec::pack(&response, &mut payload_buf)?; - - 
create_bind_client_to_partition_response_from_accepted_status( - xid, - onc_rpc::AcceptedStatus::Success(payload_buf), - ) - } - - pub fn create_bind_client_to_partition_response_from_accepted_status( - xid: u32, - accepted_status: onc_rpc::AcceptedStatus>, - ) -> Result, RpcError> { - let reply_body = onc_rpc::ReplyBody::Accepted(onc_rpc::AcceptedReply::new( - onc_rpc::auth::AuthFlavor::AuthNone::>(None), - accepted_status, - )); - - onc_rpc::RpcMessage::new(xid, onc_rpc::MessageType::Reply(reply_body)) - .serialise() - .map_err(|e| e.into()) - } - - fn generate_parse_bind_client_to_partition_response_result( - accepted_status: onc_rpc::AcceptedStatus>, - ) -> Result { - let response = - create_bind_client_to_partition_response_from_accepted_status(XID, accepted_status)?; - let deserialized = onc_rpc::RpcMessage::try_from(response.as_slice())?; - parse_bind_client_to_partition_response(&deserialized) - } - - pub fn generate_partition_id() -> efs_prot::PartitionId { - let mut bytes = [0u8; efs_prot::PARTITION_ID_LENGTH as usize]; - rand::thread_rng().fill_bytes(&mut bytes); - efs_prot::PartitionId(bytes) - } - - #[tokio::test] - async fn test_bind_client_to_partition() { - let server = TestService::new(true).await; - let tcp_stream = TcpStream::connect(("127.0.0.1", server.listen_port)) - .await - .expect("Could not connect to test server."); - - let connector = - TlsConnector::new(get_client_config().await.expect("Failed to read config")); - let mut tls_stream = connector - .connect("localhost", tcp_stream) - .await - .expect("Failed to establish TLS Connection"); - - let response = bind_client_to_partition(ProxyIdentifier::new(), &mut tls_stream) - .await - .expect("bind_client_to_partition request failed"); - - let partition_id = match response.bind_response { - BindResponse::READY(id) => PartitionId { id: id.0 }, - _ => panic!(), - }; - - assert_eq!( - server - .partition_ids - .get(1) - .expect("Service has no partition IDs"), - &partition_id - ); - 
server.shutdown().await; - } - #[test] fn test_request_serde() -> Result<(), RpcError> { let proxy_id = ProxyIdentifier::new(); diff --git a/src/proxy/src/lib.rs b/src/proxy/src/lib.rs index 008bcdf6..42111954 100644 --- a/src/proxy/src/lib.rs +++ b/src/proxy/src/lib.rs @@ -1,4 +1,30 @@ -//! One-sentence summary of your crate. -//! -//! Followed by more detailed Markdown documentation of your crate. -#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] +// EFS Proxy modules are made visible such that they can be reused in the Integration tests. +// EFS proxy Integration tests are implemented in a white box testing manner. +// We want to keep all the proxy internals visible and accessible. +// +#![warn(rust_2018_idioms)] + +pub mod config_parser; +pub mod connection_task; +pub mod connections; +pub mod controller; +pub mod efs_rpc; +pub mod error; +pub mod logger; +pub mod proxy; +pub mod proxy_identifier; +pub mod proxy_task; +pub mod rpc; +pub mod shutdown; +pub mod status_reporter; +pub mod test_utils; +pub mod tls; + +#[allow(clippy::all)] +#[allow(deprecated)] +#[allow(invalid_value)] +#[allow(non_camel_case_types)] +#[allow(unused_assignments)] +pub mod efs_prot { + include!(concat!(env!("OUT_DIR"), "/efs_prot_xdr.rs")); +} diff --git a/src/proxy/src/main.rs b/src/proxy/src/main.rs index a7c1e54b..92d4d1e4 100644 --- a/src/proxy/src/main.rs +++ b/src/proxy/src/main.rs @@ -1,5 +1,6 @@ use crate::config_parser::ProxyConfig; use crate::connections::{PlainTextPartitionFinder, TlsPartitionFinder}; +use crate::tls::get_tls_config; use crate::tls::TlsConfig; use clap::Parser; use controller::Controller; @@ -12,6 +13,7 @@ use tokio::sync::Mutex; use tokio_util::sync::CancellationToken; mod config_parser; +mod connection_task; mod connections; mod controller; mod efs_rpc; @@ -19,9 +21,11 @@ mod error; mod logger; mod proxy; mod proxy_identifier; +mod proxy_task; mod rpc; mod shutdown; mod status_reporter; +mod test_utils; mod tls; 
#[allow(clippy::all)] @@ -118,20 +122,6 @@ async fn write_pid_file(pid_file_path: &Path) -> Result<(), anyhow::Error> { Ok(()) } -async fn get_tls_config(proxy_config: &ProxyConfig) -> Result { - let tls_config = TlsConfig::new( - proxy_config.fips, - Path::new(&proxy_config.nested_config.ca_file), - Path::new(&proxy_config.nested_config.client_cert_pem_file), - Path::new(&proxy_config.nested_config.client_private_key_pem_file), - &proxy_config.nested_config.mount_target_addr, - &proxy_config.nested_config.expected_server_hostname_tls, - ) - .await; - let tls_config = tls_config?; - Ok(tls_config) -} - fn run_sighup_handler(proxy_config: ProxyConfig, tls_config: Arc>) { tokio::spawn(async move { let mut sighup_listener = match signal::unix::signal(signal::unix::SignalKind::hangup()) { diff --git a/src/proxy/src/proxy.rs b/src/proxy/src/proxy.rs index d686e144..1bb9c5f5 100644 --- a/src/proxy/src/proxy.rs +++ b/src/proxy/src/proxy.rs @@ -1,70 +1,23 @@ -use std::{ - error::Error, - marker::PhantomData, - sync::{atomic::AtomicU64, Arc}, - time::{Duration, Instant}, -}; +use std::{error::Error, marker::PhantomData, sync::Arc, time::Duration}; -use bytes::BytesMut; -use log::{debug, error, info, trace}; use tokio::{ - io::{split, AsyncReadExt, AsyncWriteExt, ReadHalf, WriteHalf}, - net::{ - tcp::{OwnedReadHalf, OwnedWriteHalf}, - TcpStream, - }, + net::TcpStream, sync::{ mpsc::{self}, Mutex, }, task::JoinHandle, }; -use tokio_util::sync::CancellationToken; -use crate::rpc::{RpcFragmentParseError, RPC_MAX_SIZE}; use crate::{ + connection_task::ConnectionTask, connections::ProxyStream, controller::Event, + proxy_task::{ConnectionMessage, ProxyTask}, rpc::RpcBatch, - shutdown::{ShutdownHandle, ShutdownReason}, + shutdown::ShutdownHandle, }; -pub const REPORT_INTERVAL_SECS: u64 = 3; - -#[derive(Copy, Clone, Debug)] -pub struct PerformanceStats { - _num_connections: usize, - pub read_bytes: u64, - pub write_bytes: u64, - pub time_delta: Duration, -} - -impl PerformanceStats 
{ - pub fn new( - num_connections: usize, - read_bytes: u64, - write_bytes: u64, - time_delta: Duration, - ) -> Self { - PerformanceStats { - _num_connections: num_connections, - read_bytes, - write_bytes, - time_delta, - } - } - - // Return total throughput in bytes per second - pub fn get_total_throughput(&self) -> u64 { - let time_delta_seconds = self.time_delta.as_secs(); - if time_delta_seconds == 0 { - 0 - } else { - let total_bytes = self.read_bytes + self.write_bytes; - total_bytes / time_delta_seconds - } - } -} pub struct Proxy { partition_to_nfs_cli_queue: mpsc::Sender, partition_senders: Arc>>>, @@ -82,8 +35,11 @@ impl Proxy { notification_queue: mpsc::Sender>, shutdown: ShutdownHandle, ) -> Self { + // Channel for NFSServer -> NFSClient communication let (tx, rx) = mpsc::channel(64); + // tx is passed to ConnectionTasks, so each ConnectionTask will be reading from NFS socket + // and sending messages to NFSClient channel via tx let senders = partition_servers .into_iter() .map(|stream| Proxy::create_connection(stream, tx.clone(), shutdown.clone())) @@ -91,14 +47,16 @@ impl Proxy { let partition_senders = Arc::new(Mutex::new(senders)); - let proxy = ProxyTask::new( + // rx is passed to ProxyTask, so it can receive NFS response messages from ConnectionTask + // and write it to NFSClient socket + let proxy_task = ProxyTask::new( nfs_client, notification_queue, partition_senders.clone(), rx, shutdown.clone(), ); - let proxy_task_handle = tokio::spawn(proxy.run()); + let proxy_task_handle = tokio::spawn(proxy_task.run()); Self { partition_to_nfs_cli_queue: tx, partition_senders, @@ -141,385 +99,3 @@ impl Proxy { } } } - -const BUFFER_SIZE: usize = RPC_MAX_SIZE; - -struct ProxyTask { - nfs_client: TcpStream, - notification_queue: mpsc::Sender>, - partition_senders: Arc>>>, - response_queue: mpsc::Receiver, - shutdown: ShutdownHandle, -} - -enum ConnectionMessage { - Response(RpcBatch), -} - -impl ProxyTask { - pub fn new( - nfs_client: TcpStream, - 
notification_queue: mpsc::Sender>, - partition_senders: Arc>>>, - response_queue: mpsc::Receiver, - shutdown: ShutdownHandle, - ) -> Self { - Self { - nfs_client, - notification_queue, - partition_senders, - response_queue, - shutdown, - } - } - - async fn run(self) { - // Runs Proxy between NFS Client and the EFS Service. - // - // This function returns when it is cancelled by the `ShutdownHandle`, or if an error - // causes the `ProxyTask`'s `reader`, `writer`, or `reporter` task to return. In any of - // these cases, the `tokio::select!` block will cancel all of the tasks run by this object. - // - // An unused `mspc::Sender` is passed to each task spawned, so that we can await task - // shutdown with `mspc::Receiver::recv`. See https://tokio.rs/tokio/topics/shutdown. - - trace!("Starting proxy task"); - - let (shutdown_sender, mut shutdown_receiver) = mpsc::channel::(1); - - let write_byte_count = Arc::new(AtomicU64::new(0)); - let read_byte_count = Arc::new(AtomicU64::new(0)); - - let (read_half, write_half) = self.nfs_client.into_split(); - - let reader = Self::run_reader( - read_half, - read_byte_count.clone(), - self.partition_senders.clone(), - self.shutdown.clone(), - shutdown_sender.clone(), - ); - let shutdown = self.shutdown.clone(); - tokio::spawn(async move { - tokio::select! { - _ = reader => trace!("Proxy reader stopped"), - _ = shutdown.cancellation_token.cancelled() => trace!("Proxy reader stopped by ShutdownHandle"), - } - }); - - let writer = Self::run_writer( - write_half, - write_byte_count.clone(), - self.response_queue, - self.shutdown.clone(), - shutdown_sender.clone(), - ); - let shutdown = self.shutdown.clone(); - tokio::spawn(async move { - tokio::select! 
{ - _ = writer => trace!("Proxy writer stopped"), - _ = shutdown.cancellation_token.cancelled() => trace!("Proxy writer stopped by ShutdownHandle"), - } - }); - - let reporter = Self::run_reporter( - read_byte_count, - write_byte_count, - self.partition_senders.clone(), - self.notification_queue.clone(), - shutdown_sender.clone(), - ); - let shutdown = self.shutdown.clone(); - tokio::spawn(async move { - tokio::select! { - _ = reporter => trace!("Proxy reporter stopped"), - _ = shutdown.cancellation_token.cancelled() => trace!("Proxy reporter stopped by ShutdownHandle"), - } - }); - - drop(shutdown_sender); - shutdown_receiver.recv().await; - } - - // NFS client to Proxy - async fn run_reader( - mut read_half: OwnedReadHalf, - read_count: Arc, - partition_senders: Arc>>>, - shutdown: ShutdownHandle, - _shutdown_sender: mpsc::Sender, - ) { - trace!("Starting proxy reader"); - let mut buffer = BytesMut::with_capacity(BUFFER_SIZE); - let reason; - let mut next_conn = 0; - - loop { - match read_half.read_buf(&mut buffer).await { - Ok(n_read) => { - if n_read == 0 { - reason = Some(ShutdownReason::Unmount); - break; - } else { - read_count.fetch_add(n_read as u64, std::sync::atomic::Ordering::AcqRel); - } - } - Err(e) => { - info!("Error reading from NFS client {:?}", e); - reason = Some(ShutdownReason::Unmount); - break; - } - } - - match RpcBatch::parse_batch(&mut buffer) { - Ok(Some(batch)) => { - let f = partition_senders.lock().await; - let r = f[next_conn].send(batch).await; - next_conn = (next_conn + 1) % f.len(); - if let Err(e) = r { - debug!("Error sending RPC batch to connection task {:?}", e); - reason = Some(ShutdownReason::UnexpectedError); - break; - }; - } - Err(RpcFragmentParseError::InvalidSizeTooSmall) => { - drop(read_half); - error!("NFS Client Error: invalid RPC size - size too small"); - reason = Some(ShutdownReason::FrameSizeTooSmall); - break; - } - Err(RpcFragmentParseError::SizeLimitExceeded) => { - drop(read_half); - error!("NFS Client Error: 
invalid RPC size - size limit exceeded"); - reason = Some(ShutdownReason::FrameSizeExceeded); - break; - } - Ok(None) | Err(RpcFragmentParseError::Incomplete) => (), - } - - if buffer.capacity() == 0 { - buffer.reserve(BUFFER_SIZE) - } - } - trace!("cli_to_server exiting!"); - shutdown.exit(reason).await; - } - - // Proxy to NFS Client - async fn run_writer( - mut write_half: OwnedWriteHalf, - write_count: Arc, - mut response_queue: mpsc::Receiver, - shutdown: ShutdownHandle, - _shutdown_sender: mpsc::Sender, - ) { - trace!("Starting proxy writer"); - - let mut reason = None; - loop { - match response_queue.recv().await { - Some(ConnectionMessage::Response(batch)) => { - let mut total_written = 0; - - for b in &batch.rpcs { - match write_half.write_all(b).await { - Ok(_) => total_written += b.len(), - Err(e) => { - debug!("Error writing to nfs_client. {:?}", e); - reason = Some(ShutdownReason::Unmount); - break; - } - }; - } - - write_count - .fetch_add(total_written as u64, std::sync::atomic::Ordering::AcqRel); - } - None => { - info!("Exiting server_to_cli"); - break; - } - } - } - shutdown.exit(reason).await; - } - - async fn run_reporter( - read_count: Arc, - write_count: Arc, - partition_senders: Arc>>>, - notification_queue: mpsc::Sender>, - _shutdown_sender: mpsc::Sender, - ) { - trace!("Starting reporter task"); - - let mut last = Instant::now(); - loop { - tokio::time::sleep(Duration::from_secs(REPORT_INTERVAL_SECS)).await; - - let num_connections; - { - let t = partition_senders.lock().await; - num_connections = t.len(); - drop(t); - } - - let now = Instant::now(); - let delta = now - last; - last = now; - let read = read_count.swap(0, std::sync::atomic::Ordering::AcqRel); - let write = write_count.swap(0, std::sync::atomic::Ordering::AcqRel); - let result = notification_queue - .send(Event::ProxyUpdate(PerformanceStats::new( - num_connections, - read, - write, - delta, - ))) - .await; - if result.is_err() { - break; - } - } - } -} - -struct 
ConnectionTask { - stream: S, - proxy_receiver: mpsc::Receiver, - proxy_sender: mpsc::Sender, -} - -impl ConnectionTask { - fn new( - stream: S, - proxy_receiver: mpsc::Receiver, - proxy_sender: mpsc::Sender, - ) -> Self { - Self { - stream, - proxy_receiver, - proxy_sender, - } - } - - async fn run(self, shutdown_handle: ShutdownHandle) { - let (r, w) = split(self.stream); - - let shutdown = shutdown_handle.clone(); - - // This CancellationToken facilitates graceful TLS connection closures by ensuring that - // that the ReadHalf is dropped only after the WriteHalf.shutdown() has returned - let connection_cancellation_token = CancellationToken::new(); - - let writer = Self::run_writer( - w, - self.proxy_receiver, - shutdown_handle.clone(), - connection_cancellation_token.clone(), - ); - tokio::spawn(async move { - tokio::select! { - _ = shutdown.cancellation_token.cancelled() => trace!("Cancelled"), - _ = writer => {}, - } - }); - - let reader = Self::run_reader(r, self.proxy_sender, shutdown_handle.clone()); - tokio::spawn(async move { - tokio::select! 
{ - _ = connection_cancellation_token.cancelled() => trace!("Cancelled"), - _ = reader => {}, - } - }); - } - - // EFS to Proxy - async fn run_reader( - mut server_read_half: ReadHalf, - sender: mpsc::Sender, - shutdown: ShutdownHandle, - ) { - let reason; - let mut buffer = BytesMut::with_capacity(BUFFER_SIZE); - loop { - match server_read_half.read_buf(&mut buffer).await { - Ok(n_read) => { - if n_read == 0 { - reason = Option::Some(ShutdownReason::NeedsRestart); - break; - } - } - Err(e) => { - debug!("Error reading from server: {:?}", e); - reason = Option::Some(ShutdownReason::NeedsRestart); - break; - } - }; - - match RpcBatch::parse_batch(&mut buffer) { - Ok(Some(batch)) => { - if let Err(e) = sender.send(ConnectionMessage::Response(batch)).await { - debug!("Error sending result back: {:?}", e); - reason = Some(ShutdownReason::UnexpectedError); - break; - } - } - Err(RpcFragmentParseError::InvalidSizeTooSmall) => { - drop(server_read_half); - error!("Server Error: invalid RPC size - size too small"); - reason = Some(ShutdownReason::UnexpectedError); - break; - } - Err(RpcFragmentParseError::SizeLimitExceeded) => { - drop(server_read_half); - error!("Server Error: invalid RPC size - size limit exceeded"); - reason = Some(ShutdownReason::UnexpectedError); - break; - } - Ok(None) | Err(RpcFragmentParseError::Incomplete) => (), - } - - if buffer.capacity() == 0 { - buffer.reserve(BUFFER_SIZE) - } - } - shutdown.exit(reason).await; - } - - // Proxy to EFS - async fn run_writer( - mut server_write_half: WriteHalf, - mut receiver: mpsc::Receiver, - shutdown: ShutdownHandle, - connection_cancellation_token: CancellationToken, - ) { - let mut reason = Option::None; - loop { - let Some(batch) = receiver.recv().await else { - debug!("sender dropped"); - break; - }; - - for b in &batch.rpcs { - match server_write_half.write_all(b).await { - Ok(_) => (), - Err(e) => { - debug!("Error writing to server: {:?}", e); - reason = Option::Some(ShutdownReason::NeedsRestart); - 
break; - } - }; - } - } - - tokio::spawn(async move { - match server_write_half.shutdown().await { - Ok(_) => (), - Err(e) => debug!("Failed to gracefully shutdown connection: {}", e), - }; - connection_cancellation_token.cancel(); - }); - shutdown.exit(reason).await; - } -} diff --git a/src/proxy/src/proxy_task.rs b/src/proxy/src/proxy_task.rs new file mode 100644 index 00000000..95340b36 --- /dev/null +++ b/src/proxy/src/proxy_task.rs @@ -0,0 +1,310 @@ +use std::{ + sync::{atomic::AtomicU64, Arc}, + time::{Duration, Instant}, +}; + +use bytes::BytesMut; +use log::{debug, error, info, trace}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{ + tcp::{OwnedReadHalf, OwnedWriteHalf}, + TcpStream, + }, + sync::{ + mpsc::{self}, + Mutex, + }, +}; + +use crate::rpc::{RpcFragmentParseError, RPC_MAX_SIZE}; +use crate::{ + connections::ProxyStream, + controller::Event, + rpc::RpcBatch, + shutdown::{ShutdownHandle, ShutdownReason}, +}; + +#[derive(Copy, Clone, Debug)] +pub struct PerformanceStats { + _num_connections: usize, + pub read_bytes: u64, + pub write_bytes: u64, + pub time_delta: Duration, +} + +impl PerformanceStats { + pub fn new( + num_connections: usize, + read_bytes: u64, + write_bytes: u64, + time_delta: Duration, + ) -> Self { + PerformanceStats { + _num_connections: num_connections, + read_bytes, + write_bytes, + time_delta, + } + } + + // Return total throughput in bytes per second + pub fn get_total_throughput(&self) -> u64 { + let time_delta_seconds = self.time_delta.as_secs(); + if time_delta_seconds == 0 { + 0 + } else { + let total_bytes = self.read_bytes + self.write_bytes; + total_bytes / time_delta_seconds + } + } +} + +pub const BUFFER_SIZE: usize = RPC_MAX_SIZE; +pub const REPORT_INTERVAL_SECS: u64 = 3; + +pub struct ProxyTask { + nfs_client: TcpStream, + notification_queue: mpsc::Sender>, + partition_senders: Arc>>>, + response_queue: mpsc::Receiver, + shutdown: ShutdownHandle, +} + +pub enum ConnectionMessage { + Response(RpcBatch), 
+} + +impl ProxyTask { + pub fn new( + nfs_client: TcpStream, + notification_queue: mpsc::Sender>, + partition_senders: Arc>>>, + response_queue: mpsc::Receiver, + shutdown: ShutdownHandle, + ) -> Self { + Self { + nfs_client, + notification_queue, + partition_senders, + response_queue, + shutdown, + } + } + + pub async fn run(self) { + // Runs Proxy between NFS Client and the EFS Service. + // + // This function returns when it is cancelled by the `ShutdownHandle`, or if an error + // causes the `ProxyTask`'s `reader`, `writer`, or `reporter` task to return. In any of + // these cases, the `tokio::select!` block will cancel all of the tasks run by this object. + // + // An unused `mspc::Sender` is passed to each task spawned, so that we can await task + // shutdown with `mspc::Receiver::recv`. See https://tokio.rs/tokio/topics/shutdown. + + trace!("Starting proxy task"); + + let (shutdown_sender, mut shutdown_receiver) = mpsc::channel::(1); + + let write_byte_count = Arc::new(AtomicU64::new(0)); + let read_byte_count = Arc::new(AtomicU64::new(0)); + + let (read_half, write_half) = self.nfs_client.into_split(); + + // ProxyTask Reader reads NFS messages from NFSClient socket and sends it to ConnectionTask + let reader = Self::run_reader( + read_half, + read_byte_count.clone(), + self.partition_senders.clone(), + self.shutdown.clone(), + shutdown_sender.clone(), + ); + let shutdown = self.shutdown.clone(); + tokio::spawn(async move { + tokio::select! { + _ = reader => trace!("Proxy reader stopped"), + _ = shutdown.cancellation_token.cancelled() => trace!("Proxy reader stopped by ShutdownHandle"), + } + }); + + // ProxyTask Writer takes items from NFSClient channel and writes to the NFSClient socket + let writer = Self::run_writer( + write_half, + write_byte_count.clone(), + self.response_queue, + self.shutdown.clone(), + shutdown_sender.clone(), + ); + let shutdown = self.shutdown.clone(); + tokio::spawn(async move { + tokio::select! 
{ + _ = writer => trace!("Proxy writer stopped"), + _ = shutdown.cancellation_token.cancelled() => trace!("Proxy writer stopped by ShutdownHandle"), + } + }); + + let reporter = Self::run_reporter( + read_byte_count, + write_byte_count, + self.partition_senders.clone(), + self.notification_queue.clone(), + shutdown_sender.clone(), + ); + let shutdown = self.shutdown.clone(); + tokio::spawn(async move { + tokio::select! { + _ = reporter => trace!("Proxy reporter stopped"), + _ = shutdown.cancellation_token.cancelled() => trace!("Proxy reporter stopped by ShutdownHandle"), + } + }); + + drop(shutdown_sender); + shutdown_receiver.recv().await; + } + + // NFS client to Proxy + async fn run_reader( + mut read_half: OwnedReadHalf, + read_count: Arc, + partition_senders: Arc>>>, + shutdown: ShutdownHandle, + _shutdown_sender: mpsc::Sender, + ) { + trace!("Starting proxy reader"); + let mut buffer = BytesMut::with_capacity(BUFFER_SIZE); + let reason; + let mut next_conn = 0; + + loop { + // Read data from NFSClient socket + match read_half.read_buf(&mut buffer).await { + Ok(n_read) => { + if n_read == 0 { + reason = Some(ShutdownReason::Unmount); + break; + } else { + read_count.fetch_add(n_read as u64, std::sync::atomic::Ordering::AcqRel); + } + } + Err(e) => { + info!("Error reading from NFS client {:?}", e); + reason = Some(ShutdownReason::Unmount); + break; + } + } + + // Parse message and send to particular connection's channel + match RpcBatch::parse_batch(&mut buffer) { + Ok(Some(batch)) => { + let f = partition_senders.lock().await; + let r = f[next_conn].send(batch).await; + + // select connection via round-robin + next_conn = (next_conn + 1) % f.len(); + if let Err(e) = r { + debug!("Error sending RPC batch to connection task {:?}", e); + reason = Some(ShutdownReason::UnexpectedError); + break; + }; + } + Err(RpcFragmentParseError::InvalidSizeTooSmall) => { + drop(read_half); + error!("NFS Client Error: invalid RPC size - size too small"); + reason = 
Some(ShutdownReason::FrameSizeTooSmall); + break; + } + Err(RpcFragmentParseError::SizeLimitExceeded) => { + drop(read_half); + error!("NFS Client Error: invalid RPC size - size limit exceeded"); + reason = Some(ShutdownReason::FrameSizeExceeded); + break; + } + Ok(None) | Err(RpcFragmentParseError::Incomplete) => (), + } + + if buffer.capacity() == 0 { + buffer.reserve(BUFFER_SIZE) + } + } + trace!("cli_to_server exiting!"); + shutdown.exit(reason).await; + } + + // Proxy to NFS Client + async fn run_writer( + mut write_half: OwnedWriteHalf, + write_count: Arc, + mut response_queue: mpsc::Receiver, + shutdown: ShutdownHandle, + _shutdown_sender: mpsc::Sender, + ) { + trace!("Starting proxy writer"); + + let mut reason = None; + loop { + match response_queue.recv().await { + Some(ConnectionMessage::Response(batch)) => { + let mut total_written = 0; + + for b in &batch.rpcs { + match write_half.write_all(b).await { + Ok(_) => total_written += b.len(), + Err(e) => { + debug!("Error writing to nfs_client. 
{:?}", e); + reason = Some(ShutdownReason::Unmount); + break; + } + }; + } + + write_count + .fetch_add(total_written as u64, std::sync::atomic::Ordering::AcqRel); + } + None => { + info!("Exiting server_to_cli"); + break; + } + } + } + shutdown.exit(reason).await; + } + + async fn run_reporter( + read_count: Arc, + write_count: Arc, + partition_senders: Arc>>>, + notification_queue: mpsc::Sender>, + _shutdown_sender: mpsc::Sender, + ) { + trace!("Starting reporter task"); + + let mut last = Instant::now(); + loop { + tokio::time::sleep(Duration::from_secs(REPORT_INTERVAL_SECS)).await; + + let num_connections; + { + let t = partition_senders.lock().await; + num_connections = t.len(); + drop(t); + } + + let now = Instant::now(); + let delta = now - last; + last = now; + let read = read_count.swap(0, std::sync::atomic::Ordering::AcqRel); + let write = write_count.swap(0, std::sync::atomic::Ordering::AcqRel); + let result = notification_queue + .send(Event::ProxyUpdate(PerformanceStats::new( + num_connections, + read, + write, + delta, + ))) + .await; + if result.is_err() { + break; + } + } + } +} diff --git a/src/proxy/src/rpc.rs b/src/proxy/src/rpc.rs index f167839d..19008912 100644 --- a/src/proxy/src/rpc.rs +++ b/src/proxy/src/rpc.rs @@ -101,46 +101,10 @@ fn extract_u32_from_bytes(header: &[u8]) -> u32 { #[cfg(test)] pub mod test { - use crate::rpc::RPC_MAX_SIZE; + use crate::{rpc::RPC_MAX_SIZE, test_utils::generate_rpc_msg_fragments}; use super::{RpcBatch, RpcFragmentParseError, RPC_HEADER_SIZE, RPC_LAST_FRAG}; use bytes::{BufMut, BytesMut}; - use rand::Rng; - - // Generates message fragments for tests - // - // This function generates a set of message fragments from random data. The fragments are constructed - // in a way that they can be later assembled into the full long message data - // function. - // - // # Arguments - // * `size` - The total size of the message. - // * `num_fragments` - The number of fragments to generate. 
- // - pub fn generate_msg_fragments(size: usize, num_fragments: usize) -> (bytes::BytesMut, Vec) { - let mut rng = rand::thread_rng(); - let data: Vec = (0..size).map(|_| rng.gen()).collect(); - - let fragment_data_size = data.len() / num_fragments; - - let mut data_buffer = bytes::BytesMut::new(); - for i in 0..num_fragments { - let start_idx = i * fragment_data_size; - let end_idx = std::cmp::min(size, start_idx + fragment_data_size); - let fragment_data = &data[start_idx..end_idx]; - - let mut header = (end_idx - start_idx) as u32; - if end_idx == size { - header |= 1 << 31; - } - - data_buffer.extend_from_slice(&header.to_be_bytes()); - data_buffer.extend_from_slice(fragment_data); - } - assert_eq!(data_buffer.len(), (num_fragments * 4) + data.len()); - - (data_buffer, data) - } #[test] fn multiple_messages() { @@ -167,7 +131,7 @@ pub mod test { #[test] fn test_invalid_rpc_small_fragment() { let num_fragments = 1; - let (mut input_buffer, _) = generate_msg_fragments(1, num_fragments); + let (mut input_buffer, _) = generate_rpc_msg_fragments(1, num_fragments); let result = RpcBatch::parse_batch(&mut input_buffer); assert!(matches!( result, @@ -178,7 +142,7 @@ pub mod test { #[test] fn test_invalid_rpc_big_fragment() { let num_fragments = 1; - let (mut input_buffer, _) = generate_msg_fragments(RPC_MAX_SIZE + 1, num_fragments); + let (mut input_buffer, _) = generate_rpc_msg_fragments(RPC_MAX_SIZE + 1, num_fragments); let result = RpcBatch::parse_batch(&mut input_buffer); assert!(matches!( result, @@ -191,7 +155,7 @@ pub mod test { // Create an input buffer with multiple RPC fragments let num_fragments = 3; let message_size = 12; - let (mut input_buffer, _) = generate_msg_fragments(message_size, num_fragments); + let (mut input_buffer, _) = generate_rpc_msg_fragments(message_size, num_fragments); let mut rpc_batch = RpcBatch::parse_batch(&mut input_buffer) .expect("parse batch failed") .expect("no rpc messages found"); @@ -208,15 +172,15 @@ pub mod test { // 
Create an input buffer with multiple RPC messages let num_fragments_1 = 3; let message_size_1 = 12; - let (mut input_buffer, _) = generate_msg_fragments(message_size_1, num_fragments_1); + let (mut input_buffer, _) = generate_rpc_msg_fragments(message_size_1, num_fragments_1); let num_fragments_2 = 6; let message_size_2 = 24; - let (input_buffer_2, _) = generate_msg_fragments(message_size_2, num_fragments_2); + let (input_buffer_2, _) = generate_rpc_msg_fragments(message_size_2, num_fragments_2); let num_fragments_3 = 1; let message_size_3 = 50; - let (input_buffer_3, _) = generate_msg_fragments(message_size_3, num_fragments_3); + let (input_buffer_3, _) = generate_rpc_msg_fragments(message_size_3, num_fragments_3); input_buffer.extend_from_slice(&input_buffer_2); input_buffer.extend_from_slice(&input_buffer_3); diff --git a/src/proxy/src/status_reporter.rs b/src/proxy/src/status_reporter.rs index 4aa77432..b53d8bce 100644 --- a/src/proxy/src/status_reporter.rs +++ b/src/proxy/src/status_reporter.rs @@ -1,6 +1,6 @@ use crate::controller::ConnectionSearchState; use crate::efs_rpc::PartitionId; -use crate::{proxy::PerformanceStats, proxy_identifier::ProxyIdentifier}; +use crate::{proxy_identifier::ProxyIdentifier, proxy_task::PerformanceStats}; use anyhow::{Error, Result}; use tokio::sync::mpsc::{self, Receiver, Sender}; use tokio::time::Instant; diff --git a/src/proxy/src/test_utils.rs b/src/proxy/src/test_utils.rs new file mode 100644 index 00000000..9d21b2f2 --- /dev/null +++ b/src/proxy/src/test_utils.rs @@ -0,0 +1,174 @@ +// Testing utility used for both unit and integration tests. +// + +// Using #[allow(dead_code)] is a common and acceptable practice for test utility functions. 
+#![allow(dead_code)] + +use crate::{ + config_parser::ProxyConfig, + efs_prot::{self, BindClientResponse, BindResponse, ScaleUpConfig}, + efs_rpc::{parse_bind_client_to_partition_response, EFS_PROGRAM_NUMBER, EFS_PROGRAM_VERSION}, + error::RpcError, + proxy_identifier::ProxyIdentifier, + tls::{create_config_builder, InsecureAcceptAllCertificatesHandler, TlsConfig}, +}; +use anyhow::Result; +use rand::{Rng, RngCore}; +use s2n_tls::config::Config; +use std::{io::Cursor, path::Path}; +use tokio::net::TcpListener; + +// Proxy Configuration testing utils +// + +pub static TEST_CONFIG_PATH: &str = "tests/certs/test_config.ini"; +const XID: u32 = 1; + +pub fn get_test_config() -> ProxyConfig { + ProxyConfig::from_path(Path::new(TEST_CONFIG_PATH)).expect("Could not parse test config.") +} + +pub async fn get_client_config() -> Result { + let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; + let builder = create_config_builder(&tls_config); + + let config = builder.build()?; + Ok(config) +} + +pub async fn get_server_config() -> Result { + let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; + let mut builder = create_config_builder(&tls_config); + + // Accept all client certificates + builder.set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?; + + let config = builder.build()?; + Ok(config) +} + +pub async fn find_available_port() -> (TcpListener, u16) { + for port in 10000..15000 { + match TcpListener::bind(("127.0.0.1", port)).await { + Ok(v) => { + return (v, port); + } + Err(_) => continue, + } + } + panic!("Failed to find port"); +} + +/// generate_rpc_msg_fragments: Generates message fragments for tests +/// +/// This function generates a set of message fragments from random data. The fragments are constructed +/// in a way that they can be later assembled into the full long message data +/// function. +/// +/// # Arguments +/// * `size` - The total size of the message. 
+/// * `num_fragments` - The number of fragments to generate. +/// +pub fn generate_rpc_msg_fragments(size: usize, num_fragments: usize) -> (bytes::BytesMut, Vec) { + let mut rng = rand::thread_rng(); + let data: Vec = (0..size).map(|_| rng.gen()).collect(); + + let fragment_data_size = data.len() / num_fragments; + + let mut data_buffer = bytes::BytesMut::new(); + for i in 0..num_fragments { + let start_idx = i * fragment_data_size; + let end_idx = std::cmp::min(size, start_idx + fragment_data_size); + let fragment_data = &data[start_idx..end_idx]; + + let mut header = (end_idx - start_idx) as u32; + if end_idx == size { + header |= 1 << 31; + } + + data_buffer.extend_from_slice(&header.to_be_bytes()); + data_buffer.extend_from_slice(fragment_data); + } + assert_eq!(data_buffer.len(), (num_fragments * 4) + data.len()); + + (data_buffer, data) +} + +pub fn generate_partition_id() -> efs_prot::PartitionId { + let mut bytes = [0u8; efs_prot::PARTITION_ID_LENGTH as usize]; + rand::thread_rng().fill_bytes(&mut bytes); + efs_prot::PartitionId(bytes) +} + +pub fn parse_bind_client_to_partition_request( + request: &onc_rpc::RpcMessage<&[u8], &[u8]>, +) -> Result { + let call_body = request.call_body().expect("not a call rpc"); + + if EFS_PROGRAM_NUMBER != call_body.program() + || EFS_PROGRAM_VERSION != call_body.program_version() + { + return Err(RpcError::GarbageArgs); + } + + let mut payload = Cursor::new(call_body.payload()); + let raw_proxy_id = xdr_codec::unpack::<_, efs_prot::ProxyIdentifier>(&mut payload)?; + + Ok(ProxyIdentifier { + uuid: uuid::Builder::from_bytes( + raw_proxy_id + .identifier + .try_into() + .expect("Failed not convert vec to sized array"), + ) + .into_uuid(), + incarnation: i64::from_be_bytes( + raw_proxy_id + .incarnation + .try_into() + .expect("Failed to convert vec to sized array"), + ), + }) +} + +pub fn create_bind_client_to_partition_response( + xid: u32, + bind_response: BindResponse, + scale_up_config: ScaleUpConfig, +) -> Result, 
RpcError> { + let mut payload_buf = Vec::new(); + + let response = BindClientResponse { + bind_response, + scale_up_config, + }; + xdr_codec::pack(&response, &mut payload_buf)?; + + create_bind_client_to_partition_response_from_accepted_status( + xid, + onc_rpc::AcceptedStatus::Success(payload_buf), + ) +} + +pub fn create_bind_client_to_partition_response_from_accepted_status( + xid: u32, + accepted_status: onc_rpc::AcceptedStatus>, +) -> Result, RpcError> { + let reply_body = onc_rpc::ReplyBody::Accepted(onc_rpc::AcceptedReply::new( + onc_rpc::auth::AuthFlavor::AuthNone::>(None), + accepted_status, + )); + + onc_rpc::RpcMessage::new(xid, onc_rpc::MessageType::Reply(reply_body)) + .serialise() + .map_err(|e| e.into()) +} + +pub fn generate_parse_bind_client_to_partition_response_result( + accepted_status: onc_rpc::AcceptedStatus>, +) -> Result { + let response = + create_bind_client_to_partition_response_from_accepted_status(XID, accepted_status)?; + let deserialized = onc_rpc::RpcMessage::try_from(response.as_slice())?; + parse_bind_client_to_partition_response(&deserialized) +} diff --git a/src/proxy/src/tls.rs b/src/proxy/src/tls.rs index c91fe1fd..b25d9cdd 100644 --- a/src/proxy/src/tls.rs +++ b/src/proxy/src/tls.rs @@ -9,6 +9,7 @@ use s2n_tls_tokio::TlsStream; use std::path::Path; use tokio::net::TcpStream; +use crate::config_parser::ProxyConfig; use crate::connections::configure_stream; use crate::error::ConnectError; @@ -40,6 +41,20 @@ pub struct TlsConfig { pub server_domain: String, } +pub async fn get_tls_config(proxy_config: &ProxyConfig) -> Result { + let tls_config = TlsConfig::new( + proxy_config.fips, + Path::new(&proxy_config.nested_config.ca_file), + Path::new(&proxy_config.nested_config.client_cert_pem_file), + Path::new(&proxy_config.nested_config.client_private_key_pem_file), + &proxy_config.nested_config.mount_target_addr, + &proxy_config.nested_config.expected_server_hostname_tls, + ) + .await; + let tls_config = tls_config?; + Ok(tls_config) 
+} + // s2n-tls errors if there are comments in the certificate files. This function removes comments if // they are present. async fn read_file_with_comments_removed(path: &Path) -> Result> { @@ -108,8 +123,7 @@ impl TlsConfig { }) } - #[cfg(test)] - pub async fn new_from_config(config: &crate::ProxyConfig) -> Result { + pub async fn new_from_config(config: &ProxyConfig) -> Result { let efs_config = &config.nested_config; let ca_file = Path::new(&efs_config.ca_file); @@ -149,7 +163,7 @@ pub async fn establish_tls_stream( Ok(tls_stream) } -fn create_config_builder(tls_config: &TlsConfig) -> s2n_tls::config::Builder { +pub fn create_config_builder(tls_config: &TlsConfig) -> s2n_tls::config::Builder { let mut config = Config::builder(); let policy = if tls_config.fips_enabled { @@ -194,30 +208,8 @@ fn create_config_builder(tls_config: &TlsConfig) -> s2n_tls::config::Builder { #[cfg(test)] pub mod tests { - - use crate::config_parser::tests::get_test_config; - use super::*; - pub async fn get_client_config() -> Result { - let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; - let builder = create_config_builder(&tls_config); - - let config = builder.build()?; - Ok(config) - } - - pub async fn get_server_config() -> Result { - let tls_config = TlsConfig::new_from_config(&get_test_config()).await?; - let mut builder = create_config_builder(&tls_config); - - // Accept all client certificates - builder.set_verify_host_callback(InsecureAcceptAllCertificatesHandler {})?; - - let config = builder.build()?; - Ok(config) - } - #[tokio::test] async fn test_remove_comments() { let comment_file = Path::new("tests/certs/cert_with_comments.pem"); diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 465a396c..3db23757 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.2.1" +VERSION = "2.3.0" SERVICE = "elasticfilesystem" CONFIG_FILE = 
"/etc/amazon/efs/efs-utils.conf" @@ -210,6 +210,8 @@ def get_aws_security_credentials(config, credentials_source, region): return get_aws_security_credentials_from_file("config", value) elif method == "ecs": return get_aws_security_credentials_from_ecs(config, value) + elif method == "podidentity": + return get_aws_security_credentials_from_pod_identity(config, value) elif method == "webidentity": return get_aws_security_credentials_from_webidentity( config, *(value.split(",")), region=region @@ -376,6 +378,46 @@ def get_aws_security_credentials_from_ecs(config, uri): return None +def get_aws_security_credentials_from_pod_identity(config, value): + dict_keys = ["AccessKeyId", "SecretAccessKey", "Token"] + + try: + creds_uri, token_file = value.split(",") + except ValueError: + logging.info("Invalid Aws Container Auth token URI format") + return None + + try: + with open(token_file, "r") as f: + token = f.read().strip() + if "\r" in token or "\n" in token: + logging.error("AWS Container Auth Token contains invalid characters") + return None + except Exception as e: + logging.error("Error reading token file %s: %s", token_file, e) + return None + + unsuccessful_resp = ( + f"Unsuccessful retrieval of AWS security credentials at {creds_uri}" + ) + url_error_msg = f"Unable to reach {creds_uri} to retrieve AWS security credentials" + + pod_identity_security_dict = url_request_helper( + config, + creds_uri, + unsuccessful_resp, + url_error_msg, + headers={"Authorization": token}, + ) + + if pod_identity_security_dict and all( + k in pod_identity_security_dict for k in dict_keys + ): + return pod_identity_security_dict + + return None + + def get_aws_security_credentials_from_webidentity(config, role_arn, token_file, region): try: with open(token_file, "r") as f: @@ -450,7 +492,8 @@ def get_mount_config(config, region, config_name): return config.get(MOUNT_CONFIG_SECTION, config_name) except NoOptionError: fatal_error( - "Error retrieving config. 
Please set the {} configuration in efs-utils.conf".format(config_name) + f"Error retrieving config. Please set the {config_name} configuration " + "in efs-utils.conf" ) diff --git a/test/mount_efs_test/test_get_aws_security_credentials.py b/test/mount_efs_test/test_get_aws_security_credentials.py index e931f32e..3e75014e 100644 --- a/test/mount_efs_test/test_get_aws_security_credentials.py +++ b/test/mount_efs_test/test_get_aws_security_credentials.py @@ -44,6 +44,13 @@ WEB_IDENTITY_ROLE_ARN = "FAKE_ROLE_ARN" WEB_IDENTITY_TOKEN_FILE = "WEB_IDENTITY_TOKEN_FILE" +AWS_CONTAINER_CREDS_FULL_URI_ENV = "AWS_CONTAINER_CREDENTIALS_FULL_URI" +AWS_CONTAINER_AUTH_TOKEN_FILE_ENV = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE" +POD_IDENTITY_CREDS_URI = "http://169.254.170.23/v1/credentials" +POD_IDENTITY_TOKEN_FILE = ( + "/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token" +) + class MockHeaders(object): def __init__(self, content_charset=None): @@ -520,3 +527,62 @@ def test_get_aws_security_credentials_from_webidentity_passed_in_one_param( "from ECS credentials relative uri, or from the instance security credentials service" in err ) + + +def test_get_aws_security_credentials_pod_identity(mocker): + config = get_fake_config() + token_content = "fake-token" + response = json.dumps( + { + "AccessKeyId": ACCESS_KEY_ID_VAL, + "SecretAccessKey": SECRET_ACCESS_KEY_VAL, + "Token": SESSION_TOKEN_VAL, + } + ) + + mocker.patch.dict( + os.environ, + { + AWS_CONTAINER_CREDS_FULL_URI_ENV: POD_IDENTITY_CREDS_URI, + AWS_CONTAINER_AUTH_TOKEN_FILE_ENV: POD_IDENTITY_TOKEN_FILE, + }, + ) + + mock_open = mocker.patch("builtins.open", mocker.mock_open(read_data=token_content)) + + mocker.patch("mount_efs.url_request_helper", return_value=json.loads(response)) + mocker.patch("os.path.exists", return_value=False) + mocker.patch("mount_efs.get_iam_role_name", return_value=None) + + credentials, credentials_source = mount_efs.get_aws_security_credentials( + config, True, 
"us-east-1" + ) + + assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL + assert credentials["SecretAccessKey"] == SECRET_ACCESS_KEY_VAL + assert credentials["Token"] == SESSION_TOKEN_VAL + assert ( + credentials_source + == f"podidentity:{POD_IDENTITY_CREDS_URI},{POD_IDENTITY_TOKEN_FILE}" + ) + + +def test_get_aws_security_credentials_pod_identity_invalid_token_file(mocker): + config = get_fake_config() + creds_uri = "http://169.254.170.23/v1/credentials" + token_file = "/nonexistent/file" + + mocker.patch.dict( + os.environ, + { + AWS_CONTAINER_CREDS_FULL_URI_ENV: creds_uri, + AWS_CONTAINER_AUTH_TOKEN_FILE_ENV: token_file, + }, + ) + + mocker.patch("builtins.open", side_effect=IOError("File not found")) + + with pytest.raises(SystemExit) as ex: + mount_efs.get_aws_security_credentials(config, True, "us-east-1") + + assert ex.value.code == 1 diff --git a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py index c49dc6dd..1808902e 100644 --- a/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py +++ b/test/mount_efs_test/test_get_dns_name_and_fallback_mount_target_ip_address.py @@ -43,11 +43,18 @@ MOCK_EFS_AGENT = "fake-efs-client" MOCK_EC2_AGENT = "fake-ec2-client" +TEST_SOCKET_GET_ADDR_INFO_RETURN = [ + (socket.AF_INET, socket.SOCK_STREAM, 6, "", ("93.184.216.34", 80)) +] + @pytest.fixture(autouse=True) def setup(mocker): mocker.patch("mount_efs.get_target_region", return_value=DEFAULT_REGION) - mocker.patch("socket.gethostbyname") + mocker.patch( + "socket.getaddrinfo", + return_value=TEST_SOCKET_GET_ADDR_INFO_RETURN, + ) def _get_mock_config( @@ -84,7 +91,7 @@ def test_get_dns_name_and_fallback_mount_target_ip_address(): ) assert "%s.efs.%s.amazonaws.com" % (FS_ID, DEFAULT_REGION) == dns_name - assert None == ip_address + assert not ip_address def test_get_dns_name_with_az_in_options(): @@ -221,7 +228,7 @@ def 
test_get_dns_name_bad_format_too_many_specifiers_2(): def test_get_dns_name_unresolvable(mocker, capsys): config = _get_mock_config() - mocker.patch("socket.gethostbyname", side_effect=socket.gaierror) + mocker.patch("socket.getaddrinfo", side_effect=socket.gaierror) with pytest.raises(SystemExit) as ex: mount_efs.get_dns_name_and_fallback_mount_target_ip_address( @@ -288,17 +295,15 @@ def test_get_dns_name_region_in_suffix(mocker): def test_dns_name_can_be_resolved_dns_resolve_failure(mocker): - dns_mock = mocker.patch("socket.gethostbyname", side_effect=socket.gaierror) + dns_mock = mocker.patch("socket.getaddrinfo", side_effect=socket.gaierror) result = mount_efs.dns_name_can_be_resolved(DNS_NAME) - assert False == result + assert not result utils.assert_called(dns_mock) -def test_dns_name_can_be_resolved_dns_resolve_succeed(mocker): - dns_mock = mocker.patch("socket.gethostbyname") +def test_dns_name_can_be_resolved_dns_resolve_succeed(): result = mount_efs.dns_name_can_be_resolved(DNS_NAME) assert True == result - utils.assert_called(dns_mock) def test_get_dns_name_and_fall_back_ip_address_success(mocker): @@ -307,7 +312,7 @@ def test_get_dns_name_and_fall_back_ip_address_success(mocker): """ config = _get_mock_config() - dns_mock = mocker.patch("socket.gethostbyname", side_effect=socket.gaierror) + dns_mock = mocker.patch("socket.getaddrinfo", side_effect=socket.gaierror) get_fallback_mount_target_ip_mock = mocker.patch( "mount_efs.get_fallback_mount_target_ip_address", return_value=IP_ADDRESS ) @@ -329,7 +334,7 @@ def test_get_dns_name_and_mount_target_ip_address_via_option_success(mocker): """ config = _get_mock_config() - dns_mock = mocker.patch("socket.gethostbyname") + dns_mock = mocker.patch("socket.getaddrinfo") get_fallback_mount_target_ip_mock = mocker.patch( "mount_efs.get_fallback_mount_target_ip_address" ) @@ -355,7 +360,7 @@ def test_get_dns_name_and_mount_target_ip_address_via_option_failure(mocker, cap """ config = _get_mock_config() - dns_mock 
= mocker.patch("socket.gethostbyname") + dns_mock = mocker.patch("socket.getaddrinfo") get_fallback_mount_target_ip_mock = mocker.patch( "mount_efs.get_fallback_mount_target_ip_address" ) @@ -388,7 +393,7 @@ def test_get_dns_name_and_fall_back_ip_address_failure(mocker, capsys): """ config = _get_mock_config() - dns_mock = mocker.patch("socket.gethostbyname", side_effect=socket.gaierror) + dns_mock = mocker.patch("socket.getaddrinfo", side_effect=socket.gaierror) get_fallback_mount_target_ip_mock = mocker.patch( "mount_efs.get_fallback_mount_target_ip_address", side_effect=mount_efs.FallbackException("timeout"), @@ -406,3 +411,19 @@ def test_get_dns_name_and_fall_back_ip_address_failure(mocker, capsys): utils.assert_called(dns_mock) utils.assert_called(get_fallback_mount_target_ip_mock) + + +def test_get_dns_name_with_ipv6_in_options(mocker): + config = _get_mock_config() + ipv6_address = "2001:0db8:85a3:0000:0000:8a2e:0370:7334" + options_with_ipv6 = {"mounttargetip": ipv6_address} + ip_address_connect_mock = mocker.patch( + "mount_efs.mount_target_ip_address_can_be_resolved", return_value=True + ) + dns_name, ip_address = mount_efs.get_dns_name_and_fallback_mount_target_ip_address( + config, FS_ID, options_with_ipv6 + ) + + assert "%s.efs.%s.amazonaws.com" % (FS_ID, DEFAULT_REGION) == dns_name + assert ipv6_address == ip_address + utils.assert_called(ip_address_connect_mock) diff --git a/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py b/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py index 1b7bd8a7..ef34b91c 100644 --- a/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py +++ b/test/mount_efs_test/test_get_fallback_mount_target_ip_address.py @@ -4,6 +4,7 @@ # for the specific language governing permissions and limitations under # the License. 
+import ipaddress import socket import pytest @@ -328,3 +329,26 @@ def test_get_dns_name_and_fall_back_ip_address_cannot_be_resolved(mocker, capsys utils.assert_called(check_fallback_enabled_mock) utils.assert_called(get_fallback_mount_target_ip_mock) utils.assert_called(check_ip_resolve_mock) + + +def test_get_fallback_mount_target_ip_address_helper_prefer_ipv4(mocker): + config = _get_mock_config() + + get_botocore_client_mock = mocker.patch( + "mount_efs.get_botocore_client", side_effect=[MOCK_EFS_AGENT, MOCK_EC2_AGENT] + ) + + ipv4_address = "127:0:0:1" + ipv6_address = "2001:db8:3333:4444:5555:6666:7777:8888" + mount_target_info = {"IpAddress": ipv4_address, "Ipv6Address": ipv6_address} + get_mount_target_az_mock = mocker.patch( + "mount_efs.get_mount_target_in_az", + return_value=mount_target_info, + ) + + options = {} + ip_address = mount_efs.get_fallback_mount_target_ip_address_helper( + config, options, FS_ID + ) + + assert ip_address == ipv4_address diff --git a/test/mount_efs_test/test_helper_function.py b/test/mount_efs_test/test_helper_function.py index cbd5e76a..2f10bf59 100644 --- a/test/mount_efs_test/test_helper_function.py +++ b/test/mount_efs_test/test_helper_function.py @@ -5,10 +5,11 @@ # the License. 
import logging +import socket import sys import unittest from collections import namedtuple -from unittest.mock import MagicMock, mock_open +from unittest.mock import MagicMock, mock_open, patch import pytest from botocore.exceptions import ProfileNotFound @@ -538,3 +539,25 @@ def test_stunnel5_non_al2(mocker): args, _ = check_output_mock.call_args args = args[0] assert "stunnel" == args[1] + + +def test_get_ipv6_addresses_success(): + hostname = "example.com" + mock_addrinfo = [ + (None, None, None, None, ("2001:db8::1", None, None, None)), + (None, None, None, None, ("2001:db8::2", None, None, None)), + ] + + with patch("socket.getaddrinfo", return_value=mock_addrinfo): + result = mount_efs.get_ipv6_addresses(hostname) + + assert result == ["2001:db8::1", "2001:db8::2"] + + +def test_get_ipv6_addresses_no_ipv6(): + hostname = "example.com" + + with patch("socket.getaddrinfo", side_effect=socket.gaierror): + result = mount_efs.get_ipv6_addresses(hostname) + + assert result == [] diff --git a/test/mount_efs_test/test_match_device.py b/test/mount_efs_test/test_match_device.py index 157447df..9eccea4f 100644 --- a/test/mount_efs_test/test_match_device.py +++ b/test/mount_efs_test/test_match_device.py @@ -52,12 +52,18 @@ DEFAULT_NFS_OPTIONS = {} FS_ID = "fs-deadbeef" OPTIONS_WITH_AZ = {"az": DEFAULT_AZ} +TEST_SOCKET_GET_ADDR_INFO_RETURN = [ + (socket.AF_INET, socket.SOCK_STREAM, 6, "", ("93.184.216.34", 80)) +] @pytest.fixture(autouse=True) def setup(mocker): mocker.patch("mount_efs.get_target_region", return_value=DEFAULT_REGION) - mocker.patch("socket.gethostbyname") + mocker.patch( + "socket.getaddrinfo", + return_value=TEST_SOCKET_GET_ADDR_INFO_RETURN, + ) def _get_mock_config( diff --git a/test/mount_efs_test/test_write_stunnel_config_file.py b/test/mount_efs_test/test_write_stunnel_config_file.py index 7bb5e267..feaaa83d 100644 --- a/test/mount_efs_test/test_write_stunnel_config_file.py +++ b/test/mount_efs_test/test_write_stunnel_config_file.py @@ -177,11 
+177,15 @@ def _get_expected_efs_config_tls( expected_efs_config["connect"] = ( expected_efs_config["connect"] % fallback_ip_address ) + expected_efs_config["verify"] = str(verify) if check_cert_hostname or efs_proxy_enabled: expected_efs_config["checkHost"] = dns_name[dns_name.index(FS_ID) :] + if not efs_proxy_enabled and mount_efs.is_ipv6_address(fallback_ip_address): + expected_efs_config["sni"] = dns_name[dns_name.index(FS_ID) :] + if check_cert_validity and ocsp_override and (not efs_proxy_enabled): expected_efs_config["OCSPaia"] = "yes" @@ -812,3 +816,37 @@ def test_non_tls_mount_with_proxy(mocker, tmpdir): expected_global_config, _get_expected_efs_config_non_tls(), ) + + +def test_write_stunnel_config_with_ipv6_and_legacy_stunnel(mocker, tmpdir): + ca_mocker = mocker.patch("mount_efs.add_tunnel_ca_options") + state_file_dir = str(tmpdir) + + test_ipv6_address = "2001:db8:3333:4444:5555:6666:7777:8888" + + config_file = mount_efs.write_stunnel_config_file( + _get_config(mocker), + state_file_dir, + FS_ID, + MOUNT_POINT, + PORT, + DNS_NAME, + VERIFY_LEVEL, + OCSP_ENABLED, + _get_mount_options_tls(), + DEFAULT_REGION, + fallback_ip_address=test_ipv6_address, + efs_proxy_enabled=False, + ) + + utils.assert_called_once(ca_mocker) + + _validate_config( + config_file, + _get_expected_global_config(FS_ID, MOUNT_POINT, PORT, state_file_dir), + _get_expected_efs_config_tls( + dns_name=DNS_NAME, + fallback_ip_address=test_ipv6_address, + efs_proxy_enabled=False, + ), + ) diff --git a/test/watchdog_test/test_get_aws_security_credentials.py b/test/watchdog_test/test_get_aws_security_credentials.py index b0b9a6b2..ad353e01 100644 --- a/test/watchdog_test/test_get_aws_security_credentials.py +++ b/test/watchdog_test/test_get_aws_security_credentials.py @@ -459,3 +459,33 @@ def test_get_aws_security_credentials_webidentity(mocker): assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL assert credentials["SecretAccessKey"] == SECRET_ACCESS_KEY_VAL assert 
credentials["Token"] == SESSION_TOKEN_VAL + + +def test_get_aws_security_credentials_pod_identity(mocker): + config = get_fake_config() + token_content = "fake-token" + creds_uri = "http://169.254.170.23/v1/credentials" + token_file = ( + "/var/run/secrets/pods.eks.amazonaws.com/serviceaccount/eks-pod-identity-token" + ) + credentials_source = f"podidentity:{creds_uri},{token_file}" + + response = json.dumps( + { + "AccessKeyId": ACCESS_KEY_ID_VAL, + "SecretAccessKey": SECRET_ACCESS_KEY_VAL, + "Token": SESSION_TOKEN_VAL, + } + ) + + mock_open = mocker.patch("builtins.open", mocker.mock_open(read_data=token_content)) + + mocker.patch("watchdog.url_request_helper", return_value=json.loads(response)) + + credentials = watchdog.get_aws_security_credentials( + config, credentials_source, "us-east-1" + ) + + assert credentials["AccessKeyId"] == ACCESS_KEY_ID_VAL + assert credentials["SecretAccessKey"] == SECRET_ACCESS_KEY_VAL + assert credentials["Token"] == SESSION_TOKEN_VAL From 6dd515cceea3a8e45e9f1939dba6c7864b1696b8 Mon Sep 17 00:00:00 2001 From: Mihir Thakur Date: Thu, 8 May 2025 20:04:54 +0000 Subject: [PATCH 33/51] Fix backtrace version to resolve ubuntu and rhel build issues --- src/proxy/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 3836ca94..1e0e588f 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -30,6 +30,7 @@ tokio = { version = "1.29.0, <1.39", features = ["full"] } tokio-util = "0.7.8" uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics"]} xdr-codec = "0.4.4" +backtrace = "=0.3.74" [dev-dependencies] test-case = "*" From 34e6a520f0127ba0e9d769c7525bec6642d74c7c Mon Sep 17 00:00:00 2001 From: Mihir Thakur Date: Tue, 13 May 2025 18:36:01 +0000 Subject: [PATCH 34/51] Pin Cargo.lock to avoid unexpected error across images --- src/proxy/Cargo.lock | 1798 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 1798 insertions(+) create mode 100644 
src/proxy/Cargo.lock diff --git a/src/proxy/Cargo.lock b/src/proxy/Cargo.lock new file mode 100644 index 00000000..3ee1b189 --- /dev/null +++ b/src/proxy/Cargo.lock @@ -0,0 +1,1798 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "aho-corasick" +version = "0.6.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81ce3d38065e618af2d7b77e10c5ad9a069859b4be3c2250f674af3840d9c8a5" +dependencies = [ + "memchr 2.7.4", +] + +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "ansi_term" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +dependencies = [ + "winapi", +] + +[[package]] +name = "anyhow" +version = "1.0.98" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" + +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + +[[package]] +name = "async-trait" +version = "0.1.88" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "atty" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" +dependencies = [ + "hermit-abi 0.1.19", + "libc", + "winapi", +] + +[[package]] +name = "autocfg" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" + +[[package]] +name = "backtrace" +version = "0.3.74" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +dependencies = [ + "addr2line", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", + "windows-targets 0.52.6", +] + +[[package]] +name = "bitflags" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" + +[[package]] +name = "bumpalo" +version = "3.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" + +[[package]] +name = "cc" +version = "1.2.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" +dependencies = [ + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "chrono" +version = "0.4.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-link", +] + +[[package]] +name = "clap" +version = "2.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c" +dependencies = [ + "ansi_term", + "atty", + "bitflags 1.3.2", + "strsim 0.8.0", + "textwrap", + "unicode-width", + "vec_map", +] + +[[package]] +name = "clap" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "422592638015fe46332afb8fbf9361d9fa2d498d05c0c384e28710b4639e33a5" +dependencies = [ + "atty", + "bitflags 1.3.2", + "clap_derive", + "clap_lex", + "once_cell", + "strsim 0.10.0", + "termcolor", +] + +[[package]] +name = "clap_derive" +version = "4.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677ca5a153ca1804d4bf3e9d45f0f6b5ba4f950de155e373d457cd5f154cca9c" +dependencies = [ + "heck", + "proc-macro-error", 
+ "proc-macro2", + "quote 1.0.40", + "syn 1.0.109", +] + +[[package]] +name = "clap_lex" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "033f6b7a4acb1f358c742aaca805c939ee73b4c6209ae4318ec7aca81c42e646" +dependencies = [ + "os_str_bytes", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 1.0.109", +] + +[[package]] +name = "destructure_traitobject" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c877555693c14d2f84191cfd3ad8582790fc52b5e2274b40b59cf5f5cea25c7" + +[[package]] +name = "efs-proxy" +version = "2.3.0" +dependencies = [ + "anyhow", + "async-trait", + "backtrace", + "bytes", + "chrono", + "clap 4.0.0", + "fern", + "futures", + "log 0.4.27", + "log4rs", + "nix", + "onc-rpc", + "rand 0.8.5", + "s2n-tls", + "s2n-tls-sys", + "s2n-tls-tokio", + "serde", + "serde_ini", + "tempfile", + "test-case", + "thiserror", + "tokio", + "tokio-util", + "uuid", + "xdr-codec", + "xdrgen", +] + +[[package]] +name = "env_logger" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +dependencies = [ + "log 0.3.9", + "regex", +] + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "error-chain" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9435d864e017c3c6afeac1654189b06cdb491cf2ff73dbf0d73b0f292f42ff8" +dependencies = [ + "backtrace", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "fern" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" +dependencies = [ + "log 0.4.27", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "futures" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + +[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr 2.7.4", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "getrandom" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", +] + +[[package]] +name = "getrandom" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", +] + 
+[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "hashbrown" +version = "0.15.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" +dependencies = [ + "libc", +] + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "humantime" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" + +[[package]] +name = "iana-time-zone" +version = "0.1.63" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log 0.4.27", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" 
+dependencies = [ + "cc", +] + +[[package]] +name = "indexmap" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +dependencies = [ + "equivalent", + "hashbrown", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jobserver" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +dependencies = [ + "getrandom 0.3.3", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73" + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "libc" +version = "0.2.172" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "lock_api" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +dependencies = [ + 
"autocfg", + "scopeguard", +] + +[[package]] +name = "log" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" +dependencies = [ + "log 0.4.27", +] + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +dependencies = [ + "serde", +] + +[[package]] +name = "log-mdc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" + +[[package]] +name = "log4rs" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0816135ae15bd0391cf284eab37e6e3ee0a6ee63d2ceeb659862bd8d0a984ca6" +dependencies = [ + "anyhow", + "arc-swap", + "chrono", + "derivative", + "fnv", + "humantime", + "libc", + "log 0.4.27", + "log-mdc", + "once_cell", + "parking_lot", + "rand 0.8.5", + "serde", + "serde-value", + "serde_json", + "serde_yaml", + "thiserror", + "thread-id", + "typemap-ors", + "winapi", +] + +[[package]] +name = "memchr" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "148fab2e51b4f1cfc66da2a7c32981d1d3c083a803978268bb11fe4b86925e7a" +dependencies = [ + "libc", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + +[[package]] +name = "miniz_oxide" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys 0.48.0", +] + +[[package]] +name = "nix" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" +dependencies = [ + "bitflags 1.3.2", + "cfg-if", + "libc", + "memoffset", + "pin-utils", +] + +[[package]] +name = "nom" +version = "3.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05aec50c70fd288702bcd93284a8444607f3292dbdf2a30de5ea5dcdbe72287b" +dependencies = [ + "memchr 1.0.2", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi 0.3.9", + "libc", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr 2.7.4", +] + +[[package]] +name = "onc-rpc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "869ad251376b969679e1cc00d989d59f577c05f33481a1b2e3273147c0f9d615" +dependencies = [ + "byteorder", + "bytes", + "smallvec", + "thiserror", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "openssl-sys" +version = "0.9.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "os_str_bytes" +version = "6.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + +[[package]] +name = "parking_lot" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote 1.0.40", + "syn 1.0.109", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "version_check", +] + +[[package]] +name = "proc-macro2" +version = "1.0.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + 
"libc", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.3", +] + +[[package]] +name = "redox_syscall" +version = "0.5.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +dependencies = [ + "bitflags 2.9.0", +] + +[[package]] +name = "regex" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" +dependencies = [ + "aho-corasick", + "memchr 2.7.4", + "regex-syntax", + "thread_local", + "utf8-ranges", +] + +[[package]] +name = "regex-syntax" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" +dependencies = [ + "ucd-util", +] + +[[package]] +name = "result" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194d8e591e405d1eecf28819740abed6d719d1a2db87fc0bcdedee9a26d55560" + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.9.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustversion" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "s2n-tls" +version = "0.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5820b4f6c7d414c82da63fc13b1e91a9b59e91dc2cc15287c0e1882e61837a20" +dependencies = [ + "errno", + "hex", + "libc", + "pin-project-lite", + "s2n-tls-sys", +] + +[[package]] +name = "s2n-tls-sys" +version = "0.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f32844e751ea7a2469755ab0c6ad5a02156a6b8b4a0202b3355796c7b247258" +dependencies = [ + "cc", + "libc", + "openssl-sys", +] + +[[package]] +name = "s2n-tls-tokio" +version = "0.0.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "995bc71beaf1eea056de345d2f45ff45877d26f612b4a7dde343024e41e3025b" +dependencies = [ + "errno", + "libc", + 
"pin-project-lite", + "s2n-tls", + "tokio", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde-value" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" +dependencies = [ + "ordered-float", + "serde", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "serde_ini" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb236687e2bb073a7521c021949be944641e671b8505a94069ca37b656c81139" +dependencies = [ + "result", + "serde", + "void", +] + +[[package]] +name = "serde_json" +version = "1.0.140" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +dependencies = [ + "itoa", + "memchr 2.7.4", + "ryu", + "serde", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" + +[[package]] +name = "socket2" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "strsim" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.20.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "test-case" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb2550dd13afcd286853192af8601920d959b14c401fcece38071d53bf0768a8" +dependencies = [ + "test-case-macros", +] + +[[package]] +name = "test-case-core" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" +dependencies = [ + "cfg-if", + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "test-case-macros" +version = "3.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", + "test-case-core", +] + +[[package]] +name = "textwrap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060" +dependencies = [ + "unicode-width", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" 
+dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "thread-id" +version = "4.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe8f25bbdd100db7e1d34acf7fd2dc59c4bf8f7483f505eaa7d4f12f76cc0ea" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "thread_local" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" +dependencies = [ + "lazy_static 1.5.0", +] + +[[package]] +name = "tokio" +version = "1.38.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68722da18b0fc4a05fdc1120b302b82051265792a1e1b399086e9b204b10ad3d" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-macros" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "tokio-util" +version = "0.7.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "typemap-ors" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a68c24b707f02dd18f1e4ccceb9d49f2058c2fb86384ef9972592904d7a28867" +dependencies = [ + "unsafe-any-ors", +] + +[[package]] +name = "ucd-util" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abd2fc5d32b590614af8b0a20d837f32eca055edd0bbead59a9cfe80858be003" + +[[package]] +name = 
"unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + +[[package]] +name = "unsafe-any-ors" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a303d30665362d9680d7d91d78b23f5f899504d4f08b3c4cf08d055d87c0ad" +dependencies = [ + "destructure_traitobject", +] + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "utf8-ranges" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fcfc827f90e53a02eaef5e535ee14266c1d569214c6aa70133a624d8a3164ba" + +[[package]] +name = "uuid" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +dependencies = [ + "getrandom 0.3.3", + "rand 0.9.1", + "uuid-macro-internal", +] + +[[package]] +name = "uuid-macro-internal" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72dcd78c4f979627a754f5522cea6e6a25e55139056535fe6e69c506cd64a862" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "vec_map" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" 
+ +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "wasi" +version = "0.14.2+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +dependencies = [ + "wit-bindgen-rt", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log 0.4.27", + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote 1.0.40", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" 
+dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "windows-interface" +version 
= "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] + +[[package]] +name = "windows-link" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + 
"windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "wit-bindgen-rt" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" +dependencies = [ + "bitflags 2.9.0", +] + +[[package]] +name = "xdr-codec" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48056532577dba856078eaf25193366b884401cdf55da433fe741617d3158fa6" +dependencies = [ + "byteorder", + "error-chain", +] + +[[package]] +name = "xdrgen" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef5855c0e686b4ef5fd0b96920d8e1f6317215798bcbde1ffec477a90d891ed2" +dependencies = [ + "bitflags 0.9.1", + "clap 2.34.0", + "env_logger", + "lazy_static 0.2.11", + "log 0.3.9", + "nom", + "quote 0.3.15", + "xdr-codec", +] + +[[package]] +name = "zerocopy" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +dependencies = [ + "proc-macro2", + "quote 1.0.40", + "syn 2.0.101", +] From 5949545d29ec76fa1825d67111ed2f24363c002c Mon Sep 17 00:00:00 2001 From: anthotse <70171740+anthotse@users.noreply.github.com> Date: Thu, 3 Jul 2025 12:19:39 -0400 Subject: [PATCH 35/51] Update version in amazon-efs-utils.spec to 2.3.1 --- amazon-efs-utils.spec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index a0983b79..f81355d9 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.3.0 +Version : 2.3.1 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems From e539990bc85a926fc8ef27ea16ab90ca224dd862 Mon Sep 
17 00:00:00 2001 From: Anthony Tse Date: Fri, 18 Jul 2025 15:31:00 -0400 Subject: [PATCH 36/51] Fix incorrect package version The previous version, 2.3.1, incorrectly created artifacts named 2.3.0. --- amazon-efs-utils.spec | 5 ++++- build-deb.sh | 2 +- config.ini | 2 +- src/mount_efs/__init__.py | 2 +- src/proxy/Cargo.lock | 4 ++-- src/proxy/Cargo.toml | 2 +- src/watchdog/__init__.py | 2 +- 7 files changed, 11 insertions(+), 8 deletions(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index f81355d9..b513f565 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.3.1 +Version : 2.3.2 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -192,6 +192,9 @@ fi %clean %changelog +* Fri Jul 18 2025 Anthony Tse - 2.3.2 +- Fix package version numbering + * Thu Apr 17 2025 Anthony Tse - 2.3.0 - Add support for pod-identity credentials in the credentials chain - Enable mounting with IPv6 when using with the 'stunnel' mount option diff --git a/build-deb.sh b/build-deb.sh index 1d523c1e..7178d643 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.3.0 +VERSION=2.3.2 RELEASE=1 ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index 6dde4b66..f9fb0176 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.3.0 +version=2.3.2 release=1 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 43d9b662..f3140643 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -86,7 +86,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.3.0" +VERSION = "2.3.2" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" diff --git a/src/proxy/Cargo.lock b/src/proxy/Cargo.lock 
index 3ee1b189..c5ab9793 100644 --- a/src/proxy/Cargo.lock +++ b/src/proxy/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "addr2line" @@ -249,7 +249,7 @@ checksum = "3c877555693c14d2f84191cfd3ad8582790fc52b5e2274b40b59cf5f5cea25c7" [[package]] name = "efs-proxy" -version = "2.3.0" +version = "2.3.2" dependencies = [ "anyhow", "async-trait", diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 1e0e588f..55a2bd11 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. -version = "2.3.0" +version = "2.3.2" publish = false license = "MIT" diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 3db23757..3daee3ca 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.3.0" +VERSION = "2.3.2" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" From e56ea4c10ddf2decb1ed663a32cbfebe14133df2 Mon Sep 17 00:00:00 2001 From: Michael <100072485+oyiz-michael@users.noreply.github.com> Date: Thu, 24 Jul 2025 22:54:14 +0100 Subject: [PATCH 37/51] Add environment variable support for AWS profiles and regions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implements support for standard AWS environment variables: - AWS_PROFILE for specifying AWS profile - AWS_REGION and AWS_DEFAULT_REGION for specifying region This addresses GitHub issues: - #278: AWS Profiles – support for standard environment variables - #166: Support setting region as env var and/or mount option Changes: - Modified get_aws_profile() to check AWS_PROFILE environment variable - Modified get_target_region() to check AWS_REGION and AWS_DEFAULT_REGION - Added comprehensive test coverage for 
environment variable functionality - Updated README with documentation and examples - Maintains backward compatibility with existing behavior The implementation follows standard AWS CLI precedence: 1. Mount options take highest precedence 2. Environment variables are checked next 3. Config files and instance metadata are fallbacks This improves usability in containerized environments, CI/CD pipelines, and other automated deployment scenarios. --- README.md | 74 +++++++++ src/mount_efs/__init__.py | 12 ++ .../test_environment_variables.py | 151 ++++++++++++++++++ 3 files changed, 237 insertions(+) create mode 100644 test/mount_efs_test/test_environment_variables.py diff --git a/README.md b/README.md index 38964e7e..8d9a5149 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,7 @@ The `efs-utils` package has been verified against the following MacOS distributi - [Step 2. Allow DescribeMountTargets and DescribeAvailabilityZones action in the IAM policy](#step-2-allow-describemounttargets-and-describeavailabilityzones-action-in-the-iam-policy) - [The way to access instance metadata](#the-way-to-access-instance-metadata) - [Use the assumed profile credentials for IAM](#use-the-assumed-profile-credentials-for-iam) + - [Environment Variable Support](#environment-variable-support) - [Enabling FIPS Mode](#enabling-fips-mode) - [License Summary](#license-summary) @@ -645,6 +646,79 @@ sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ sudo mount -t efs -o tls,iam,rolearn="ROLE_ARN",jwtpath="PATH/JWT_TOKEN_FILE" file-system-id efs-mount-point/ ``` +## Environment Variable Support + +Efs-utils supports standard AWS environment variables for configuring credentials and region settings, providing flexibility for different deployment scenarios. 
+ +### AWS Profile Environment Variable + +You can set the AWS profile using the `AWS_PROFILE` environment variable instead of specifying it in the mount command: + +```bash +export AWS_PROFILE=my-profile +sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ +``` + +The precedence order for AWS profile selection is: +1. Mount option: `-o awsprofile=profile-name` +2. Environment variable: `AWS_PROFILE` +3. Default profile from AWS credentials/config files + +### AWS Region Environment Variables + +You can set the AWS region using standard AWS environment variables: + +```bash +# Using AWS_REGION (recommended) +export AWS_REGION=us-west-2 +sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ + +# Using AWS_DEFAULT_REGION (fallback) +export AWS_DEFAULT_REGION=eu-central-1 +sudo mount -t efs -o tls,iam file-system-id efs-mount-point/ +``` + +The precedence order for region selection is: +1. Mount option: `-o region=region-name` +2. Environment variable: `AWS_REGION` +3. Environment variable: `AWS_DEFAULT_REGION` +4. Configuration file setting +5. Instance metadata service +6. Legacy DNS format parsing + +### Examples + +**Using environment variables for cross-region mounting:** + +```bash +export AWS_REGION=us-east-1 +export AWS_PROFILE=cross-region-profile +sudo mount -t efs -o tls,iam fs-1234567890abcdef0:/ /mnt/efs-east +``` + +**Using environment variables in containers or CI/CD:** + +```yaml +apiVersion: v1 +kind: Pod +spec: + containers: + - name: efs-client + env: + - name: AWS_REGION + value: "us-west-2" + - name: AWS_PROFILE + value: "eks-pod-profile" + command: + - mount + - -t + - efs + - -o + - tls,iam + - fs-1234567890abcdef0:/ + - /mnt/efs +``` + ## Enabling FIPS Mode Efs-Utils is able to enter FIPS mode when mounting your file system. 
To enable FIPS you need to modify the EFS-Utils config file: ```bash diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index f3140643..00270494 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -393,9 +393,15 @@ def _fatal_error(message): message, ) + # Check mount option first if "region" in options: return options.get("region") + # Check environment variable + env_region = os.environ.get("AWS_REGION") or os.environ.get("AWS_DEFAULT_REGION") + if env_region: + return env_region + try: return config.get(CONFIG_SECTION, "region") except NoOptionError: @@ -1073,7 +1079,13 @@ def botocore_credentials_helper(awsprofile): def get_aws_profile(options, use_iam): + # Check mount option first awsprofile = options.get("awsprofile") + + # If not provided, check environment variable + if not awsprofile: + awsprofile = os.environ.get("AWS_PROFILE") + if not awsprofile and use_iam: for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]: aws_credentials_configs = read_config(file_path) diff --git a/test/mount_efs_test/test_environment_variables.py b/test/mount_efs_test/test_environment_variables.py new file mode 100644 index 00000000..3815e92a --- /dev/null +++ b/test/mount_efs_test/test_environment_variables.py @@ -0,0 +1,151 @@ +# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. +# +# Licensed under the MIT License. See the LICENSE accompanying this file +# for the specific language governing permissions and limitations under +# the License. + +import os +import pytest + +import mount_efs + +from .. 
import utils + +try: + import ConfigParser +except ImportError: + from configparser import ConfigParser + + +def test_get_aws_profile_with_env_variable(mocker): + """Test that AWS_PROFILE environment variable is used when no mount option is provided""" + options = {} + use_iam = True + + # Mock environment variable + mocker.patch.dict(os.environ, {"AWS_PROFILE": "test-profile"}) + + # Mock file reading to return empty configs + mocker.patch("mount_efs.read_config", return_value=ConfigParser()) + + result = mount_efs.get_aws_profile(options, use_iam) + assert result == "test-profile" + + +def test_get_aws_profile_mount_option_takes_precedence(mocker): + """Test that mount option takes precedence over environment variable""" + options = {"awsprofile": "mount-profile"} + use_iam = True + + # Mock environment variable + mocker.patch.dict(os.environ, {"AWS_PROFILE": "env-profile"}) + + result = mount_efs.get_aws_profile(options, use_iam) + assert result == "mount-profile" + + +def test_get_aws_profile_no_env_variable(mocker): + """Test fallback behavior when no environment variable is set""" + options = {} + use_iam = True + + # Ensure AWS_PROFILE is not set + env_vars = {k: v for k, v in os.environ.items() if k != "AWS_PROFILE"} + mocker.patch.dict(os.environ, env_vars, clear=True) + + # Mock config file to have default profile + mock_config = mocker.MagicMock() + mock_config.get.return_value = "fake_access_key" + mocker.patch("mount_efs.read_config", return_value=mock_config) + + result = mount_efs.get_aws_profile(options, use_iam) + assert result == "default" + + +def test_get_target_region_with_aws_region_env(mocker): + """Test that AWS_REGION environment variable is used""" + config = mocker.MagicMock() + options = {} + + # Mock environment variable + mocker.patch.dict(os.environ, {"AWS_REGION": "us-west-2"}) + + result = mount_efs.get_target_region(config, options) + assert result == "us-west-2" + + +def test_get_target_region_with_aws_default_region_env(mocker): 
+ """Test that AWS_DEFAULT_REGION environment variable is used""" + config = mocker.MagicMock() + options = {} + + # Mock environment variables (AWS_REGION not set, AWS_DEFAULT_REGION set) + env_vars = {k: v for k, v in os.environ.items() if k != "AWS_REGION"} + env_vars["AWS_DEFAULT_REGION"] = "eu-central-1" + mocker.patch.dict(os.environ, env_vars, clear=True) + + result = mount_efs.get_target_region(config, options) + assert result == "eu-central-1" + + +def test_get_target_region_mount_option_takes_precedence(mocker): + """Test that region mount option takes precedence over environment variables""" + config = mocker.MagicMock() + options = {"region": "ap-southeast-1"} + + # Mock environment variables + mocker.patch.dict(os.environ, { + "AWS_REGION": "us-west-2", + "AWS_DEFAULT_REGION": "eu-central-1" + }) + + result = mount_efs.get_target_region(config, options) + assert result == "ap-southeast-1" + + +def test_get_target_region_aws_region_precedence_over_default(mocker): + """Test that AWS_REGION takes precedence over AWS_DEFAULT_REGION""" + config = mocker.MagicMock() + options = {} + + # Mock both environment variables + mocker.patch.dict(os.environ, { + "AWS_REGION": "us-west-2", + "AWS_DEFAULT_REGION": "eu-central-1" + }) + + result = mount_efs.get_target_region(config, options) + assert result == "us-west-2" + + +def test_get_target_region_fallback_to_config_file(mocker): + """Test fallback to config file when no environment variables are set""" + config = mocker.MagicMock() + config.get.return_value = "us-east-1" + options = {} + + # Ensure environment variables are not set + env_vars = {k: v for k, v in os.environ.items() + if k not in ["AWS_REGION", "AWS_DEFAULT_REGION"]} + mocker.patch.dict(os.environ, env_vars, clear=True) + + result = mount_efs.get_target_region(config, options) + assert result == "us-east-1" + + +def test_get_target_region_fallback_to_metadata_service(mocker): + """Test fallback to instance metadata when config file fails""" + 
 config = mocker.MagicMock() + config.get.side_effect = mount_efs.NoOptionError("region") + options = {} + + # Ensure environment variables are not set + env_vars = {k: v for k, v in os.environ.items() + if k not in ["AWS_REGION", "AWS_DEFAULT_REGION"]} + mocker.patch.dict(os.environ, env_vars, clear=True) + + # Mock metadata service + mocker.patch("mount_efs.get_region_from_instance_metadata", return_value="us-west-1") + + result = mount_efs.get_target_region(config, options) + assert result == "us-west-1" From ad31b55b3ff12ffb24ebb158c8642b30a175bf93 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Mon, 28 Jul 2025 21:07:59 +0000 Subject: [PATCH 38/51] Regenerate Cargo.lock with rust 1.70.0 In the previous commit, Cargo.lock was updated with a more recent rust toolchain, which generated a cargo lock with `version = 4`. This led to build errors with customers using older rust versions. In this CR, Cargo.lock is regenerated using rust 1.70.0 so that a `version = 3` Cargo.lock is generated. --- src/proxy/Cargo.lock | 289 +++++++++++++++++++++++++++---------------- 1 file changed, 181 insertions(+), 108 deletions(-) diff --git a/src/proxy/Cargo.lock b/src/proxy/Cargo.lock index c5ab9793..64624272 100644 --- a/src/proxy/Cargo.lock +++ b/src/proxy/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 4 +version = 3 [[package]] name = "addr2line" @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aho-corasick" @@ -23,7 +23,7 @@ version = "0.6.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81ce3d38065e618af2d7b77e10c5ad9a069859b4be3c2250f674af3840d9c8a5" dependencies = [ - "memchr 2.7.4", + "memchr 2.7.5", ] [[package]] @@ -70,7 +70,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -86,9 +86,9 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "backtrace" @@ -119,15 +119,15 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byteorder" @@ -143,9 +143,9 @@ checksum = 
"d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.2.22" +version = "1.2.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" +checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" dependencies = [ "jobserver", "libc", @@ -154,9 +154,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "chrono" @@ -297,12 +297,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -391,7 +391,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -418,7 +418,7 @@ dependencies = [ "futures-macro", "futures-sink", "futures-task", - "memchr 2.7.4", + "memchr 2.7.5", "pin-project-lite", "pin-utils", "slab", @@ -432,7 +432,7 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", ] [[package]] @@ -455,9 +455,9 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" [[package]] name = "heck" @@ -476,9 +476,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.9" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -518,9 +518,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" dependencies = [ "equivalent", "hashbrown", @@ -566,9 +566,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "linux-raw-sys" @@ -578,9 +578,9 @@ checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -649,9 +649,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "memoffset" @@ -664,9 +664,9 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] @@ -678,7 +678,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "windows-sys 0.48.0", ] @@ -715,11 +715,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.3.9", + "hermit-abi 0.5.2", "libc", ] @@ -729,7 +729,7 @@ version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ - "memchr 2.7.4", + "memchr 2.7.5", ] [[package]] @@ -752,9 +752,9 @@ checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-sys" -version = "0.9.108" +version = "0.9.109" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e145e1651e858e820e4860f7b9c5e169bc1d8ce1c86043be79fa7b7634821847" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", @@ -779,9 +779,9 @@ checksum = 
"e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -789,9 +789,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -877,9 +877,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "rand" @@ -894,9 +894,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -942,11 +942,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -956,7 +956,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" dependencies = [ "aho-corasick", - "memchr 2.7.4", + "memchr 2.7.5", "regex-syntax", "thread_local", "utf8-ranges", @@ -979,28 +979,28 @@ checksum = "194d8e591e405d1eecf28819740abed6d719d1a2db87fc0bcdedee9a26d55560" [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustix" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "ryu" @@ -1078,7 +1078,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1094,12 +1094,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.141" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" dependencies = [ "itoa", - "memchr 2.7.4", + "memchr 2.7.5", 
"ryu", "serde", ] @@ -1134,24 +1134,21 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -1182,9 +1179,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.101" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote 1.0.40", @@ -1231,7 +1228,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1242,7 +1239,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", "test-case-core", ] @@ -1272,7 +1269,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1321,7 +1318,7 @@ checksum = 
"5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1387,24 +1384,26 @@ checksum = "7fcfc827f90e53a02eaef5e535ee14266c1d569214c6aa70133a624d8a3164ba" [[package]] name = "uuid" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ "getrandom 0.3.3", - "rand 0.9.1", + "js-sys", + "rand 0.9.2", "uuid-macro-internal", + "wasm-bindgen", ] [[package]] name = "uuid-macro-internal" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dcd78c4f979627a754f5522cea6e6a25e55139056535fe6e69c506cd64a862" +checksum = "26b682e8c381995ea03130e381928e0e005b7c9eb483c6c8682f50e07b33c2b7" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1433,9 +1432,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -1468,7 +1467,7 @@ dependencies = [ "log 0.4.27", "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-shared", ] @@ -1490,7 +1489,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -1537,9 +1536,9 @@ checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", @@ -1556,7 +1555,7 @@ checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] @@ -1567,29 +1566,29 @@ checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" [[package]] name = "windows-result" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" dependencies = [ "windows-link", ] @@ -1621,6 +1620,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -1645,13 +1653,30 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -1664,6 +1689,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -1676,6 +1707,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -1688,12 +1725,24 @@ 
version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -1706,6 +1755,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -1718,6 +1773,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -1730,6 +1791,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = 
"0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -1742,13 +1809,19 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "wit-bindgen-rt" version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -1779,20 +1852,20 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote 1.0.40", - "syn 2.0.101", + "syn 2.0.104", ] From d9efba8148684421d322971c35caf03f6645ee28 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Wed, 30 Jul 2025 14:49:45 +0000 Subject: [PATCH 39/51] Update circle-ci config --- .circleci/config.yml | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 13a80447..05f60c3b 100644 --- 
a/.circleci/config.yml +++ b/.circleci/config.yml @@ -250,9 +250,6 @@ workflows: - build-deb-package: name: ubuntu-latest image: ubuntu:latest - - build-deb-package: - name: ubuntu16 - image: ubuntu:16.04 - build-deb-package: name: ubuntu18 image: ubuntu:18.04 @@ -265,19 +262,13 @@ workflows: - build-deb-package: name: debian11 image: debian:bullseye - - build-centos-rpm-package: - name: centos-latest - image: centos:latest - - build-centos-rpm-package: - name: centos8 - image: centos:centos8 - build-rpm-package: name: rocky8 image: rockylinux/rockylinux:8 - - build-rpm-package-rustup: + - build-rpm-package: name: amazon-linux-latest image: amazonlinux:latest - - build-rpm-package-rustup: + - build-rpm-package: name: amazon-linux-2 image: amazonlinux:2 - build-rpm-package-rustup: From 5001f270655428d1a4561adb404a64f555b78260 Mon Sep 17 00:00:00 2001 From: samuhale Date: Wed, 30 Jul 2025 14:12:35 +0000 Subject: [PATCH 40/51] Fix AWS Env Variable Test and Code Style Issue --- src/mount_efs/__init__.py | 4 +- .../test_environment_variables.py | 77 ++++++++++--------- 2 files changed, 44 insertions(+), 37 deletions(-) diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 00270494..c694d7b0 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -1081,11 +1081,11 @@ def botocore_credentials_helper(awsprofile): def get_aws_profile(options, use_iam): # Check mount option first awsprofile = options.get("awsprofile") - + # If not provided, check environment variable if not awsprofile: awsprofile = os.environ.get("AWS_PROFILE") - + if not awsprofile and use_iam: for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]: aws_credentials_configs = read_config(file_path) diff --git a/test/mount_efs_test/test_environment_variables.py b/test/mount_efs_test/test_environment_variables.py index 3815e92a..32476f1c 100644 --- a/test/mount_efs_test/test_environment_variables.py +++ b/test/mount_efs_test/test_environment_variables.py @@ -5,6 +5,7 @@ # 
the License. import os + import pytest import mount_efs @@ -21,13 +22,13 @@ def test_get_aws_profile_with_env_variable(mocker): """Test that AWS_PROFILE environment variable is used when no mount option is provided""" options = {} use_iam = True - + # Mock environment variable mocker.patch.dict(os.environ, {"AWS_PROFILE": "test-profile"}) - + # Mock file reading to return empty configs mocker.patch("mount_efs.read_config", return_value=ConfigParser()) - + result = mount_efs.get_aws_profile(options, use_iam) assert result == "test-profile" @@ -36,10 +37,10 @@ def test_get_aws_profile_mount_option_takes_precedence(mocker): """Test that mount option takes precedence over environment variable""" options = {"awsprofile": "mount-profile"} use_iam = True - + # Mock environment variable mocker.patch.dict(os.environ, {"AWS_PROFILE": "env-profile"}) - + result = mount_efs.get_aws_profile(options, use_iam) assert result == "mount-profile" @@ -48,16 +49,16 @@ def test_get_aws_profile_no_env_variable(mocker): """Test fallback behavior when no environment variable is set""" options = {} use_iam = True - + # Ensure AWS_PROFILE is not set env_vars = {k: v for k, v in os.environ.items() if k != "AWS_PROFILE"} mocker.patch.dict(os.environ, env_vars, clear=True) - + # Mock config file to have default profile mock_config = mocker.MagicMock() mock_config.get.return_value = "fake_access_key" mocker.patch("mount_efs.read_config", return_value=mock_config) - + result = mount_efs.get_aws_profile(options, use_iam) assert result == "default" @@ -66,10 +67,10 @@ def test_get_target_region_with_aws_region_env(mocker): """Test that AWS_REGION environment variable is used""" config = mocker.MagicMock() options = {} - + # Mock environment variable mocker.patch.dict(os.environ, {"AWS_REGION": "us-west-2"}) - + result = mount_efs.get_target_region(config, options) assert result == "us-west-2" @@ -78,12 +79,12 @@ def test_get_target_region_with_aws_default_region_env(mocker): """Test that 
AWS_DEFAULT_REGION environment variable is used""" config = mocker.MagicMock() options = {} - + # Mock environment variables (AWS_REGION not set, AWS_DEFAULT_REGION set) env_vars = {k: v for k, v in os.environ.items() if k != "AWS_REGION"} env_vars["AWS_DEFAULT_REGION"] = "eu-central-1" mocker.patch.dict(os.environ, env_vars, clear=True) - + result = mount_efs.get_target_region(config, options) assert result == "eu-central-1" @@ -92,13 +93,12 @@ def test_get_target_region_mount_option_takes_precedence(mocker): """Test that region mount option takes precedence over environment variables""" config = mocker.MagicMock() options = {"region": "ap-southeast-1"} - + # Mock environment variables - mocker.patch.dict(os.environ, { - "AWS_REGION": "us-west-2", - "AWS_DEFAULT_REGION": "eu-central-1" - }) - + mocker.patch.dict( + os.environ, {"AWS_REGION": "us-west-2", "AWS_DEFAULT_REGION": "eu-central-1"} + ) + result = mount_efs.get_target_region(config, options) assert result == "ap-southeast-1" @@ -107,13 +107,12 @@ def test_get_target_region_aws_region_precedence_over_default(mocker): """Test that AWS_REGION takes precedence over AWS_DEFAULT_REGION""" config = mocker.MagicMock() options = {} - + # Mock both environment variables - mocker.patch.dict(os.environ, { - "AWS_REGION": "us-west-2", - "AWS_DEFAULT_REGION": "eu-central-1" - }) - + mocker.patch.dict( + os.environ, {"AWS_REGION": "us-west-2", "AWS_DEFAULT_REGION": "eu-central-1"} + ) + result = mount_efs.get_target_region(config, options) assert result == "us-west-2" @@ -123,12 +122,15 @@ def test_get_target_region_fallback_to_config_file(mocker): config = mocker.MagicMock() config.get.return_value = "us-east-1" options = {} - + # Ensure environment variables are not set - env_vars = {k: v for k, v in os.environ.items() - if k not in ["AWS_REGION", "AWS_DEFAULT_REGION"]} + env_vars = { + k: v + for k, v in os.environ.items() + if k not in ["AWS_REGION", "AWS_DEFAULT_REGION"] + } mocker.patch.dict(os.environ, env_vars, 
clear=True) - + result = mount_efs.get_target_region(config, options) assert result == "us-east-1" @@ -136,16 +138,21 @@ def test_get_target_region_fallback_to_config_file(mocker): def test_get_target_region_fallback_to_metadata_service(mocker): """Test fallback to instance metadata when config file fails""" config = mocker.MagicMock() - config.get.side_effect = mount_efs.NoOptionError("region") + config.get.side_effect = mount_efs.NoOptionError("region", "section") options = {} - + # Ensure environment variables are not set - env_vars = {k: v for k, v in os.environ.items() - if k not in ["AWS_REGION", "AWS_DEFAULT_REGION"]} + env_vars = { + k: v + for k, v in os.environ.items() + if k not in ["AWS_REGION", "AWS_DEFAULT_REGION"] + } mocker.patch.dict(os.environ, env_vars, clear=True) - + # Mock metadata service - mocker.patch("mount_efs.get_region_from_instance_metadata", return_value="us-west-1") - + mocker.patch( + "mount_efs.get_region_from_instance_metadata", return_value="us-west-1" + ) + result = mount_efs.get_target_region(config, options) assert result == "us-west-1" From 1c56f20243cf0e7452e91c9086f24da54fcb411a Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Mon, 11 Aug 2025 16:40:50 +0000 Subject: [PATCH 41/51] Remove CentOS 8 and Ubuntu 16.04 from verified Linux distribution list CentOS 8 is dicontinued, and ubuntu 16 reached EOL on April 30, 2021 --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 8d9a5149..9e7e8606 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ The `efs-utils` package has been verified against the following Linux distributi |----------------------| ----- | --------- | | Amazon Linux 2 | `rpm` | `systemd` | | Amazon Linux 2023 | `rpm` | `systemd` | -| CentOS 8 | `rpm` | `systemd` | | RHEL 8 | `rpm` | `systemd` | | RHEL 9 | `rpm` | `systemd` | | Fedora 29 | `rpm` | `systemd` | @@ -18,7 +17,6 @@ The `efs-utils` package has been verified against the following Linux distributi | Fedora 31 
| `rpm` | `systemd` | | Fedora 32 | `rpm` | `systemd` | | Debian 11 | `deb` | `systemd` | -| Ubuntu 16.04 | `deb` | `systemd` | | Ubuntu 18.04 | `deb` | `systemd` | | Ubuntu 20.04 | `deb` | `systemd` | | Ubuntu 22.04 | `deb` | `systemd` | From 1fc4ab307ee54e40358dd3d5cfa73e4a3c36627b Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Mon, 11 Aug 2025 19:10:28 +0000 Subject: [PATCH 42/51] efs-utils v2.3.3-1 release --- amazon-efs-utils.spec | 6 +++++- build-deb.sh | 2 +- config.ini | 2 +- src/mount_efs/__init__.py | 2 +- src/proxy/Cargo.lock | 2 +- src/proxy/Cargo.toml | 2 +- src/watchdog/__init__.py | 2 +- 7 files changed, 11 insertions(+), 7 deletions(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index b513f565..17c9efe2 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.3.2 +Version : 2.3.3 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -192,6 +192,10 @@ fi %clean %changelog +* Mon Aug 11 2025 Anthony Tse - 2.3.3 +- Reset Cargo.lock version number to 3. Using version 4 caused issues for customers using older rust versions. 
+- Add environment variable support for AWS profiles and regions + * Fri Jul 18 2025 Anthony Tse - 2.3.2 - Fix package version numbering diff --git a/build-deb.sh b/build-deb.sh index 7178d643..32b7c087 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.3.2 +VERSION=2.3.3 RELEASE=1 ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index f9fb0176..18d266f5 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.3.2 +version=2.3.3 release=1 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index c694d7b0..dc51ff5e 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -86,7 +86,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.3.2" +VERSION = "2.3.3" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" diff --git a/src/proxy/Cargo.lock b/src/proxy/Cargo.lock index 64624272..09f3c342 100644 --- a/src/proxy/Cargo.lock +++ b/src/proxy/Cargo.lock @@ -249,7 +249,7 @@ checksum = "3c877555693c14d2f84191cfd3ad8582790fc52b5e2274b40b59cf5f5cea25c7" [[package]] name = "efs-proxy" -version = "2.3.2" +version = "2.3.3" dependencies = [ "anyhow", "async-trait", diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 55a2bd11..24ecbab7 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.3.2" +version = "2.3.3" publish = false license = "MIT" diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 3daee3ca..71ef204e 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.3.2" +VERSION = "2.3.3" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" From 6f43ed157722ff5a65de0a944f15f79e92c24dd4 Mon Sep 17 00:00:00 2001 From: Yangjinan Hu Date: Tue, 28 Oct 2025 20:30:44 +0000 Subject: [PATCH 43/51] efs-utils v2.4.0-1 release --- .circleci/config.yml | 123 +- INSTALL.md | 231 ++++ Makefile | 2 +- README.md | 176 +-- amazon-efs-utils.spec | 12 +- build-deb.sh | 2 +- config.ini | 2 +- src/mount_efs/__init__.py | 56 +- src/proxy/Cargo.lock | 812 ++++++------ src/proxy/Cargo.toml | 18 +- src/proxy/build.rs | 2 +- src/proxy/rust-xdr/xdr-codec/Cargo.toml | 25 + src/proxy/rust-xdr/xdr-codec/README.md | 162 +++ src/proxy/rust-xdr/xdr-codec/src/error.rs | 39 + src/proxy/rust-xdr/xdr-codec/src/lib.rs | 751 ++++++++++++ src/proxy/rust-xdr/xdr-codec/src/record.rs | 263 ++++ src/proxy/rust-xdr/xdr-codec/src/test.rs | 739 +++++++++++ .../rust-xdr/xdr-codec/tests/qc-record.rs | 116 ++ .../rust-xdr/xdr-codec/tests/quickcheck.rs | 318 +++++ .../rust-xdr/xdr-codec/tests/test-record.rs | 174 +++ src/proxy/rust-xdr/xdrgen/Cargo.toml | 39 + src/proxy/rust-xdr/xdrgen/README.md | 97 ++ src/proxy/rust-xdr/xdrgen/src/lib.rs | 176 +++ src/proxy/rust-xdr/xdrgen/src/spec/mod.rs | 1090 +++++++++++++++++ src/proxy/rust-xdr/xdrgen/src/spec/test.rs | 252 ++++ src/proxy/rust-xdr/xdrgen/src/spec/xdr_nom.rs | 953 ++++++++++++++ src/proxy/rust-xdr/xdrgen/src/xdrgen.rs | 42 + src/proxy/rust-xdr/xdrgen/tests/lib.rs | 321 +++++ src/watchdog/__init__.py | 110 +- .../test_get_aws_security_credentials.py | 2 +- .../test_get_nfs_mount_options.py | 8 - .../test_optimize_readahead_window.py | 37 + .../test_verify_and_optimize_readahead.py | 432 
+++++++ 33 files changed, 6968 insertions(+), 614 deletions(-) create mode 100644 INSTALL.md create mode 100644 src/proxy/rust-xdr/xdr-codec/Cargo.toml create mode 100644 src/proxy/rust-xdr/xdr-codec/README.md create mode 100644 src/proxy/rust-xdr/xdr-codec/src/error.rs create mode 100644 src/proxy/rust-xdr/xdr-codec/src/lib.rs create mode 100644 src/proxy/rust-xdr/xdr-codec/src/record.rs create mode 100644 src/proxy/rust-xdr/xdr-codec/src/test.rs create mode 100644 src/proxy/rust-xdr/xdr-codec/tests/qc-record.rs create mode 100644 src/proxy/rust-xdr/xdr-codec/tests/quickcheck.rs create mode 100644 src/proxy/rust-xdr/xdr-codec/tests/test-record.rs create mode 100644 src/proxy/rust-xdr/xdrgen/Cargo.toml create mode 100644 src/proxy/rust-xdr/xdrgen/README.md create mode 100644 src/proxy/rust-xdr/xdrgen/src/lib.rs create mode 100644 src/proxy/rust-xdr/xdrgen/src/spec/mod.rs create mode 100644 src/proxy/rust-xdr/xdrgen/src/spec/test.rs create mode 100644 src/proxy/rust-xdr/xdrgen/src/spec/xdr_nom.rs create mode 100644 src/proxy/rust-xdr/xdrgen/src/xdrgen.rs create mode 100644 src/proxy/rust-xdr/xdrgen/tests/lib.rs create mode 100644 test/watchdog_test/test_verify_and_optimize_readahead.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 05f60c3b..b7920761 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -46,11 +46,27 @@ commands: command: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y . 
"$HOME/.cargo/env" + - run: + name: Install golang + command: | + apt-get -y install wget + ARCH=$(dpkg --print-architecture) + VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) + wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - + echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - run: name: Install dependencies command: | DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata - apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext + if grep -q 'Ubuntu 20.04' /etc/os-release 2>/dev/null; then + apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext cmake gcc-10 g++-10 + update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 + update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100 + elif grep -q 'trixie' /etc/os-release 2>/dev/null; then + apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext cmake gcc-13 g++-13 + else + apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext cmake gcc g++ + fi - run: name: Add local build repo as safe git directory command: | @@ -60,13 +76,21 @@ commands: name: Build DEB command: | . 
"$HOME/.cargo/env" + export PATH=$PATH:/usr/local/go/bin + if grep -q 'trixie' /etc/os-release 2>/dev/null; then + export CC=gcc-13 + export CXX=g++-13 + fi rustc --version cargo --version + go version + cmake --version ./build-deb.sh - run: name: Install package command: | - DEBIAN_FRONTEND=noninteractive apt-get -y install ./build/amazon-efs-utils*deb + apt-get update + DEBIAN_FRONTEND=noninteractive apt-get -y install --fix-missing ./build/amazon-efs-utils*deb - run: name: Check installed successfully command: | @@ -74,13 +98,31 @@ commands: build-rpm: steps: - checkout + - run: + name: Install golang + command: | + yum -y install wget tar gzip + ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/') + VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) + wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - + echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - run: name: Install dependencies command: | - yum -y install rpm-build make systemd rust cargo openssl-devel + if grep -q '^ID="amzn"' /etc/os-release && grep -q '^VERSION_ID="2"' /etc/os-release 2>/dev/null; then + yum -y install rpm-build make systemd rust cargo openssl-devel cmake3 gcc gcc-c++ perl binutils + if [ ! 
-e /usr/bin/cmake ]; then + ln -sf /usr/bin/cmake3 /usr/bin/cmake + fi + else + yum -y install rpm-build make systemd rust cargo openssl-devel cmake gcc gcc-c++ perl binutils + fi - run: name: Build RPM command: | + export PATH=$PATH:/usr/local/go/bin + go version + cmake --version make rpm - run: name: Install package @@ -104,16 +146,29 @@ commands: name: Install latest Rust command: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + - run: + name: Install golang + command: | + yum -y install wget tar gzip + ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/') + VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) + wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - + echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - checkout - run: name: Install dependencies command: | - yum -y install rpm-build make systemd rust cargo openssl-devel + yum -y install rpm-build make systemd rust cargo openssl-devel cmake gcc gcc-c++ gcc13 gcc13-c++ perl binutils - run: name: Build RPM command: | . 
"$HOME/.cargo/env" + export PATH=$PATH:/usr/local/go/bin + export CC=gcc-13 + export CXX=g++-13 rustc --version + go version + cmake --version make rpm - run: name: Install package @@ -143,16 +198,35 @@ commands: name: Install latest Rust command: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y + - run: + name: Install golang + command: | + zypper install -y wget tar gzip + ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/') + VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) + wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - + echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - run: name: Install dependencies command: | zypper install -y --force-resolution rpm-build - zypper install -y make systemd rust cargo openssl-devel + if grep -q 'Tumbleweed' /etc/os-release 2>/dev/null; then + zypper install -y make systemd rust cargo openssl-devel cmake gcc13 gcc13-c++ perl binutils + else + zypper install -y make systemd rust cargo openssl-devel cmake gcc gcc-c++ perl binutils + fi - run: name: Build RPM command: | . 
"$HOME/.cargo/env" + export PATH=$PATH:/usr/local/go/bin + if grep -q 'Tumbleweed' /etc/os-release 2>/dev/null; then + export CC=gcc-13 + export CXX=g++-13 + fi rustc --version + go version + cmake --version make rpm - run: name: Install package @@ -259,9 +333,18 @@ workflows: - build-deb-package: name: ubuntu22 image: ubuntu:22.04 + - build-deb-package: + name: ubuntu24 + image: ubuntu:24.04 - build-deb-package: name: debian11 image: debian:bullseye + - build-deb-package: + name: debian12 + image: debian:bookworm + - build-deb-package: + name: debian13 + image: debian:trixie - build-rpm-package: name: rocky8 image: rockylinux/rockylinux:8 @@ -272,29 +355,8 @@ workflows: name: amazon-linux-2 image: amazonlinux:2 - build-rpm-package-rustup: - name: fedora29 - image: fedora:29 - - build-rpm-package-rustup: - name: fedora30 - image: fedora:30 - - build-rpm-package-rustup: - name: fedora31 - image: fedora:31 - - build-rpm-package-rustup: - name: fedora32 - image: fedora:32 - - build-rpm-package-rustup: - name: fedora33 - image: fedora:33 - - build-rpm-package-rustup: - name: fedora34 - image: fedora:34 - - build-rpm-package-rustup: - name: fedora35 - image: fedora:35 - - build-rpm-package-rustup: - name: fedora36 - image: fedora:36 + name: fedora41 + image: fedora:41 - build-suse-rpm-package: name: opensuse-leap15.1 image: opensuse/leap:15.1 @@ -309,4 +371,7 @@ workflows: image: opensuse/leap:15.4 - build-suse-rpm-package: name: opensuse-leap-latest - image: opensuse/leap:latest \ No newline at end of file + image: opensuse/leap:latest + - build-suse-rpm-package: + name: opensuse-tumbleweed + image: opensuse/tumbleweed \ No newline at end of file diff --git a/INSTALL.md b/INSTALL.md new file mode 100644 index 00000000..16f05743 --- /dev/null +++ b/INSTALL.md @@ -0,0 +1,231 @@ +# Building efs-utils from Source + +This guide provides detailed instructions for building `efs-utils` from source on various Linux distributions. 
+ +## Build Prerequisites + +Building efs-utils v2.0+ requires the following dependencies: + +* `rust` 1.70+ +* `cargo` +* `go` 1.17.13+ +* `perl` +* `cmake` 3.0+ +* `gcc` and `g++` (or `gcc-c++`) +* `make` +* `git` + +**Recommended Resource Size:** minimum 2 vCPUs, 4GB RAM to ensure sufficient resources for compilation. In AWS EC2, use t3.medium or larger. + +## Installing Rust and Cargo + +If your distribution doesn't provide a rust or cargo package, or it provides versions +that are older than 1.70, then you can install rust and cargo through rustup: + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +. "$HOME/.cargo/env" +``` + +## Installing Go + +Ensure you have Go 1.17.13 or later is installed and configured on your system. +Some distributions provide Go packages through package manager, but they may have outdated versions. +```bash +# Try installing from package manager (may not be available or have outdated go version) +# RPM-based +sudo yum update -y +sudo yum -y install golang + +# OpenSUSE/SLES +sudo zypper refresh +sudo zypper install -y go + +# DEB-based +sudo apt-get update +sudo apt-get -y install golang + +# Verify Go 1.17.13 or later is installed +go version +``` + +Refer to the official [Go documentation](https://go.dev/doc/install) for detailed installation instructions of latest Go version. + +## GCC Version Requirements + +**For distributions with GCC 14 or later (Debian 13, Fedora 41/42, RHEL 10, openSUSE Tumbleweed):** + +The AWS-LC FIPS module requires GCC version 13 or earlier. If your distribution uses GCC 14 or later by default, you'll need to use GCC 13 instead. If package manager does not provide GCC version 13 or earlier, follow [instruction](https://gcc.gnu.org/install/) to install desired version of GCC. 
+ +```bash +# Install GCC 13 (if not already installed) +# For Debian 13 +sudo apt-get install -y gcc-13 g++-13 + +# For Fedora 41 +sudo yum -y install gcc13 gcc13-c++ + +# For openSUSE Tumbleweed +sudo zypper install -y gcc13 gcc13-c++ + +# For Fedora 42 and RHEL 10, package manager does not provide GCC 13 or earlier +# Follow offical GCC instruction to install desired version of GCC +# https://gcc.gnu.org/install/ + +# Set GCC 13 as the compiler for the build +export CC=gcc-13 +export CXX=g++-13 +# Then proceed with the normal build steps +``` + +**Ubuntu 20.04, upgrade to use gcc-10 and g++-10** + +```bash +# Install GCC 10 +sudo apt-get -y install gcc-10 g++-10 + +# Set GCC 10 as the compiler for the build +export CC=gcc-10 +export CXX=g++-10 +``` + +**Note:** Alternatively, you can set the system default compiler using `update-alternatives` (requires sudo and affects all applications) + +## CMake version requirement ## + +Building AWS-LC requires CMake 3.0 or later. CMake is typically available through the standard packager manager. + +**Amazon Linux 2 specific:** + +Install `cmake3` instead of `cmake`: + +```bash +sudo yum -y install cmake3 +``` + +## RPM-based Distributions + +### RHEL/CentOS/Amazon Linux/Fedora + +```bash +sudo yum -y install git rpm-build make rust cargo openssl-devel gcc gcc-c++ cmake wget perl # remove gcc gcc-c++ here if you already installed a compatible version following GCC Version Requirements instruction +git clone https://github.com/aws/efs-utils +cd efs-utils +make rpm +sudo yum -y install build/amazon-efs-utils*rpm +``` + +### OpenSUSE/SLES + +```bash +sudo zypper refresh +sudo zypper install -y git binutils rpm-build make rust cargo libopenssl-devel gcc gcc-c++ cmake wget perl # remove gcc gcc-c++ here if you already installed a compatible version following GCC Version Requirements instruction, if you encounter "Choose from above solutions.." in this step, remove -y flag and choose manually. 
+git clone https://github.com/aws/efs-utils +cd efs-utils +make rpm +sudo zypper --no-gpg-checks install -y build/amazon-efs-utils*rpm +``` + +## DEB-based Distributions + +### Debian/Ubuntu + +```bash +sudo apt-get update +sudo apt-get -y install git binutils rustc cargo libssl-dev pkg-config gettext make gcc g++ cmake wget perl # remove gcc g++ here if you already installed a compatible version following GCC Version Requirements instruction +git clone https://github.com/aws/efs-utils +cd efs-utils +./build-deb.sh +sudo apt-get -y install ./build/amazon-efs-utils*deb +``` + +## Common Build Issues + +### OpenSUSE repository errors ### + +If you encounter repository errors like `File './suse/noarch/bash-completion-2.11-2.1.noarch.rpm' not found on medium 'http://download.opensuse.org/tumbleweed/repo/oss/'` during installation of `git`, run the following commands to re-add repo OSS and NON-OSS, then run the install script above again. + +```bash +sudo zypper ar -f -n OSS http://download.opensuse.org/tumbleweed/repo/oss/ OSS +sudo zypper ar -f -n NON-OSS http://download.opensuse.org/tumbleweed/repo/non-oss/ NON-OSS +sudo zypper refresh +``` + +### `make rpm` fails due to "feature `edition2021` is required" ### + +Update to a version of rust and cargo +that is newer than 1.70. To install a new version of rust and cargo, run +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +. "$HOME/.cargo/env" +``` + +### You installed a new version of rust with the above command, but your system is still using the rust installed by the package manager ### + +When installing rust with the rustup script above, the script will fail if it detects a rust already exists on the system. +Un-install the package manager's rust, and re-install rust through rustup. Once done, you will need to install rust through the package manager again to satisfy +the RPM's dependencies. 
+```bash +yum remove cargo rust +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +yum install cargo rust +. "$HOME/.cargo/env" +``` + +### When you run `make rpm`, compilation of efs-proxy fails due to `error: linker cc not found` ### + +Make sure that you have a linker installed on your system. For example, on Amazon Linux or RHEL, install gcc with +```bash +yum install gcc +``` + +### Installation Issue - Failed Build Dependencies ### + +If rust dependencies was installed using rustup and the package manager does not have a rust and/or cargo package installed, you may see an error like this. + +``` +error: Failed build dependencies: + cargo is needed by amazon-efs-utils-2.1.0-1.el7_9.x86_64 + rust is needed by amazon-efs-utils-2.1.0-1.el7_9.x86_64 +``` + +In this case, the 'make rpm' command in the installation script above should be replaced by 'make rpm-without-system-rust' to remove the rpmbuild dependency check. + +### AWS-LC FIPS module build issue: WARNING: FIPS build is known to fail on GCC >= 14 ### + +For Debian 13, Fedora 41/42, RHEL 10 and openSUSE Tumbleweed, default GCC version is higher than 14, follow instructions in GCC Version Requirements to install a compatiable GCC version. + +### AWS-LC FIPS module build issue: Your compiler (cc) is not supported due to a memcmp related bug reported ### + +For Ubuntu 20.04, GCC installed from package manager on Ubuntu 20.04 show this error during build, follow instructions in GCC Version Requirements to install a compatiable GCC version. + + +## Running Tests + +After building from source, you can run the test suite: + +1. Set up a virtualenv: + +```bash +virtualenv ~/.envs/efs-utils +source ~/.envs/efs-utils/bin/activate +pip install -r requirements.txt +``` + +2. 
Run tests: + +```bash +make test +``` + +## Verifying Installation + +After installation, verify efs-utils is working: + +```bash +mount.efs --version +``` + +## Next Steps + +See the main [README](efs-utils.README.md) for usage instructions and configuration options. diff --git a/Makefile b/Makefile index bc39b058..6d332427 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ SOURCE_TARBALL = $(PACKAGE_NAME).tar.gz SPECFILE = $(PACKAGE_NAME).spec BUILD_DIR = build/rpmbuild PROXY_VERSION = 2.0.0 -RPM_BUILD_FLAGS ?= --with system_rust +RPM_BUILD_FLAGS ?= --with system_rust --noclean export PYTHONPATH := $(shell pwd)/src .PHONY: clean diff --git a/README.md b/README.md index 9e7e8606..b635bafa 100644 --- a/README.md +++ b/README.md @@ -12,39 +12,27 @@ The `efs-utils` package has been verified against the following Linux distributi | Amazon Linux 2023 | `rpm` | `systemd` | | RHEL 8 | `rpm` | `systemd` | | RHEL 9 | `rpm` | `systemd` | -| Fedora 29 | `rpm` | `systemd` | -| Fedora 30 | `rpm` | `systemd` | -| Fedora 31 | `rpm` | `systemd` | -| Fedora 32 | `rpm` | `systemd` | -| Debian 11 | `deb` | `systemd` | -| Ubuntu 18.04 | `deb` | `systemd` | | Ubuntu 20.04 | `deb` | `systemd` | | Ubuntu 22.04 | `deb` | `systemd` | +| Ubuntu 24.04 | `deb` | `systemd` | | OpenSUSE Leap | `rpm` | `systemd` | -| OpenSUSE Tumbleweed | `rpm` | `systemd` | -| Oracle8 | `rpm` | `systemd` | -| SLES 12 | `rpm` | `systemd` | | SLES 15 | `rpm` | `systemd` | The `efs-utils` package has been verified against the following MacOS distributions: | Distribution | `init` System | |----------------|---------------| -| MacOS Big Sur | `launchd` | -| MacOS Monterey | `launchd` | | MacOS Ventura | `launchd` | | MacOS Sonoma | `launchd` | | MacOS Sequoia | `launchd` | +| MacOS Tahoe | `launchd` | ## README contents - - [Prerequisites](#prerequisites) - - [Optional](#optional) - [Installation](#installation) - [On Amazon Linux distributions](#on-amazon-linux-distributions) - [Install via AWS Systems 
Manager Distributor](#install-via-aws-systems-manager-distributor) - [On other Linux distributions](#on-other-linux-distributions) - - [On MacOS Big Sur, macOS Monterey and macOS Ventura distribution](#on-macos-big-sur-macos-monterey-and-macos-ventura-distribution) - - [Run tests](#run-tests) + - [On macOS Tahoe, macOS Sequoia, macOS Sonoma and macOS Ventura distribution](#on-macos-tahoe-macos-sequoia-macos-sonoma-and-macos-ventura-distribution) - [Usage](#usage) - [mount.efs](#mountefs) - [MacOS](#macos) @@ -74,20 +62,6 @@ The `efs-utils` package has been verified against the following MacOS distributi - [Enabling FIPS Mode](#enabling-fips-mode) - [License Summary](#license-summary) - -## Prerequisites - -* `nfs-utils` (RHEL/CentOS/Amazon Linux/Fedora) or `nfs-common` (Debian/Ubuntu) -* OpenSSL-devel 1.0.2+ -* Python 3.7/3.8 -* `stunnel` 4.56+ -- `rust` 1.70+ -- `cargo` - -## Optional - -* `botocore` 1.12.0+ - ## Installation ### On Amazon Linux distributions @@ -113,138 +87,28 @@ for more guidance.) ### On other Linux distributions -Other distributions require building the package from source and installing it. - -If your distribution doesn't provide a rust or cargo package, or it provides versions -that are older than 1.70, then you can install rust and cargo through rustup: -```bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -. 
"$HOME/.cargo/env" -``` - -- To build and install an RPM: - -If the distribution is not OpenSUSE or SLES - -```bash -sudo yum -y install git rpm-build make rust cargo openssl-devel -git clone https://github.com/aws/efs-utils -cd efs-utils -make rpm -sudo yum -y install build/amazon-efs-utils*rpm -``` - -Otherwise - -```bash -sudo zypper refresh -sudo zypper install -y git rpm-build make rust cargo openssl-devel -git clone https://github.com/aws/efs-utils -cd efs-utils -make rpm -sudo zypper --no-gpg-checks install -y build/amazon-efs-utils*rpm -``` - -On OpenSUSE, if you see error like `File './suse/noarch/bash-completion-2.11-2.1.noarch.rpm' not found on medium 'http://download.opensuse.org/tumbleweed/repo/oss/'` -during installation of `git`, run the following commands to re-add repo OSS and NON-OSS, then run the install script above again. - -```bash -sudo zypper ar -f -n OSS http://download.opensuse.org/tumbleweed/repo/oss/ OSS -sudo zypper ar -f -n NON-OSS http://download.opensuse.org/tumbleweed/repo/non-oss/ NON-OSS -sudo zypper refresh -``` - -- To build and install a Debian package: - -```bash -sudo apt-get update -sudo apt-get -y install git binutils rustc cargo pkg-config libssl-dev gettext -git clone https://github.com/aws/efs-utils -cd efs-utils -./build-deb.sh -sudo apt-get -y install ./build/amazon-efs-utils*deb -``` - -If your Debian distribution doesn't provide a rust or cargo package, or your distribution provides versions -that are older than 1.70, then you can install rust and cargo through rustup: -```bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -. "$HOME/.cargo/env" -``` - -### Common installation issues with efs-utils v2.0.0 -**`make rpm` fails due to "feature `edition2021` is required"**: +Building from source requires Rust 1.70+, Cargo, Go 1.17.13+, CMake 3.0+, GCC/G++, and Perl. -Update to a version of rust and cargo -that is newer than 1.70. 
To install a new version of rust and cargo, run -```bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -. "$HOME/.cargo/env" -``` - -**You installed a new version of rust with the above command, but your system is still using the rust installed by the package manager**: - -When installing rust with the rustup script above, the script will fail if it detects a rust already exists on the system. -Un-install the package manager's rust, and re-install rust through rustup. Once done, you will need to install rust through the package manager again to satisfy -the RPM's dependencies. -```bash -yum remove cargo rust -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -yum install cargo rust -. "$HOME/.cargo/env" -``` - -**When you run `make rpm`, compilation of efs-proxy fails due to `error: linker cc not found`**: - -Make sure that you have a linker installed on your system. For example, on Amazon Linux or RHEL, install gcc with -```bash -yum install gcc -``` - -**Installation Issue - Failed Build Dependencies** - -If rust dependencies was installed using rustup and the package manager does not have a rust and/or cargo package installed, you may see an error like this. - -``` -error: Failed build dependencies: - cargo is needed by amazon-efs-utils-2.1.0-1.el7_9.x86_64 - rust is needed by amazon-efs-utils-2.1.0-1.el7_9.x86_64 -``` - -In this case, the 'make rpm' command in the installation script above should be replaced by 'make rpm-without-system-rust' to remove the rpmbuild dependency check. 
+**See [INSTALL.md](INSTALL.md) for detailed build instructions for your distribution.**
 
-### On macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura distribution
+### On macOS Tahoe, macOS Sequoia, macOS Sonoma and macOS Ventura distribution
 
-For EC2 Mac instances running macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, you can install amazon-efs-utils from the
+For EC2 Mac instances running macOS Tahoe, macOS Sequoia, macOS Sonoma and macOS Ventura, you can install amazon-efs-utils from the
 [homebrew-aws](https://github.com/aws/homebrew-aws) respository. **Note that this will ONLY work on EC2 instances
-running macOS Sequoia, macOS Big Sur, macOS Monterey, macOS Sonoma and macOS Ventura, not local Mac computers.**
+running macOS Tahoe, macOS Sequoia, macOS Sonoma and macOS Ventura, not local Mac computers.**
 
 ```bash
 brew install amazon-efs-utils
 ```
 
-This will install amazon-efs-utils on your EC2 Mac Instance running macOS Big Sur, macOS Monterey and macOS Ventura in the directory `/usr/local/Cellar/amazon-efs-utils`.
+This will install amazon-efs-utils in:
+- Intel Macs: `/usr/local/Cellar/amazon-efs-utils`
+- Apple Silicon Macs: `/opt/homebrew/Cellar/amazon-efs-utils`
 
 ***Follow the instructions in caveats when using efs-utils on EC2 Mac instance for the first time.*** To check the package caveats run below command
 
 ```bash
 brew info amazon-efs-utils
 ```
 
-#### Run tests
-
-- [Set up a virtualenv](http://libzx.so/main/learning/2016/03/13/best-practice-for-virtualenv-and-git-repos.html) for efs-utils
-
-```bash
-virtualenv ~/.envs/efs-utils
-source ~/.envs/efs-utils/bin/activate
-pip install -r requirements.txt
-```
-
-- Run tests
-
-```bash
-make test
-```
-
 ## Usage
 
 ### mount.efs
@@ -722,23 +586,9 @@ Efs-Utils is able to enter FIPS mode when mounting your file system.
To enable F ```bash sed -i "s/fips_mode_enabled = false/fips_mode_enabled = true/" /etc/amazon/efs/efs-utils.conf ``` -This will enable any potential API call from EFS-Utils to use FIPS endpoints and cause stunnel to enter FIPS mode - -Note: FIPS mode requires that the installed version of OpenSSL is compiled with FIPS. - -To verify that the installed version is compiled with FIPS, look for `OpenSSL X.X.Xx-fips` in the `stunnel -version` command output e.g. -```bash -stunnel -version -``` - -Example output for FIPS compiled stunnel -``` -stunnel 4.56 on x86_64-koji-linux-gnu platform -Compiled/running with OpenSSL 1.0.2k-fips 26 Jan 2017 -Threading:PTHREAD Sockets:POLL,IPv6 SSL:ENGINE,OCSP,FIPS Auth:LIBWRAP -``` +This will enable any potential API call from EFS-Utils to use FIPS endpoints and cause proxy to enter FIPS mode -For more information on how to configure OpenSSL with FIPS see the [OpenSSL FIPS README](https://github.com/openssl/openssl/blob/master/README-FIPS.md). +Efs-Utils is configured to compile with AWS-LC FIPS module by default. 
For more information on AWS-LC FIPS module see [AWS-LC FIPS README](https://github.com/aws/aws-lc/blob/main/crypto/fipsmodule/FIPS.md) ## License Summary diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index 17c9efe2..5eca2886 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.3.3 +Version : 2.4.0 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -49,7 +49,7 @@ Group : Amazon/Tools License : MIT URL : https://aws.amazon.com/efs -BuildArchitectures: x86_64 aarch64 +BuildArch: x86_64 aarch64 Requires : nfs-utils %if 0%{?amzn2} @@ -124,6 +124,10 @@ mv vendor %{_builddir}/%{name}/src/proxy/ %endif %build +# For AL2, cmake3 needs to be set as CMAKE env var +%if 0%{?amzn2} +export CMAKE=/usr/bin/cmake3 +%endif cd %{_builddir}/%{name}/src/proxy cargo build --release --manifest-path %{_builddir}/%{name}/src/proxy/Cargo.toml @@ -192,6 +196,10 @@ fi %clean %changelog +* Fri Oct 03 2025 Yangjinan Hu - 2.4.0 +- Upgrade s2n-tls version in efs-proxy to use AWS-LC +- Add ubuntu24 and macOS Tahoe support efs-utils + * Mon Aug 11 2025 Anthony Tse - 2.3.3 - Reset Cargo.lock version number to 3. Using version 4 caused issues for customers using older rust versions. 
- Add environment variable support for AWS profiles and regions diff --git a/build-deb.sh b/build-deb.sh index 32b7c087..2eef495a 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.3.3 +VERSION=2.4.0 RELEASE=1 ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index 18d266f5..f2e9b000 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.3.3 +version=2.4.0 release=1 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index dc51ff5e..968c2738 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -86,7 +86,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.3.3" +VERSION = "2.4.0" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -95,6 +95,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] +UBUNTU_24_RELEASE = "Ubuntu 24" CLONE_NEWNET = 0x40000000 CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" @@ -249,7 +250,6 @@ "verify", "rolearn", "jwtpath", - "fsap", "crossaccount", LEGACY_STUNNEL_MOUNT_OPTION, ] @@ -287,6 +287,7 @@ MACOS_VENTURA_RELEASE = "macOS-13" MACOS_SONOMA_RELEASE = "macOS-14" MACOS_SEQUOIA_RELEASE = "macOS-15" +MACOS_TAHOE_RELEASE = "macOS-26" # Multiplier for max read ahead buffer size @@ -302,11 +303,12 @@ MACOS_VENTURA_RELEASE, MACOS_SONOMA_RELEASE, MACOS_SEQUOIA_RELEASE, + MACOS_TAHOE_RELEASE, ] MAC_OS_PLATFORM_LIST = ["darwin"] -# MacOS Versions : Sequoia - 24.*, Sonoma - 23.*, Ventura - 22.*, Monterey - 21.*, Big Sur - 20.*, Catalina - 19.*, Mojave - 18.*. Catalina and Mojave are not supported for now -MAC_OS_SUPPORTED_VERSION_LIST = ["20", "21", "22", "23", "24"] +# MacOS Versions : Tahoe - 25.*, Sequoia - 24.*, Sonoma - 23.*, Ventura - 22.*, Monterey - 21.*, Big Sur - 20.*, Catalina - 19.*, Mojave - 18.*. 
Catalina and Mojave are not supported for now +MAC_OS_SUPPORTED_VERSION_LIST = ["20", "21", "22", "23", "24", "25"] AWS_FIPS_ENDPOINT_CONFIG_ENV = "AWS_USE_FIPS_ENDPOINT" ECS_URI_ENV = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" @@ -2000,23 +2002,23 @@ def bootstrap_proxy( preexec_fn=os.setsid, close_fds=True, ) - logging.info( - "Started %s, pid: %d", - "efs-proxy" if efs_proxy_enabled else "stunnel", - tunnel_proc.pid, - ) - update_tunnel_temp_state_file_with_tunnel_pid( - temp_tls_state_file, state_file_dir, tunnel_proc.pid - ) + try: + logging.info( + "Started %s, pid: %d", + "efs-proxy" if efs_proxy_enabled else "stunnel", + tunnel_proc.pid, + ) - if "netns" not in options: - test_tlsport(options["tlsport"]) - else: - with NetNS(nspath=options["netns"]): - test_tlsport(options["tlsport"]) + update_tunnel_temp_state_file_with_tunnel_pid( + temp_tls_state_file, state_file_dir, tunnel_proc.pid + ) - try: + if "netns" not in options: + test_tlsport(options["tlsport"]) + else: + with NetNS(nspath=options["netns"]): + test_tlsport(options["tlsport"]) yield tunnel_proc finally: # The caller of this function should use this function in the context of a `with` statement @@ -3360,7 +3362,8 @@ def verify_tlsport_can_be_connected(tlsport): logging.debug("Trying to connect to 127.0.0.1: %s", tlsport) test_socket.connect(("127.0.0.1", tlsport)) return True - except ConnectionRefusedError: + except Exception as e: + logging.warning("Error connecting to 127.0.0.1:%s, %s", tlsport, e) return False finally: test_socket.close() @@ -4084,6 +4087,7 @@ def optimize_readahead_window(mountpoint, options, config): DEFAULT_NFS_MAX_READAHEAD_MULTIPLIER * int(options["rsize"]) / 1024 ) + system_release_version = get_system_release_version() try: major, minor = decode_device_number(os.stat(mountpoint).st_dev) # modify read_ahead_kb in /sys/class/bdi//read_ahead_kb @@ -4096,6 +4100,20 @@ def optimize_readahead_window(mountpoint, options, config): read_ahead_kb_config_file, 
str(fixed_readahead_kb), ) + if UBUNTU_24_RELEASE in system_release_version: + # For Ubuntu 24, we use a delayed approach to setting the readahead value. + # This is necessary because on Ubuntu 24, there's a race condition with udev + # rules that can reset our readahead value immediately after we set it. + p = subprocess.Popen( + "sleep 2 && echo %s > %s" + % (fixed_readahead_kb, read_ahead_kb_config_file), + shell=True, + stderr=subprocess.PIPE, + stdout=subprocess.DEVNULL, + ) + logging.debug("Started background thread for delayed readahead setting") + return + p = subprocess.Popen( "echo %s > %s" % (fixed_readahead_kb, read_ahead_kb_config_file), shell=True, diff --git a/src/proxy/Cargo.lock b/src/proxy/Cargo.lock index 09f3c342..8c7a20a1 100644 --- a/src/proxy/Cargo.lock +++ b/src/proxy/Cargo.lock @@ -4,34 +4,28 @@ version = 3 [[package]] name = "addr2line" -version = "0.24.2" +version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "e7a2e47a1fbe209ee101dd6d61285226744c6c8d3c21c8dc878ba6cb9f467f3a" dependencies = [ "gimli", ] [[package]] -name = "adler2" -version = "2.0.1" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.6.10" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81ce3d38065e618af2d7b77e10c5ad9a069859b4be3c2250f674af3840d9c8a5" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ - "memchr 2.7.5", + "memchr 2.7.6", ] -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -52,9 +46,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arc-swap" @@ -64,13 +58,13 @@ checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] @@ -90,19 +84,78 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" +[[package]] +name = "aws-lc-fips-sys" +version = "0.13.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede71ad84efb06d748d9af3bc500b14957a96282a69a6833b1420dcacb411cc3" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "regex", +] + +[[package]] +name = "aws-lc-rs" +version = "1.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "879b6c89592deb404ba4dc0ae6b58ffd1795c78991cbb5b8bc441c48a070440d" +dependencies = [ + "aws-lc-fips-sys", + "aws-lc-sys", + "untrusted", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.32.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "107a4e9d9cab9963e04e84bb8dee0e25f2a987f9a8bad5ed054abd439caa8f8c" +dependencies = [ + "bindgen", + 
"cc", + "cmake", + "dunce", + "fs_extra", +] + [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "b7815ea54e4d821e791162e078acbebfd6d8c8939cd559c9335dceb1c8ca7282" dependencies = [ "addr2line", + "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", +] + +[[package]] +name = "bindgen" +version = "0.72.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" +dependencies = [ + "bitflags 2.10.0", + "cexpr", + "clang-sys", + "itertools", + "log 0.4.28", + "prettyplease", + "proc-macro2", + "quote 1.0.41", + "regex", + "rustc-hash", + "shlex", + "syn 2.0.108", ] [[package]] @@ -119,9 +172,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "bumpalo" @@ -143,28 +196,37 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "cc" -version = "1.2.30" +version = "1.2.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deec109607ca693028562ed836a5f1c4b8bd77755c4e132fc5ce11b0b6211ae7" +checksum = "739eb0f94557554b3ca9a86d2d37bebd49c5e6d0c1d2bda35ba5bdac830befc2" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom 7.1.3", +] + 
[[package]] name = "cfg-if" -version = "1.0.1" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "chrono" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", @@ -172,6 +234,17 @@ dependencies = [ "windows-link", ] +[[package]] +name = "clang-sys" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "clap" version = "2.34.0" @@ -211,7 +284,7 @@ dependencies = [ "heck", "proc-macro-error", "proc-macro2", - "quote 1.0.40", + "quote 1.0.41", "syn 1.0.109", ] @@ -224,6 +297,15 @@ dependencies = [ "os_str_bytes", ] +[[package]] +name = "cmake" +version = "0.1.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" +dependencies = [ + "cc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" @@ -237,7 +319,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2", - "quote 1.0.40", + "quote 1.0.41", "syn 1.0.109", ] @@ -247,25 +329,29 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c877555693c14d2f84191cfd3ad8582790fc52b5e2274b40b59cf5f5cea25c7" +[[package]] +name = "dunce" +version = "1.0.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "efs-proxy" -version = "2.3.3" +version = "2.4.0" dependencies = [ "anyhow", "async-trait", - "backtrace", "bytes", "chrono", "clap 4.0.0", "fern", "futures", - "log 0.4.27", + "log 0.4.28", "log4rs", "nix", "onc-rpc", "rand 0.8.5", "s2n-tls", - "s2n-tls-sys", "s2n-tls-tokio", "serde", "serde_ini", @@ -279,30 +365,33 @@ dependencies = [ "xdrgen", ] +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + [[package]] name = "env_logger" -version = "0.4.3" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580" dependencies = [ - "log 0.3.9", + "humantime", + "is-terminal", + "log 0.4.28", "regex", + "termcolor", ] -[[package]] -name = "equivalent" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" - [[package]] name = "errno" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] @@ -326,15 +415,27 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" dependencies = [ - "log 0.4.27", + "log 0.4.28", ] +[[package]] +name = "find-msvc-tools" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127" + [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "futures" version = "0.3.31" @@ -390,8 +491,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] @@ -418,7 +519,7 @@ dependencies = [ "futures-macro", "futures-sink", "futures-task", - "memchr 2.7.5", + "memchr 2.7.6", "pin-project-lite", "pin-utils", "slab", @@ -432,32 +533,38 @@ checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "libc", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi", ] [[package]] name = "getrandom" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasip2", ] [[package]] name = "gimli" -version = "0.31.1" +version = "0.24.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" + +[[package]] +name = "glob" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = 
"0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "hashbrown" -version = "0.15.4" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "heck" @@ -488,21 +595,21 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", - "log 0.4.27", + "log 0.4.28", "wasm-bindgen", "windows-core", ] @@ -518,14 +625,34 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.10.0" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe4cd85333e22411419a0bcae1297d25e58c9443848b11dc6a86fefe8c78a661" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ - "equivalent", + "autocfg", "hashbrown", ] +[[package]] +name = "is-terminal" +version = "0.4.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" +dependencies = [ + "hermit-abi 0.5.2", + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.15" @@ -534,19 +661,19 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" dependencies = [ "once_cell", "wasm-bindgen", @@ -559,28 +686,38 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73" [[package]] -name = "lazy_static" -version = "1.5.0" +name = "libc" +version = "0.2.177" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" [[package]] -name = "libc" -version = "0.2.174" +name = "libloading" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" +checksum = "d7c4b02199fee7c5d21a5ae7d8cfa79a6ef5bb2fc834d6e9058e89c825efdc55" +dependencies = [ + "cfg-if", + "windows-link", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "lock_api" -version = "0.4.13" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -592,14 +729,14 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" dependencies = [ - "log 0.4.27", + "log 0.4.28", ] [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" dependencies = [ "serde", ] @@ -612,9 +749,9 @@ checksum = "a94d21414c1f4a51209ad204c1776a3d0765002c76c6abcb602a6f09f1e881c7" [[package]] name = "log4rs" -version = "1.3.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0816135ae15bd0391cf284eab37e6e3ee0a6ee63d2ceeb659862bd8d0a984ca6" +checksum = "d36ca1786d9e79b8193a68d480a0907b612f109537115c6ff655a3a1967533fd" dependencies = [ "anyhow", "arc-swap", @@ -623,11 +760,9 @@ dependencies = [ "fnv", "humantime", "libc", - "log 0.4.27", + "log 0.4.28", "log-mdc", - "once_cell", "parking_lot", - "rand 0.8.5", "serde", "serde-value", "serde_json", @@ -649,9 +784,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.5" +version = "2.7.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "memoffset" @@ -662,13 +797,20 @@ dependencies = [ "autocfg", ] +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" -version = "0.8.9" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" dependencies = [ - "adler2", + "adler", + "autocfg", ] [[package]] @@ -678,7 +820,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "wasi 0.11.1+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] @@ -704,6 +846,16 @@ dependencies = [ "memchr 1.0.2", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr 2.7.6", + "minimal-lexical", +] + [[package]] name = "num-traits" version = "0.2.19" @@ -725,11 +877,11 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "a38f2be3697a57b4060074ff41b44c16870d916ad7877c17696e063257482bc7" dependencies = [ - "memchr 2.7.5", + "memchr 2.7.6", ] [[package]] @@ -750,18 +902,6 @@ version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" -[[package]] -name = "openssl-sys" -version = "0.9.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "ordered-float" version = "2.10.1" @@ -779,9 +919,9 @@ checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" [[package]] name = "parking_lot" -version = "0.12.4" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -789,9 +929,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.11" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", @@ -812,12 +952,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkg-config" -version = "0.3.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" - [[package]] name = "ppv-lite86" version = "0.2.21" @@ -827,6 +961,16 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.108", +] + [[package]] name = "proc-macro-error" version = 
"1.0.4" @@ -835,7 +979,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", - "quote 1.0.40", + "quote 1.0.41", "syn 1.0.109", "version_check", ] @@ -847,15 +991,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", - "quote 1.0.40", + "quote 1.0.41", "version_check", ] [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8" dependencies = [ "unicode-ident", ] @@ -868,9 +1012,9 @@ checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" [[package]] name = "quote" -version = "1.0.40" +version = "1.0.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1" dependencies = [ "proc-macro2", ] @@ -937,40 +1081,47 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", ] [[package]] name = "redox_syscall" -version = "0.5.17" +version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.10.0", ] [[package]] name = "regex" -version = "0.2.11" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9329abc99e39129fcceabd24cf5d85b4671ef7c29c50e972bc5afe32438ec384" +checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4" dependencies = [ "aho-corasick", - "memchr 2.7.5", + "memchr 2.7.6", + "regex-automata", "regex-syntax", - "thread_local", - "utf8-ranges", ] [[package]] -name = "regex-syntax" -version = "0.5.6" +name = "regex-automata" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d707a4fa2637f2dca2ef9fd02225ec7661fe01a53623c1e6515b6916511f7a7" +checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c" dependencies = [ - "ucd-util", + "aho-corasick", + "memchr 2.7.6", + "regex-syntax", ] +[[package]] +name = "regex-syntax" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" + [[package]] name = "result" version = "1.0.0" @@ -983,24 +1134,30 @@ version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +[[package]] +name = "rustc-hash" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + [[package]] name = "rustix" -version = "1.0.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.1", + "bitflags 2.10.0", "errno", "libc", "linux-raw-sys", - "windows-sys 0.60.2", + "windows-sys 0.52.0", ] [[package]] name = "rustversion" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" +checksum = 
"b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" @@ -1010,9 +1167,9 @@ checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "s2n-tls" -version = "0.0.41" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5820b4f6c7d414c82da63fc13b1e91a9b59e91dc2cc15287c0e1882e61837a20" +checksum = "821c6c037686bbc60273f3c4af20012eecbe5e9b1c4ac3d7f766a1f2464681bf" dependencies = [ "errno", "hex", @@ -1023,20 +1180,20 @@ dependencies = [ [[package]] name = "s2n-tls-sys" -version = "0.0.41" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f32844e751ea7a2469755ab0c6ad5a02156a6b8b4a0202b3355796c7b247258" +checksum = "6a755df740916e2fc0aaf99c6fc0e519028702a75bff018b6b55a735eada406a" dependencies = [ + "aws-lc-rs", "cc", "libc", - "openssl-sys", ] [[package]] name = "s2n-tls-tokio" -version = "0.0.41" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "995bc71beaf1eea056de345d2f45ff45877d26f612b4a7dde343024e41e3025b" +checksum = "0e03f7869a3e06f9dcec01de224bd096320cf89ddf495a32547a0364cbf8de90" dependencies = [ "errno", "libc", @@ -1053,10 +1210,11 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] @@ -1070,15 +1228,24 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + 
[[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] @@ -1094,27 +1261,27 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.141" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b9eff21ebe718216c6ec64e1d9ac57087aad11efc64e32002bce4a0d4c03d3" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", - "memchr 2.7.5", + "memchr 2.7.6", "ryu", "serde", + "serde_core", ] [[package]] name = "serde_yaml" -version = "0.9.34+deprecated" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +checksum = "578a7433b776b56a35785ed5ce9a7e777ac0598aac5a6dd1b4b18a307c7fc71b" dependencies = [ "indexmap", - "itoa", "ryu", "serde", - "unsafe-libyaml", + "yaml-rust", ] [[package]] @@ -1125,18 +1292,18 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.5" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] [[package]] name = "slab" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = 
"smallvec" @@ -1173,32 +1340,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", - "quote 1.0.40", + "quote 1.0.41", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.104" +version = "2.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" +checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917" dependencies = [ "proc-macro2", - "quote 1.0.40", + "quote 1.0.41", "unicode-ident", ] [[package]] name = "tempfile" -version = "3.20.0" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand", - "getrandom 0.3.3", + "getrandom 0.3.4", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -1227,8 +1394,8 @@ checksum = "adcb7fd841cd518e279be3d5a3eb0636409487998a4aff22f3de87b81e88384f" dependencies = [ "cfg-if", "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] @@ -1238,8 +1405,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", "test-case-core", ] @@ -1268,8 +1435,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] @@ -1282,15 +1449,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "thread_local" -version = "0.3.6" -source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b" -dependencies = [ - "lazy_static 1.5.0", -] - [[package]] name = "tokio" version = "1.38.2" @@ -1317,15 +1475,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] name = "tokio-util" -version = "0.7.15" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -1343,17 +1501,11 @@ dependencies = [ "unsafe-any-ors", ] -[[package]] -name = "ucd-util" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abd2fc5d32b590614af8b0a20d837f32eca055edd0bbead59a9cfe80858be003" - [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "462eeb75aeb73aea900253ce739c8e18a67423fadf006037cd3ff27e82748a06" [[package]] name = "unicode-width" @@ -1371,24 +1523,18 @@ dependencies = [ ] [[package]] -name = "unsafe-libyaml" -version = "0.2.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" - -[[package]] -name = "utf8-ranges" -version = "1.0.5" +name = "untrusted" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcfc827f90e53a02eaef5e535ee14266c1d569214c6aa70133a624d8a3164ba" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" 
[[package]] name = "uuid" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.3", + "getrandom 0.3.4", "js-sys", "rand 0.9.2", "uuid-macro-internal", @@ -1397,21 +1543,15 @@ dependencies = [ [[package]] name = "uuid-macro-internal" -version = "1.17.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b682e8c381995ea03130e381928e0e005b7c9eb483c6c8682f50e07b33c2b7" +checksum = "d9384a660318abfbd7f8932c34d67e4d1ec511095f95972ddc01e19d7ba8413f" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] -[[package]] -name = "vcpkg" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" - [[package]] name = "vec_map" version = "0.8.2" @@ -1437,68 +1577,55 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasi" -version = "0.14.2+wasi-0.2.4" +name = "wasip2" +version = "1.0.1+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" dependencies = [ - "wit-bindgen-rt", + "wit-bindgen", ] [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" dependencies = [ "cfg-if", "once_cell", "rustversion", 
"wasm-bindgen-macro", -] - -[[package]] -name = "wasm-bindgen-backend" -version = "0.2.100" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" -dependencies = [ - "bumpalo", - "log 0.4.27", - "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" dependencies = [ - "quote 1.0.40", + "quote 1.0.41", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" dependencies = [ + "bumpalo", "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", - "wasm-bindgen-backend", + "quote 1.0.41", + "syn 2.0.108", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" dependencies = [ "unicode-ident", ] @@ -1521,11 +1648,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - 
"windows-sys 0.59.0", + "windows-sys 0.48.0", ] [[package]] @@ -1536,9 +1663,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.2" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ "windows-implement", "windows-interface", @@ -1549,46 +1676,46 @@ dependencies = [ [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] [[package]] name = "windows-link" -version = "0.1.3" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] name = "windows-result" -version = "0.3.4" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" dependencies = [ "windows-link", ] 
[[package]] name = "windows-strings" -version = "0.4.2" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ "windows-link", ] @@ -1611,24 +1738,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-sys" -version = "0.59.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-sys" -version = "0.60.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" -dependencies = [ - "windows-targets 0.53.3", -] - [[package]] name = "windows-targets" version = "0.48.5" @@ -1653,30 +1762,13 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", + "windows_i686_gnullvm", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.53.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" -dependencies = [ - "windows-link", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", -] - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -1689,12 +1781,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -1707,12 +1793,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -1725,24 +1805,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" - [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -1755,12 +1823,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_i686_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" - [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -1773,12 +1835,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -1791,12 +1847,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" - [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -1810,25 +1860,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" - -[[package]] -name = "wit-bindgen-rt" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags 2.9.1", -] +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "xdr-codec" version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48056532577dba856078eaf25193366b884401cdf55da433fe741617d3158fa6" dependencies = [ 
"byteorder", "error-chain", @@ -1837,35 +1876,48 @@ dependencies = [ [[package]] name = "xdrgen" version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef5855c0e686b4ef5fd0b96920d8e1f6317215798bcbde1ffec477a90d891ed2" dependencies = [ "bitflags 0.9.1", "clap 2.34.0", "env_logger", - "lazy_static 0.2.11", + "lazy_static", "log 0.3.9", - "nom", + "nom 3.2.1", "quote 0.3.15", "xdr-codec", ] +[[package]] +name = "yaml-rust" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +dependencies = [ + "linked-hash-map", +] + [[package]] name = "zerocopy" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.26" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", - "quote 1.0.40", - "syn 2.0.104", + "quote 1.0.41", + "syn 2.0.108", ] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 24ecbab7..32930c61 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.3.3" +version = "2.4.0" publish = false license = "MIT" @@ -20,17 +20,15 @@ log4rs = { version = "1.2.0", features = ["rolling_file_appender", "compound_pol nix = { version = "0.26.2", features = ["signal"]} onc-rpc = "0.2.3" rand = "0.8.5" -s2n-tls = "0.0" -s2n-tls-tokio = "0.0" -s2n-tls-sys = "0.0" +s2n-tls = {version="^0.3.19",features=["fips"]} +s2n-tls-tokio = "^0.3.19" serde = {version="1.0.175",features=["derive"]} serde_ini = "0.2.0" thiserror = "1.0.44" tokio = { version = "1.29.0, <1.39", features = ["full"] } tokio-util = "0.7.8" uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics"]} -xdr-codec = "0.4.4" -backtrace = "=0.3.74" +xdr-codec = { path = "rust-xdr/xdr-codec"} [dev-dependencies] test-case = "*" @@ -38,4 +36,10 @@ tokio = { version = "1.29.0", features = ["test-util"] } tempfile = "3.10.1" [build-dependencies] -xdrgen = "0.4.4" +xdrgen = { path = "rust-xdr/xdrgen" } + +[lib] +# Library is used only to make symbols visible/reusable by Integration tests +# so we want to disable unit-tests for that target, to avoid running all the unit tests twice +test = false +doctest = false diff --git a/src/proxy/build.rs b/src/proxy/build.rs index 71e8d0da..dbc10216 100644 --- a/src/proxy/build.rs +++ b/src/proxy/build.rs @@ -1,4 +1,4 @@ -extern crate xdrgen; +use xdrgen; fn main() { xdrgen::compile("src/efs_prot.x").expect("xdrgen efs_prot.x failed"); diff --git a/src/proxy/rust-xdr/xdr-codec/Cargo.toml b/src/proxy/rust-xdr/xdr-codec/Cargo.toml new file mode 100644 index 00000000..1d62c787 --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "xdr-codec" +version = "0.4.4" +authors = ["Jeremy Fitzhardinge "] +license = "MIT OR Apache-2.0" +description = "XDR encode/decode runtime support. Pairs with xdrgen which generates code from specs." 
+repository = "https://github.com/jsgf/rust-xdr/tree/master/xdr-codec" +documentation = "https://docs.rs/xdr-codec" +readme = "README.md" +keywords = ["encoding", "protocol", "xdr", "rfc4506", "serialization"] +include = [ "src/**/*.rs", "tests/**/*.rs", "*.md", "Cargo.toml" ] + +[features] +# Enable use of `Pack`/`Unpack` traits for `i8`/`u8`. Normally this is disabled to +# prevent unintended use of `char thing[]` arrays when then intent was `opaque thing[]`. +bytecodec = [] +# For travis +unstable = [] + +[dependencies] +byteorder = "1.0" +error-chain = "0.10" + +[dev-dependencies] +quickcheck = "0.4" diff --git a/src/proxy/rust-xdr/xdr-codec/README.md b/src/proxy/rust-xdr/xdr-codec/README.md new file mode 100644 index 00000000..ac7e8d4d --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/README.md @@ -0,0 +1,162 @@ +# Rust XDR library + +[![Build Status](https://travis-ci.org/jsgf/rust-xdr.svg?branch=master)](https://travis-ci.org/jsgf/rust-xdr) +[![Crates.io](https://img.shields.io/crates/v/xdr-codec.svg)]() +[![Coverage Status](https://coveralls.io/repos/github/jsgf/rust-xdr/badge.svg?branch=master)](https://coveralls.io/github/jsgf/rust-xdr?branch=master) + +This crate provides a set of runtime routines to encode and decode +basic XDR types, which can be used with +[xdrgen's](https://github.com/jsgf/rust-xdrgen) automatically +generated code, or with hand-written codecs. + +This crate also implements XDR-RPC record marking in the form of the +`XdrRecordReader` and `XdrRecordWriter` IO filters. + +## Usage + +The easiest way to use this library is with [xdrgen](https://crates.io/crates/xdrgen), +which takes takes a specification in a `.x` file and generates all the necessary +definitions for you. 
+ +However, you can manually implement the `Pack` and `Unpack` traits for your own +types: + +``` +struct MyType { + a: u32, + b: Vec, +} + +impl Pack for MyType + where W: Write +{ + fn pack(&self, out: &mut W) -> xdr_codec::Result { + let mut sz = 0; + + sz += try!(self.a.pack(out)); + sz += try!(Opaque::borrowed(self.b).pack(out)); + + Ok(sz) + } +} + +impl Unpack for MyType + where R: Read +{ + fn unpack(input: &mut In) -> Result<(Self, usize)> { + let mut rsz = 0; + let ret = MyType { + a: { let (v, sz) = try!(Unpack::unpack(input)); rsz += sz; v }, + b: { let (v, sz) = try!(Opaque::unpack(input)); rsz += sz; v.into_owned() }, + }; + + Ok((ret, rsz)) + } +} +``` + +or alternatively, put the following in src/mytype.x: + +``` +struct MyType { + unsigned int a; + opaque b<>; +} +``` + +then add a build.rs to your Cargo.toml: + +``` +extern crate xdrgen; + +fn main() { + xdrgen::compile("src/mytype.x").expect("xdrgen mytype.x failed"); +} +``` + +then include the generated code in one of your modules: +``` +extern crate xdr_codec; + +// ... + +include!(concat!(env!("OUT_DIR"), "/mytype_xdr.rs")); +``` + +## Documentation + +Complete documentation is [here](https://docs.rs/xdr-codec/). + +## Changes in 0.4.2 + +Implement standard traits for `char`/`unsigned char` (`i8`/`u8` in Rust). + +Also handle `short`/`unsigned short` as an extension in .x files. They are still +represented in memory as `i32`/`u32`. + +## Changes in 0.4 + +Version 0.4 added the `bytecodec` feature, which implements `Pack` and `Unpack` +for byte types (`i8` and `u8`). This is normally unwanted, since bytes suffer from +massive padding on the wire when used individually, or in an array of bytes (`opaque` +is the preferred way to transport compact byte arrays). However, some protocols +are mis-specified to use padded byte arrays, so `bytecodec` is available for them. 
+ +## Changes in 0.2 + +Versions starting with 0.2 introduced a number of breaking changes: + + * `u8` no longer implements `Pack`/`Unpack` + + XDR doesn't directly support encoding individual bytes; if it did, it would + require each one to be padded out to 4 bytes. xdr-codec 0.1 implemented + `Pack` and `Unpack` for `u8` primarily to allow direct use of a `Vec` + as an XDR `opaque<>`. However, this also allowed direct use of + `u8::pack()` which makes it too easy to accidentally generate a malformed + XDR stream without proper padding. + + In 0.2, u8 no longer implements `Pack` and `Unpack`. Instead, xdr-codec + has a `Opaque<'a>(&'a [u8])` wrapper which does. This allows any `[u8]` + slice to be packed and unpacked. + + It also has a set of helper functions for packing and unpacking both + flexible and fixed-sized opaques, strings and general arrays. These make + it straightforward to manage arrays in a way that is robust. This also allows + xdrgen to generate code for fixed-sized arrays that's not completely unrolled + unpack calls. + + (I'm not entirely happy with the proliferation of functions however, so + I'm thinking about a trait-based approach that is more idiomatic Rust. That + may have to be 0.3.) + +* Extensions to XDR record marking + + I added `XdrRecordReaderIter` which allows iteration over records. Previously + all the records in the stream were flattened into a plain byte stream, which + defeats the purpose of the records. `XdrRecordReader` still implements `Read` + so that's still available, but it also implements `IntoIterator` so you can + iterate records. + + The addition of more unit tests (see below) pointed out some poorly thought + out corner cases, so now record generation and use of the EOR marker is more + consistent. + +* More unit tests, including quickcheck generated ones + + I've increased the number of tests, and added quickcheck generated tests + which cleared up a few corner cases. 
+ +## License + +Licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](http://www.apache.org/licenses/LICENSE-2.0)) + * MIT license ([LICENSE-MIT](http://opensource.org/licenses/MIT)) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. diff --git a/src/proxy/rust-xdr/xdr-codec/src/error.rs b/src/proxy/rust-xdr/xdr-codec/src/error.rs new file mode 100644 index 00000000..3b4f07a2 --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/src/error.rs @@ -0,0 +1,39 @@ +#![allow(deprecated)] + +error_chain! { + foreign_links { + IOError(::std::io::Error); + InvalidUtf8(::std::string::FromUtf8Error); + } + + errors { + InvalidCase(v: i32) { + description("invalid union case") + display("invalid union case: '{}'", v) + } + InvalidEnum(v: i32) { + description("invalid enum value") + display("invalid enum value: '{}'", v) + } + InvalidLen(v: usize) { + description("invalid array len") + display("invalid array len: '{}'", v) + } + } +} + +unsafe impl Sync for Error {} + +impl Error { + pub fn invalidcase(v: i32) -> Error { + ErrorKind::InvalidCase(v).into() + } + + pub fn invalidenum(v: i32) -> Error { + ErrorKind::InvalidEnum(v).into() + } + + pub fn invalidlen(v: usize) -> Error { + ErrorKind::InvalidLen(v).into() + } +} diff --git a/src/proxy/rust-xdr/xdr-codec/src/lib.rs b/src/proxy/rust-xdr/xdr-codec/src/lib.rs new file mode 100644 index 00000000..6f100557 --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/src/lib.rs @@ -0,0 +1,751 @@ +//! XDR runtime encoding/decoding +//! +//! This crate provides runtime support for encoding and decoding XDR +//! data. It is intended to be used with code generated by the +//! "xdrgen" crate, but it can also be used with hand-written code. +//! +//! 
It provides two key traits - `Pack` and `Unpack` - which all +//! encodable types must implement. It also provides the helper +//! functions `pack()` and `unpack()` to simplify the API. +//! +//! By default, this does not implement codecs for `i8` or `u8`. This is because +//! encoding individual bytes is quite inefficient, as they're all padded up to +//! 32 bits (4 bytes). This doesn't matter for individual items, but arrays of +//! bytes should be represented by opaque arrays (static size) or flex arrays +//! (dynamic size) (or strings for character data). +//! +//! However, some protocols are mis-specified to use byte arrays (I'm looking at +//! you, gluster), so the option to support the exists. You can enable byte codec +//! with the `bytecodec` feature. +#![crate_type = "lib"] + +extern crate byteorder; +#[macro_use] +extern crate error_chain; + +use byteorder::{BigEndian, ReadBytesExt, WriteBytesExt}; +use std::borrow::{Borrow, Cow}; +use std::cmp::min; +pub use std::io::{Read, Write}; +use std::ops::Deref; + +pub mod record; + +mod error; +pub use error::*; + +#[cfg(test)] +mod test; + +static PADDING: [u8; 4] = [0; 4]; + +/// Compute XDR padding. +/// +/// Return slice of zero padding needed to bring `sz` up to a multiple of 4. If no padding is needed, +/// it will be a zero-sized slice. +#[inline] +pub fn padding(sz: usize) -> &'static [u8] { + &PADDING[..(4 - (sz % 4)) % 4] +} + +/// Wrapper for XDR opaque data. +/// +/// In XDR terms, "opaque data" is a plain array of bytes, packed as tightly as possible, and then +/// padded to a 4 byte offset. This is different from an array of bytes, where each byte would be +/// padded to 4 bytes when emitted into the array. 
+#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct Opaque<'a>(pub Cow<'a, [u8]>); + +impl<'a> Opaque<'a> { + pub fn owned(v: Vec) -> Opaque<'a> { + Opaque(Cow::Owned(v)) + } + pub fn borrowed(v: &'a [u8]) -> Opaque<'a> { + Opaque(Cow::Borrowed(v)) + } +} + +impl<'a> Deref for Opaque<'a> { + type Target = [u8]; + fn deref(&self) -> &[u8] { + self.0.deref() + } +} + +impl<'a> From<&'a [u8]> for Opaque<'a> { + fn from(v: &'a [u8]) -> Self { + Opaque::borrowed(v) + } +} + +/// Serialization (packing) helper. +/// +/// Helper to serialize any type implementing `Pack` into an implementation of `std::io::Write`. +pub fn pack>(val: &T, out: &mut Out) -> Result<()> { + val.pack(out).map(|_| ()) +} + +/// Pack a fixed-size array. +/// +/// As the size is fixed, it doesn't need to be encoded. `sz` is in units of array elements. +/// If the `val` is too large, it is truncated; it is too small, then the array is padded out with +/// default values (if provided). If the array is too small and there's no pad/default value, then it fails +/// with `Error::InvalidLen`. +pub fn pack_array(val: &[T], sz: usize, out: &mut Out, defl: Option<&T>) -> Result +where + Out: Write, + T: Pack, +{ + let mut vsz = 0; + let val = &val[..min(sz, val.len())]; + + for v in val { + vsz += v.pack(out)?; + } + assert!(vsz % 4 == 0); + + if val.len() < sz { + if let Some(defl) = defl { + for _ in val.len()..sz { + vsz += defl.pack(out)?; + } + } else { + bail!(ErrorKind::InvalidLen(sz)); + } + } + Ok(vsz) +} + +/// Pack a fixed-size byte array +/// +/// As size is fixed, it doesn't need to be encoded. `sz` is in bytes (and array elements, which are u8) +/// If the array is too large, it is truncated; if its too small its padded with `0x00`. 
+pub fn pack_opaque_array(val: &[u8], sz: usize, out: &mut Out) -> Result { + let mut vsz; + let val = &val[..min(sz, val.len())]; + + vsz = val.len(); + out.write_all(val)?; + + let p = padding(sz); + for _ in val.len()..(sz + p.len()) { + out.write_u8(0)?; + vsz += 1; + } + + Ok(vsz) +} + +/// Pack a dynamically sized array, with size limit check. +/// +/// This packs an array of packable objects, and also applies an optional size limit. +#[inline] +pub fn pack_flex>( + val: &[T], + maxsz: Option, + out: &mut Out, +) -> Result { + if maxsz.map_or(false, |m| val.len() > m) { + bail!(ErrorKind::InvalidLen(maxsz.unwrap())); + } + + val.pack(out) +} + +/// Pack a dynamically sized opaque array, with size limit check. +/// +/// This packs an array of packable objects, and also applies an optional size limit. +#[inline] +pub fn pack_opaque_flex( + val: &[u8], + maxsz: Option, + out: &mut Out, +) -> Result { + if maxsz.map_or(false, |m| val.len() > m) { + bail!(ErrorKind::InvalidLen(maxsz.unwrap())); + } + + Opaque::borrowed(val).pack(out) +} + +/// Pack a string with size limit check. +#[inline] +pub fn pack_string(val: &str, maxsz: Option, out: &mut Out) -> Result { + pack_opaque_flex(val.as_bytes(), maxsz, out) +} + +/// Unpack a fixed-sized array +/// +/// Unpack a fixed-size array of elements. The results are placed in `array`, but the actual wire-size of +/// the array is `arraysz`. If the supplied `array` is too large, the remainer is filled in with the +/// default value (if provided); if it is too small, the excess elements are discarded. +/// +/// If the provided array is too large and there is no default, then decoding fails with an `InvalidLen` error. +/// All the elements in `array` will be initialized after a successful return. 
+pub fn unpack_array( + input: &mut In, + array: &mut [T], + arraysz: usize, + defl: Option<&T>, +) -> Result +where + In: Read, + T: Unpack + Clone, +{ + #[inline] + fn set(p: &mut T, v: T) { + *p = v + } + #[inline] + fn drop(_: &mut T) {} + + unpack_array_with(input, array, arraysz, set, drop, defl) +} + +/// Specialized variant of `unpack_array` which initializes the element via a callback. This is primarily +/// so that the array can be uninitialized, and we initialize it element at a time with `ptr::write()`. +#[inline] +pub fn unpack_array_with( + input: &mut In, + array: &mut [T], + arraysz: usize, + set: fn(&mut T, T), + drop: fn(&mut T), + defl: Option<&T>, +) -> Result +where + In: Read, + T: Unpack + Clone, +{ + let mut rsz = 0; + let sz = min(arraysz, array.len()); + + // If we fail part way through then return the error and the index we got up to + // so we can clean up the entries we did initialize. + let res = (|| { + for (idx, elem) in (&mut array[..sz]).into_iter().enumerate() { + let (v, sz) = match Unpack::unpack(input) { + Ok(v) => v, + Err(e) => return Some((idx, e)), + }; + rsz += sz; + set(elem, v); + } + None + })(); + if let Some((idx, err)) = res { + for elem in &mut array[..idx] { + drop(elem) + } + return Err(err); + } + + // Fill in excess array entries with default values + if arraysz < array.len() { + if let Some(defl) = defl { + for elem in &mut array[arraysz..] { + set(elem, defl.clone()); + } + } else { + bail!(ErrorKind::InvalidLen(arraysz)); + } + } + + // Mop up unused array entries on the wire + if arraysz > array.len() { + for _ in array.len()..arraysz { + let (_, sz) = T::unpack(input)?; + rsz += sz; + } + } + assert!(rsz % 4 == 0); + + Ok(rsz) +} + +/// Unpack a fixed-sized opaque array +/// +/// Unpack a fixed-size array of raw bytes. The results are placed in `bytes`, but the actual wire-size of +/// the array is `bytesz`. 
If the supplied `bytes` is too large, the remainer is filled in with 0x00; +/// if it is too small, the excess elements are discarded. +/// +/// All the bytes in `bytes` will be initialized after a successful call. +pub fn unpack_opaque_array( + input: &mut In, + bytes: &mut [u8], + bytesz: usize, +) -> Result { + let sz = min(bytesz, bytes.len()); + let mut rsz = 0; + + while rsz < sz { + let r = input.read(&mut bytes[rsz..])?; + rsz += r; + } + + // Fill in excess + if sz < bytes.len() { + for b in &mut bytes[sz..] { + *b = 0; + } + } + + // Mop up unused data on the wire and padding + let p = padding(bytesz).len(); + if bytes.len() < bytesz + p { + for _ in bytes.len()..(bytesz + p) { + let _ = input.read_u8()?; + rsz += 1; + } + } + + Ok(rsz) +} + +/// Unpack a (perhaps) length-limited array +pub fn unpack_flex>( + input: &mut In, + maxsz: Option, +) -> Result<(Vec, usize)> { + let (elems, mut sz) = Unpack::unpack(input)?; + + if maxsz.map_or(false, |m| elems > m) { + bail!(ErrorKind::InvalidLen(maxsz.unwrap())); + } + + let mut out = Vec::with_capacity(elems); + + for _ in 0..elems { + let (e, esz) = Unpack::unpack(input)?; + out.push(e); + sz += esz; + } + + let p = padding(sz); + for _ in 0..p.len() { + let _ = input.read_u8()?; + } + sz += p.len(); + + Ok((out, sz)) +} + +/// Unpack a (perhaps) length-limited opaque array +/// +/// Unpack an XDR encoded array of bytes, with an optional maximum length. 
+pub fn unpack_opaque_flex( + input: &mut In, + maxsz: Option, +) -> Result<(Vec, usize)> { + let (elems, mut sz) = Unpack::unpack(input)?; + + if maxsz.map_or(false, |m| elems > m) { + bail!(ErrorKind::InvalidLen(maxsz.unwrap())); + } + + let mut out = Vec::with_capacity(elems); + + sz += input.take(elems as u64).read_to_end(&mut out)?; + + let p = padding(sz); + for _ in 0..p.len() { + let _ = input.read_u8()?; + } + sz += p.len(); + + Ok((out, sz)) +} + +/// Unpack (perhaps) length-limited string +pub fn unpack_string(input: &mut In, maxsz: Option) -> Result<(String, usize)> { + let (v, sz) = unpack_opaque_flex(input, maxsz)?; + + String::from_utf8(v).map_err(Error::from).map(|s| (s, sz)) +} + +/// Basic packing trait. +/// +/// This trait is used to implement XDR packing any Rust type into a +/// `Write` stream. It returns the number of bytes the encoding took. +/// +/// This crate provides a number of implementations for all the basic +/// XDR types, and generated code will generally compose them to pack +/// structures, unions, etc. +/// +/// Streams generated by `Pack` can be consumed by `Unpack`. 
+pub trait Pack { + fn pack(&self, out: &mut Out) -> Result; +} + +#[cfg(feature = "bytecodec")] +impl Pack for u8 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_u32::(*self as u32) + .map_err(Error::from) + .map(|_| 4) + } +} + +#[cfg(feature = "bytecodec")] +impl Pack for i8 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_i32::(*self as i32) + .map_err(Error::from) + .map(|_| 4) + } +} + +impl Pack for u32 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_u32::(*self) + .map_err(Error::from) + .map(|_| 4) + } +} + +impl Pack for i32 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_i32::(*self) + .map_err(Error::from) + .map(|_| 4) + } +} + +impl Pack for u64 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_u64::(*self) + .map_err(Error::from) + .map(|_| 8) + } +} + +impl Pack for i64 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_i64::(*self) + .map_err(Error::from) + .map(|_| 8) + } +} + +impl Pack for f32 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_f32::(*self) + .map_err(Error::from) + .map(|_| 4) + } +} + +impl Pack for f64 { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + out.write_f64::(*self) + .map_err(Error::from) + .map(|_| 8) + } +} + +impl Pack for bool { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + (*self as u32).pack(out) + } +} + +impl Pack for () { + #[inline] + fn pack(&self, _out: &mut Out) -> Result { + Ok(0) + } +} + +impl Pack for usize { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + (*self as u32).pack(out) + } +} + +impl> Pack for [T] { + fn pack(&self, out: &mut Out) -> Result { + let len = self.len(); + + let mut sz = len.pack(out)?; + for it in self { + sz += it.pack(out)?; + } + + let p = padding(sz); + if p.len() > 0 { + out.write_all(p)?; + sz += p.len(); + } + + Ok(sz) + } +} + +impl> Pack for Vec { + #[inline] + fn pack(&self, out: &mut Out) -> 
Result { + if self.len() > u32::max_value() as usize { + return Err(ErrorKind::InvalidLen(self.len()).into()); + } + + (&self[..]).pack(out) + } +} + +impl<'a, Out: Write> Pack for Opaque<'a> { + fn pack(&self, out: &mut Out) -> Result { + let mut sz; + let data: &[u8] = self.0.borrow(); + + if data.len() > u32::max_value() as usize { + return Err(ErrorKind::InvalidLen(data.len()).into()); + } + + sz = data.len().pack(out)?; + + out.write_all(data)?; + sz += data.len(); + + let p = padding(sz); + if p.len() > 0 { + out.write_all(p)?; + sz += p.len(); + } + + Ok(sz) + } +} + +impl Pack for str { + #[inline] + fn pack(&self, out: &mut Out) -> Result { + Opaque::borrowed(self.as_bytes()).pack(out) + } +} + +impl> Pack for Option { + fn pack(&self, out: &mut Out) -> Result { + match self { + &None => false.pack(out), + &Some(ref v) => { + let sz = true.pack(out)? + v.pack(out)?; + Ok(sz) + } + } + } +} + +impl> Pack for Box { + fn pack(&self, out: &mut Out) -> Result { + let t: &T = self.borrow(); + t.pack(out) + } +} + +impl<'a, Out: Write, T> Pack for Cow<'a, T> +where + T: 'a + Pack + ToOwned, +{ + fn pack(&self, out: &mut Out) -> Result { + let t: &T = self.borrow(); + t.pack(out) + } +} + +/// Deserialization (unpacking) helper function +/// +/// This function will read encoded bytes from `input` (a `Read` +/// implementation) and return a fully constructed type (or an +/// error). This relies on type inference to determine which type is +/// to be unpacked, so its up to the calling envionment to clarify +/// this. (Generally it falls out quite naturally.) +pub fn unpack>(input: &mut In) -> Result { + T::unpack(input).map(|(v, _)| v) +} + +/// Basic unpacking trait +/// +/// This trait is used to unpack a type from an XDR encoded byte +/// stream (encoded with `Pack`). It returns the decoded instance and +/// the number of bytes consumed from the input. +/// +/// This crate provides implementations for all the basic XDR types, +/// as well as for arrays. 
+pub trait Unpack: Sized { + fn unpack(input: &mut In) -> Result<(Self, usize)>; +} + +#[cfg(feature = "bytecodec")] +impl Unpack for u8 { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_u32::() + .map_err(Error::from) + .map(|v| (v as u8, 4)) + } +} + +#[cfg(feature = "bytecodec")] +impl Unpack for i8 { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_i32::() + .map_err(Error::from) + .map(|v| (v as i8, 4)) + } +} + +impl Unpack for u32 { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_u32::() + .map_err(Error::from) + .map(|v| (v, 4)) + } +} + +impl Unpack for i32 { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_i32::() + .map_err(Error::from) + .map(|v| (v, 4)) + } +} + +impl Unpack for u64 { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_u64::() + .map_err(Error::from) + .map(|v| (v, 8)) + } +} + +impl Unpack for i64 { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_i64::() + .map_err(Error::from) + .map(|v| (v, 8)) + } +} + +impl Unpack for f32 { + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_f32::() + .map_err(Error::from) + .map(|v| (v, 4)) + } +} + +impl Unpack for f64 { + fn unpack(input: &mut In) -> Result<(Self, usize)> { + input + .read_f64::() + .map_err(Error::from) + .map(|v| (v, 8)) + } +} + +impl Unpack for bool { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + i32::unpack(input).and_then(|(v, sz)| match v { + 0 => Ok((false, sz)), + 1 => Ok((true, sz)), + v => Err(ErrorKind::InvalidEnum(v).into()), + }) + } +} + +impl Unpack for () { + #[inline] + fn unpack(_input: &mut In) -> Result<(Self, usize)> { + Ok(((), 0)) + } +} + +impl Unpack for usize { + #[inline] + fn unpack(input: &mut In) -> Result<(Self, usize)> { + u32::unpack(input).map(|(v, sz)| (v as usize, sz)) + } +} + +impl> Unpack for Vec 
{ + fn unpack(input: &mut In) -> Result<(Self, usize)> { + unpack_flex(input, None) + } +} + +impl Unpack for String { + fn unpack(input: &mut In) -> Result<(Self, usize)> { + let (v, sz) = unpack_opaque_flex(input, None)?; + String::from_utf8(v).map_err(Error::from).map(|s| (s, sz)) + } +} + +impl<'a, In: Read> Unpack for Opaque<'a> { + fn unpack(input: &mut In) -> Result<(Self, usize)> { + let (len, mut sz) = usize::unpack(input)?; + let mut v = Vec::new(); + sz += input.by_ref().take(len as u64).read_to_end(&mut v)?; + + let p = padding(sz); + for _ in 0..p.len() { + let _ = input.read_u8()?; + sz += 1; + } + + Ok((Opaque(Cow::Owned(v)), sz)) + } +} + +impl> Unpack for Option { + fn unpack(input: &mut In) -> Result<(Self, usize)> { + let (have, mut sz) = Unpack::unpack(input)?; + let ret = if have { + let (v, osz) = Unpack::unpack(input)?; + sz += osz; + Some(v) + } else { + None + }; + Ok((ret, sz)) + } +} + +impl> Unpack for Box { + fn unpack(input: &mut In) -> Result<(Self, usize)> { + let (b, sz) = Unpack::unpack(input)?; + Ok((Box::new(b), sz)) + } +} + +impl<'a, In: Read, T> Unpack for Cow<'a, T> +where + T: 'a + Unpack + ToOwned, +{ + fn unpack(input: &mut In) -> Result<(Self, usize)> { + let (b, sz) = Unpack::unpack(input)?; + Ok((Cow::Owned(b), sz)) + } +} diff --git a/src/proxy/rust-xdr/xdr-codec/src/record.rs b/src/proxy/rust-xdr/xdr-codec/src/record.rs new file mode 100644 index 00000000..33a07b76 --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/src/record.rs @@ -0,0 +1,263 @@ +//! XDR record marking +//! +//! This module implements wrappers for `Write` and `BufRead` which +//! implement "Record Marking" from [RFC1831](https://tools.ietf.org/html/rfc1831.html#section-10), +//! used for encoding XDR structures onto a bytestream such as TCP. +//! +//! The format is simple - each record is broken up into one or more +//! record fragments. Each record fragment is prefixed with a 32-bit +//! big-endian value. 
The low 31 bits is the fragment size, and the +//! top bit is the "end of record" marker, indicating the last +//! fragment of the record. +//! +//! There's no magic number or other way to determine whether a stream +//! is using record marking; both ends must agree. +use std::cmp::min; +use std::io::{self, BufRead, Read, Write}; + +use error::*; + +use super::{pack, unpack, Error}; + +const LAST_REC: u32 = 1u32 << 31; + +fn mapioerr(xdrerr: Error) -> io::Error { + match xdrerr { + Error(ErrorKind::IOError(ioerr), _) => ioerr, + other => io::Error::new(io::ErrorKind::Other, other), + } +} + +/// Read records from a bytestream. +/// +/// Reads will read up to the end of the current fragment, and not +/// beyond. The `BufRead` trait doesn't otherwise allow for record +/// boundaries to be deliniated. Callers can use the `eor` method to +/// determine record ends. +#[derive(Debug)] +pub struct XdrRecordReader { + size: usize, // record size + consumed: usize, // bytes consumed + eor: bool, // is last record + + reader: R, // reader +} + +impl XdrRecordReader { + /// Wrapper a record reader around an existing implementation of + /// `BufRead`, such as `BufReader`. + pub fn new(rd: R) -> XdrRecordReader { + XdrRecordReader { + size: 0, + consumed: 0, + eor: false, + reader: rd, + } + } + + // read next record, returns true on EOF + fn nextrec(&mut self) -> io::Result { + assert_eq!(self.consumed, self.size); + + let rechdr: u32 = match unpack(&mut self.reader) { + Ok(v) => v, + Err(Error(ErrorKind::IOError(ref err), _)) + if err.kind() == io::ErrorKind::UnexpectedEof => + { + return Ok(true) + } + Err(e) => return Err(mapioerr(e)), + }; + + self.size = (rechdr & !LAST_REC) as usize; + self.consumed = 0; + self.eor = (rechdr & LAST_REC) != 0; + + Ok(false) + } + + fn totremains(&self) -> usize { + self.size - self.consumed + } + + /// Current fragment is the end of the record. 
+ pub fn eor(&self) -> bool { + self.eor + } +} + +impl Read for XdrRecordReader { + fn read(&mut self, buf: &mut [u8]) -> io::Result { + let nread = { + let data = self.fill_buf()?; + let len = min(buf.len(), data.len()); + + (&data[..len]).read(buf)? + }; + + self.consume(nread); + Ok(nread) + } +} + +impl BufRead for XdrRecordReader { + fn fill_buf(&mut self) -> io::Result<&[u8]> { + while self.totremains() == 0 { + if self.nextrec()? { + return Ok(&[]); + } + } + + let remains = self.totremains(); + let data = self.reader.fill_buf()?; + Ok(&data[..min(data.len(), remains)]) + } + + fn consume(&mut self, sz: usize) { + assert!(sz <= self.totremains()); + self.consumed += sz; + self.reader.consume(sz); + } +} + +impl IntoIterator for XdrRecordReader { + type Item = io::Result>; + type IntoIter = XdrRecordReaderIter; + + fn into_iter(self) -> Self::IntoIter { + XdrRecordReaderIter(Some(self)) + } +} + +/// Iterator over records in the stream. +/// +/// Each iterator result is either: +/// +/// * A complete record, or +/// * an IO error. +/// +/// It will return an IO error once, and then end the iterator. +/// A short read or an unterminated record will also end the iterator. It will not return a partial +/// record. +#[derive(Debug)] +pub struct XdrRecordReaderIter(Option>); + +impl Iterator for XdrRecordReaderIter { + type Item = io::Result>; + + fn next(&mut self) -> Option { + if let Some(mut rr) = self.0.take() { + let mut buf = Vec::new(); + + // loop over fragments until we get a complete record + loop { + // Do we need next fragment? 
+ if rr.totremains() == 0 { + match rr.nextrec() { + Err(e) => return Some(Err(e)), // IO error + Ok(true) => return None, // EOF + Ok(false) => (), // keep going + } + } + + let remains = rr.totremains(); + let eor = rr.eor(); + + match rr.by_ref().take(remains as u64).read_to_end(&mut buf) { + Ok(sz) if sz == remains => (), // OK, keep going + Ok(_) => return None, // short read + Err(e) => return Some(Err(e)), // error + }; + + if eor { + break; + } + } + self.0 = Some(rr); + Some(Ok(buf)) + } else { + None + } + } +} + +const WRBUF: usize = 65536; + +/// Write records into a bytestream. +/// +/// Flushes the current buffer as end of record when destroyed. +pub struct XdrRecordWriter { + buf: Vec, // accumulated record fragment + bufsz: usize, // max fragment size + eor: bool, // last fragment was eor + writer: W, // writer we're passing on to +} + +impl XdrRecordWriter { + /// Create a new `XdrRecordWriter` wrapped around a `Write` + /// implementation, using a default buffer size (64k). + pub fn new(w: W) -> XdrRecordWriter { + XdrRecordWriter::with_buffer(w, WRBUF) + } + + /// Create an instance with a specific buffer size. Panics if the + /// size is zero. + pub fn with_buffer(w: W, bufsz: usize) -> XdrRecordWriter { + if bufsz == 0 { + panic!("bufsz must be non-zero") + } + XdrRecordWriter { + buf: Vec::with_capacity(bufsz), + bufsz: bufsz, + eor: false, + writer: w, + } + } + + /// Flush the current buffer. If `eor` is true, the end of record + /// marker is set. 
+ pub fn flush_eor(&mut self, eor: bool) -> io::Result<()> { + if !eor && self.buf.len() == 0 { + return Ok(()); + } + + let rechdr = self.buf.len() as u32 | (if eor { LAST_REC } else { 0 }); + + pack(&rechdr, &mut self.writer).map_err(mapioerr)?; + let _ = self.writer.write_all(&self.buf).map(|_| ())?; + self.buf.truncate(0); + + self.eor = eor; + self.writer.flush() + } +} + +impl Drop for XdrRecordWriter { + fn drop(&mut self) { + if self.buf.len() > 0 || !self.eor { + let _ = self.flush_eor(true); + } + } +} + +impl Write for XdrRecordWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + let mut off = 0; + + while off < buf.len() { + let chunk = &buf[off..off + min(buf.len() - off, self.bufsz)]; + if self.buf.len() + chunk.len() > self.bufsz { + self.flush()?; + } + + self.buf.extend(chunk); + off += chunk.len(); + } + + Ok(off) + } + + fn flush(&mut self) -> io::Result<()> { + self.flush_eor(false) + } +} diff --git a/src/proxy/rust-xdr/xdr-codec/src/test.rs b/src/proxy/rust-xdr/xdr-codec/src/test.rs new file mode 100644 index 00000000..db739b19 --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/src/test.rs @@ -0,0 +1,739 @@ +// Don't rustfmt in here to avoid trashing vec![] formatting +#![cfg_attr(rustfmt, rustfmt_skip)] + +use std::io::Cursor; +use super::{Error, ErrorKind, Pack, Unpack, Opaque, + pack_flex, pack_opaque_flex, pack_string, pack_array, pack_opaque_array, + unpack_array, unpack_opaque_array, unpack_string, unpack_flex, unpack_opaque_flex}; + + +#[cfg(feature = "bytecodec")] +#[test] +fn basic_8() { + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(0u8.pack(&mut out).unwrap(), 4); + assert_eq!(100u8.pack(&mut out).unwrap(), 4); + assert_eq!((-1i8).pack(&mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 12); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x64, + 0xff, 0xff, 0xff, 0xff, ]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (0u8, 4)); + 
assert_eq!(Unpack::unpack(&mut input).unwrap(), (100u8, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (-1i8, 4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(0i8.pack(&mut out).unwrap(), 4); + assert_eq!((-123i8).pack(&mut out).unwrap(), 4); + assert_eq!((-128i8).pack(&mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 12); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xff, 0x85, + 0xff, 0xff, 0xff, 0x80 ]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (0i8, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (-123i8, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), ((1<<7) as i8, 4)); + } +} + +#[test] +fn basic_32() { + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(0u32.pack(&mut out).unwrap(), 4); + assert_eq!(1000u32.pack(&mut out).unwrap(), 4); + assert_eq!(823987423u32.pack(&mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 12); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x03, 0xe8, + 0x31, 0x1d, 0x0c, 0xdf, ]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (0u32, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (1000u32, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (823987423u32, 4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(0i32.pack(&mut out).unwrap(), 4); + assert_eq!((-1238i32).pack(&mut out).unwrap(), 4); + assert_eq!(((1i32<<31) as i32).pack(&mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 12); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xfb, 0x2a, + 0x80, 0x00, 0x00, 0x00 ]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (0i32, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (-1238i32, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), ((1<<31) as i32, 4)); + } +} + +#[test] +fn basic_64() { + { + let mut 
out = Cursor::new(Vec::new()); + + assert_eq!(0u64.pack(&mut out).unwrap(), 8); + assert_eq!(0x0011223344556677u64.pack(&mut out).unwrap(), 8); + assert_eq!(0xff00ff00ff00ff00u64.pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 24); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, + 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00 ]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (0u64, 8)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (4822678189205111u64, 8)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (18374966859414961920u64, 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(0i64.pack(&mut out).unwrap(), 8); + assert_eq!((-2938928374982749237i64).pack(&mut out).unwrap(), 8); + assert_eq!(((1i64<<63) as i64).pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 24); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xd7, 0x36, 0xd4, 0x36, 0xcc, 0xd6, 0x53, 0xcb, + 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 ]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (0i64, 8)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (-2938928374982749237i64, 8)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), ((1i64<<63) as i64, 8)); + } +} + +#[test] +fn basic_bool() { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(true.pack(&mut out).unwrap(), 4); + assert_eq!(false.pack(&mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0, 0, 0, 1, 0, 0, 0, 0]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (true, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (false, 4)); + + let bad = vec![0, 0, 0, 2]; + let mut input = Cursor::new(bad); + match bool::unpack(&mut input) { + Err(Error(ErrorKind::InvalidEnum(_), _)) => 
(), + res => panic!("bad result {:?}", res), + } +} + +#[test] +fn basic_string() { + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!("foo!".pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x04, 0x66, 0x6f, 0x6f, 0x21]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (String::from("foo!"), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!("foo".pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x03, 0x66, 0x6f, 0x6f, 0x00]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (String::from("foo"), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!("foobar".pack(&mut out).unwrap(), 12); + assert_eq!("piff".pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 20); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x06, 0x66, 0x6f, 0x6f, 0x62, 0x61, 0x72, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x04, 0x70, 0x69, 0x66, 0x66]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (String::from("foobar"), 12)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (String::from("piff"), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(pack_string("foo!", Some(10), &mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x04, 0x66, 0x6f, 0x6f, 0x21]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (String::from("foo!"), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + match pack_string("foo!", Some(2), &mut out) { + Err(Error(ErrorKind::InvalidLen(_), _)) => (), + e => panic!("bad result {:?}", e), + } + } +} + +#[test] +fn basic_flex() { + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(vec![0x11u32, 0x22, 0x33, 
0x44].pack(&mut out).unwrap(), 4*4 + 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4*4 + 4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x44]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (vec![0x11u32, 0x22, 0x33, 0x44], 4*4+4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(vec![0x11u32, 0x22].pack(&mut out).unwrap(), 2*4+4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 2*4+4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (vec![0x11u32, 0x22], 4*2+4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(vec![0x11u32, 0x22, 0x00].pack(&mut out).unwrap(), 3*4+4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 3*4+4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x00]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (vec![0x11u32, 0x22, 0x00], 3*4+4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(vec![0x11u32, 0x22, 0x33].pack(&mut out).unwrap(), 3*4+4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 3*4+4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (vec![0x11u32, 0x22, 0x33], 3*4+4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(vec![0x11u32, 0x22, 0x33, 0x44, 0x55].pack(&mut out).unwrap(), 4*5+4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4*5+4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x05, + 0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33, + 0x00, 0x00, 0x00, 0x44, + 
0x00, 0x00, 0x00, 0x55]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (vec![0x11u32, 0x22, 0x33, 0x44, 0x55], 5*4+4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(pack_flex(&vec![0x11u32, 0x22, 0x33, 0x44, 0x55], Some(10), &mut out).unwrap(), 4*5+4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4*5+4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x05, + 0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33, + 0x00, 0x00, 0x00, 0x44, + 0x00, 0x00, 0x00, 0x55]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (vec![0x11u32, 0x22, 0x33, 0x44, 0x55], 5*4+4)); + } + + { + let mut out = Cursor::new(Vec::new()); + + match pack_flex(&vec![0x11u32, 0x22, 0x33, 0x44, 0x55], Some(4), &mut out) { + Err(Error(ErrorKind::InvalidLen(_), _)) => (), + e => panic!("bad result {:?}", e) + } + } +} + +#[test] +fn basic_opaque_flex() { + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(Opaque::borrowed(&vec![0x11u8, 0x22, 0x33, 0x44]).pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x04, 0x11, 0x22, 0x33, 0x44]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (Opaque::borrowed(&vec![0x11u8, 0x22, 0x33, 0x44]), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(Opaque::borrowed(&vec![0x11u8, 0x22]).pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x02, 0x11, 0x22, 0x00, 0x00]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (Opaque::borrowed(&vec![0x11u8, 0x22]), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(Opaque::borrowed(&vec![0x11u8, 0x22, 0x00]).pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x03, 
0x11, 0x22, 0x00, 0x00]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (Opaque::borrowed(&vec![0x11u8, 0x22, 0x00]), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(Opaque::borrowed(&vec![0x11u8, 0x22, 0x33]).pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x03, 0x11, 0x22, 0x33, 0x00]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (Opaque::borrowed(&vec![0x11u8, 0x22, 0x33]), 8)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(Opaque::borrowed(&vec![0x11u8, 0x22, 0x33, 0x44, 0x55]).pack(&mut out).unwrap(), 12); + + let v = out.into_inner(); + + assert_eq!(v.len(), 12); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x05, 0x11, 0x22, 0x33, 0x44, 0x55, 0x00, 0x00, 0x00]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (Opaque::borrowed(&vec![0x11u8, 0x22, 0x33, 0x44, 0x55]), 12)); + } + + { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(pack_opaque_flex(&vec![0x11u8, 0x22, 0x33, 0x44, 0x55], Some(10), &mut out).unwrap(), 12); + + let v = out.into_inner(); + + assert_eq!(v.len(), 12); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x05, 0x11, 0x22, 0x33, 0x44, 0x55, 0x00, 0x00, 0x00]); + + let mut input = Cursor::new(v); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (Opaque::borrowed(&vec![0x11u8, 0x22, 0x33, 0x44, 0x55]), 12)); + } + + { + let mut out = Cursor::new(Vec::new()); + + match pack_opaque_flex(&vec![0x11u8, 0x22, 0x33, 0x44, 0x55], Some(3), &mut out) { + Err(Error(ErrorKind::InvalidLen(_), _)) => (), + e => panic!("bad result {:?}", e), + } + } +} + +#[test] +fn bounded_flex() { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(vec![0x11u32, 0x22, 0x33, 0x44, 0x55].pack(&mut out).unwrap(), 4*5+4); + + let v = out.into_inner(); + + { + let mut input = Cursor::new(v.clone()); + assert_eq!(unpack_flex(&mut input, 
Some(10)).unwrap(), (vec![0x11u32, 0x22, 0x33, 0x44, 0x55], 5*4+4)); + } + { + let mut input = Cursor::new(v.clone()); + match unpack_flex::<_, Vec>(&mut input, Some(4)) { + Result::Err(Error(ErrorKind::InvalidLen(_), _)) => (), + e => panic!("Unexpected {:?}", e), + } + } +} + +#[test] +fn bounded_opaque_flex() { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(Opaque::borrowed(&vec![0x11u8, 0x22, 0x33, 0x44, 0x55]).pack(&mut out).unwrap(), 12); + + let v = out.into_inner(); + + { + let mut input = Cursor::new(v.clone()); + assert_eq!(unpack_opaque_flex(&mut input, Some(10)).unwrap(), (vec![0x11u8, 0x22, 0x33, 0x44, 0x55], 12)); + } + { + let mut input = Cursor::new(v.clone()); + match unpack_opaque_flex(&mut input, Some(4)) { + Result::Err(Error(ErrorKind::InvalidLen(_), _)) => (), + e => panic!("Unexpected {:?}", e), + } + } +} + +#[test] +fn bounded_string() { + let mut out = Cursor::new(Vec::new()); + + assert_eq!(String::from("hello, world").pack(&mut out).unwrap(), 16); + + let v = out.into_inner(); + + { + let mut input = Cursor::new(v.clone()); + assert_eq!(unpack_string(&mut input, Some(16)).expect("unpack_string failed"), + (String::from("hello, world"), 16)); + } + { + let mut input = Cursor::new(v.clone()); + match unpack_string(&mut input, Some(5)) { + Result::Err(Error(ErrorKind::InvalidLen(_), _)) => (), + e => panic!("Unexpected {:?}", e), + } + } +} + +#[test] +fn basic_array() { + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u32, 0x22, 0x33]; + + + assert_eq!(pack_array(&a, a.len(), &mut out, Some(&0)).unwrap(), 3*4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 3*4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33]); + + let mut input = Cursor::new(v); + let mut b = [0u32; 3]; + let bsz = unpack_array(&mut input, &mut b[..], 3, Some(&0)).expect("unpack failed"); + assert_eq!(bsz, 4*3); + assert_eq!(&a[..], &b[..]); + } + + { + let mut out = Cursor::new(Vec::new()); + 
let a = [0x11u32, 0x22, 0x33, 0x44]; + + assert_eq!(pack_array(&a, a.len(), &mut out, Some(&0)).unwrap(), 4*4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4*4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33, 0x00, 0x00, 0x00, 0x44]); + + let mut input = Cursor::new(v); + let mut b = [0u32; 3]; + let bsz = unpack_array(&mut input, &mut b[..], 4, Some(&0)).expect("unpack_array"); + assert_eq!(bsz, 4*4); + assert_eq!(&a[..3], &b[..]); + } + + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u32, 0x22, 0x33, 0x44, 0x55]; + + assert_eq!(pack_array(&a, a.len(), &mut out, Some(&0)).unwrap(), 5*4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4*5); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33, + 0x00, 0x00, 0x00, 0x44, + 0x00, 0x00, 0x00, 0x55]); + + let mut input = Cursor::new(v); + let mut b = [0u32; 5]; + let bsz = unpack_array(&mut input, &mut b[..], a.len(), Some(&0)).expect("unpack_array"); + assert_eq!(bsz, 5*4); + assert_eq!(&a[..], &b[..]); + } + + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u32, 0x22, 0x33, 0x44, 0x55]; + + assert_eq!(pack_array(&a, 4, &mut out, Some(&0)).unwrap(), 4*4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4*4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33, + 0x00, 0x00, 0x00, 0x44]); + + let mut input = Cursor::new(v); + let mut b = [0u32; 4]; + let bsz = unpack_array(&mut input, &mut b[..], 4, Some(&0)).expect("unpack_array"); + assert_eq!(bsz, 4*4); + assert_eq!(&a[..4], &b[..]); + } + + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u32, 0x22, 0x33]; + + assert_eq!(pack_array(&a, 4, &mut out, Some(&0)).unwrap(), 4*4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4*4); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x11, + 0x00, 0x00, 0x00, 0x22, + 0x00, 0x00, 0x00, 0x33, + 0x00, 0x00, 0x00, 0x00]); + + let mut input = 
Cursor::new(v); + let mut b = [0u32; 4]; + let bsz = unpack_array(&mut input, &mut b[..], 4, Some(&0)).expect("unpack_array"); + assert_eq!(bsz, 4*4); + assert_eq!(vec![0x11,0x22,0x33,0x00], b); + } +} + +#[test] +fn basic_opaque_array() { + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u8, 0x22, 0x33]; + + + assert_eq!(pack_opaque_array(&a, a.len(), &mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4); + assert_eq!(v, vec![0x11, 0x22, 0x33, 0x00]); + + let mut input = Cursor::new(v); + let mut b = [0u8; 3]; + let bsz = unpack_opaque_array(&mut input, &mut b[..], 3).expect("unpack opaque failed"); + assert_eq!(bsz, 4); + assert_eq!(&a[..], &b[..]); + } + + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u8, 0x22, 0x33, 0x44]; + + assert_eq!(pack_opaque_array(&a, a.len(), &mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4); + assert_eq!(v, vec![0x11, 0x22, 0x33, 0x44]); + + let mut input = Cursor::new(v); + let mut b = [0u8; 4]; + let bsz = unpack_opaque_array(&mut input, &mut b[..], 4).expect("unpack_opaque_array"); + assert_eq!(bsz, 4); + assert_eq!(&a[..], &b[..]); + } + + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u8, 0x22, 0x33, 0x44, 0x55]; + + assert_eq!(pack_opaque_array(&a, a.len(), &mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 8); + assert_eq!(v, vec![0x11, 0x22, 0x33, 0x44, 0x55, 0x00, 0x00, 0x00]); + + let mut input = Cursor::new(v); + let mut b = [0u8; 5]; + let bsz = unpack_opaque_array(&mut input, &mut b[..], a.len()).expect("unpack_opaque_array"); + assert_eq!(bsz, 8); + assert_eq!(&a[..], &b[..]); + } + + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u8, 0x22, 0x33, 0x44, 0x55]; + + assert_eq!(pack_opaque_array(&a, 4, &mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4); + assert_eq!(v, vec![0x11, 0x22, 0x33, 0x44]); + + let mut input = Cursor::new(v); + let mut b = [0u8; 5]; + let bsz 
= unpack_opaque_array(&mut input, &mut b[..], 4).expect("unpack_opaque_array"); + assert_eq!(bsz, 4); + assert_eq!(&a[..4], &b[..4]); + assert_eq!(b[4], 0); + } + + { + let mut out = Cursor::new(Vec::new()); + let a = [0x11u8, 0x22, 0x33]; + + assert_eq!(pack_opaque_array(&a, 4, &mut out).unwrap(), 4); + + let v = out.into_inner(); + + assert_eq!(v.len(), 4); + assert_eq!(v, vec![0x11, 0x22, 0x33, 0x00]); + + let mut input = Cursor::new(v); + let mut b = [0u8; 4]; + let bsz = unpack_opaque_array(&mut input, &mut b[..], 4).expect("unpack_opaque_array"); + assert_eq!(bsz, 4); + assert_eq!(vec![0x11, 0x22, 0x33, 0x00], b); + } +} + +#[test] +fn basic_option() { + let mut out = Cursor::new(Vec::new()); + let none: Option = None; + let some: Option = Some(0x11223344_u32); + + assert_eq!(none.pack(&mut out).unwrap(), 4); + assert_eq!(some.pack(&mut out).unwrap(), 8); + + let v = out.into_inner(); + + assert_eq!(v.len(), 12); + assert_eq!(v, vec![0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x11, 0x22, 0x33, 0x44,]); + + let mut input = Cursor::new(v); + assert_eq!(Option::::unpack(&mut input).unwrap(), (None, 4)); + assert_eq!(Unpack::unpack(&mut input).unwrap(), (Some(0x11223344_u32), 8)); + + let bad = vec![0, 0, 0, 2]; + let mut input = Cursor::new(bad); + + match Option::::unpack(&mut input) { + Err(Error(ErrorKind::InvalidEnum(_), _)) => (), + res => panic!("bad result {:?}", res), + } +} diff --git a/src/proxy/rust-xdr/xdr-codec/tests/qc-record.rs b/src/proxy/rust-xdr/xdr-codec/tests/qc-record.rs new file mode 100644 index 00000000..c22a50ed --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/tests/qc-record.rs @@ -0,0 +1,116 @@ +extern crate quickcheck; + +use std::io::{Cursor, Write}; + +use quickcheck::{quickcheck, TestResult}; + +use xdr_codec::record::{XdrRecordReader, XdrRecordWriter}; +use xdr_codec::Pack; + +// Make sure XdrRecordWriter writes the right stuff +fn check_writerec(bufsz: usize, eor: bool, ref bytes: Vec) -> TestResult { + const EOR: u32 = 1 
<< 31; + + if bufsz == 0 { + return TestResult::discard(); + } + + // Make an expected serialization into fragments + let mut expected = Vec::new(); + let nchunks = (bytes.len() + bufsz - 1) / bufsz; + + for (idx, c) in bytes.chunks(bufsz).enumerate() { + let mut len = c.len() as u32; + if nchunks - 1 == idx && eor { + len |= EOR; + } + + if let Err(e) = len.pack(&mut expected) { + return TestResult::error(format!("pack failed: {:?}", e)); + } + expected.extend(c); + } + if !eor || nchunks == 0 { + if let Err(e) = EOR.pack(&mut expected) { + return TestResult::error(format!("eor pack failed: {:?}", e)); + } + } + + // Write the same data with XdrRecordWriter + let mut buf = Vec::new(); + { + let mut xw = XdrRecordWriter::with_buffer(&mut buf, bufsz); + if let Err(e) = xw.write(bytes) { + return TestResult::error(format!("xw write failed: {:?}", e)); + } + if let Err(e) = xw.flush_eor(eor) { + return TestResult::error(format!("xw flush_eor failed: {:?}", e)); + } + } + + if buf != expected { + println!( + "eor {} bufsz {} bytes {:?} len {}", + eor, + bufsz, + bytes, + bytes.len() + ); + println!("expected {:?} len {}", expected, expected.len()); + println!(" buf {:?} len {}", buf, buf.len()); + } + + TestResult::from_bool(buf == expected) +} + +#[test] +fn record_writerec() { + quickcheck(check_writerec as fn(usize, bool, Vec) -> TestResult); +} + +// Make sure record structure survives a round trip +fn check_codec(bufsz: usize, ref records: Vec>) -> TestResult { + if bufsz == 0 { + return TestResult::discard(); + } + + let mut buf = Vec::new(); + + for rec in records { + let mut xw = XdrRecordWriter::with_buffer(&mut buf, bufsz); + + if let Err(e) = xw.write(rec) { + return TestResult::error(format!("xw write failed: {:?}", e)); + } + } + + { + let cur = Cursor::new(buf); + let xr = XdrRecordReader::new(cur); + + for (res, orig) in xr.into_iter().zip(records) { + match res { + Err(e) => return TestResult::error(format!("xr failed {:?}", e)), + Ok(ref rx) => { + if 
rx != orig { + println!( + "bufsz {} mismatch orig {:?}, len {}", + bufsz, + orig, + orig.len() + ); + println!(" rx {:?}, len {}", rx, rx.len()); + return TestResult::failed(); + } + } + } + } + } + + TestResult::passed() +} + +#[test] +fn record_codec() { + quickcheck(check_codec as fn(usize, Vec>) -> TestResult); +} diff --git a/src/proxy/rust-xdr/xdr-codec/tests/quickcheck.rs b/src/proxy/rust-xdr/xdr-codec/tests/quickcheck.rs new file mode 100644 index 00000000..7c696de9 --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/tests/quickcheck.rs @@ -0,0 +1,318 @@ +extern crate quickcheck; + +use std::fmt::Debug; +use std::io::Cursor; +use std::iter; + +use quickcheck::{quickcheck, Arbitrary}; +use xdr_codec::{ + pack_array, pack_opaque_array, padding, unpack_array, unpack_opaque_array, Error, ErrorKind, + Pack, Unpack, +}; + +// Output of packing is a multiple of 4 +fn pack(v: T) -> bool +where + T: PartialEq + Pack>>, +{ + let mut data = Cursor::new(Vec::new()); + + let sz = v.pack(&mut data).expect("pack failed"); + sz % 4 == 0 +} + +// Packing something then unpacking returns the same value +fn codec(v: T) -> bool +where + T: PartialEq + Pack>> + Unpack>>, +{ + let mut data = Cursor::new(Vec::new()); + + let psz = v.pack(&mut data).expect("pack failed"); + + let mut data = Cursor::new(data.into_inner()); + let (uv, usz) = T::unpack(&mut data).expect("unpack failed"); + + psz == usz && v == uv +} + +// Packing something then unpacking returns the same value +fn short_unpack(v: T) -> bool +where + T: PartialEq + Pack>> + Unpack>>, +{ + let mut data = Cursor::new(Vec::new()); + + let psz = v.pack(&mut data).expect("pack failed"); + + // truncate data to make sure unpacking fails + let data = data.into_inner(); + assert_eq!(psz, data.len()); + let data = Vec::from(&data[..data.len() - 1]); + + let mut data = Cursor::new(data); + match T::unpack(&mut data) { + Err(Error(ErrorKind::IOError(_), _)) => true, + _ => false, + } +} + +fn quickcheck_pack_t() +where + T: 
PartialEq + Pack>> + Unpack>> + Arbitrary + Debug, +{ + quickcheck(pack as fn(T) -> bool); + quickcheck(pack as fn(Vec) -> bool); + quickcheck(pack as fn(Option) -> bool); + quickcheck(pack as fn(Vec>) -> bool); + quickcheck(pack as fn(Option>) -> bool); +} + +fn quickcheck_codec_t() +where + T: PartialEq + Pack>> + Unpack>> + Arbitrary + Debug, +{ + quickcheck(codec as fn(T) -> bool); + quickcheck(codec as fn(Vec) -> bool); + quickcheck(codec as fn(Option) -> bool); + quickcheck(codec as fn(Vec>) -> bool); + quickcheck(codec as fn(Option>) -> bool); +} + +fn quickcheck_short_unpack_t() +where + T: PartialEq + Pack>> + Unpack>> + Arbitrary + Debug, +{ + quickcheck(short_unpack as fn(T) -> bool); + quickcheck(short_unpack as fn(Vec) -> bool); + quickcheck(short_unpack as fn(Option) -> bool); + quickcheck(short_unpack as fn(Vec>) -> bool); + quickcheck(short_unpack as fn(Option>) -> bool); +} + +#[test] +fn quickcheck_pack_ui32() { + quickcheck_pack_t::(); + quickcheck_pack_t::(); + quickcheck_pack_t::(); +} + +#[test] +fn quickcheck_pack_iu64() { + quickcheck_pack_t::(); + quickcheck_pack_t::(); +} + +#[test] +fn quickcheck_pack_float() { + quickcheck_pack_t::(); + quickcheck_pack_t::(); +} + +#[test] +fn quickcheck_codec_ui32() { + quickcheck_codec_t::(); + quickcheck_codec_t::(); + quickcheck_codec_t::(); +} + +#[test] +fn quickcheck_codec_iu64() { + quickcheck_codec_t::(); + quickcheck_codec_t::(); +} + +#[test] +fn quickcheck_codec_float() { + quickcheck_codec_t::(); + quickcheck_codec_t::(); +} + +#[test] +fn quickcheck_short_unpack_ui32() { + quickcheck_short_unpack_t::(); + quickcheck_short_unpack_t::(); + quickcheck_short_unpack_t::(); +} + +#[test] +fn quickcheck_short_unpack_iu64() { + quickcheck_short_unpack_t::(); + quickcheck_short_unpack_t::(); +} + +#[test] +fn quickcheck_short_unpack_float() { + quickcheck_short_unpack_t::(); + quickcheck_short_unpack_t::(); +} + +fn check_array(arraysz: usize, rxsize: usize, data: Vec, defl: Option) -> bool { + let 
mut buf = Vec::new(); + + // pack data we have into the array + let tsz = match pack_array(&data[..], arraysz, &mut buf, defl.as_ref()) { + Ok(tsz) if data.len() >= arraysz || defl.is_some() => tsz, + e @ Err(Error(ErrorKind::InvalidLen(_), _)) => { + let pass = defl.is_none() && data.len() < arraysz; + if !pass { + println!( + "pack_array failed {:?}, defl {:?} data.len {} arraysz {}", + e, + defl, + data.len(), + arraysz + ) + } + return pass; + } + Err(e) => { + println!("pack_array failed {:?}", e); + return false; + } + Ok(tsz) => { + println!( + "pack_array unexpected success tsz {} data.len {} arraysz {} defl {:?}", + tsz, + data.len(), + arraysz, + defl + ); + return false; + } + }; + if tsz != arraysz * 4 { + println!("tsz {} arraysz*4 {}", tsz, arraysz * 4); + return false; + } + if buf.len() != tsz { + println!("buf.len {} tsz {}", buf.len(), tsz); + return false; + } + + // if data is shorter than array, then serialized is padded with zero + // XXX padding isn't necessarily zero + //if data.len() < arraysz { + // assert!(defl.is_some()); + // if buf[data.len()*4..].iter().any(|b| *b != defl.unwrap()) { println!("nonzero pad"); return false } + //} + + let mut recv: Vec = iter::repeat(0xffff_ffff_u32).take(rxsize).collect(); + let mut cur = Cursor::new(buf); + + // unpack rxsize elements + let rsz = match unpack_array(&mut cur, &mut recv[..], arraysz, defl.as_ref()) { + Ok(rsz) if recv.len() <= arraysz || defl.is_some() => rsz, // normal success + Err(Error(ErrorKind::InvalidLen(_), _)) => return defl.is_none() && recv.len() > arraysz, // expected if recv is too big and there's no default + Err(e) => { + println!("unpack_array failed {:?}", e); + return false; + } + Ok(rsz) => { + println!( + "unpack_array unexpected success rsz {} recv.len {} arraysz {} defl {:?}", + rsz, + recv.len(), + arraysz, + defl + ); + return false; + } + }; + if rsz != arraysz * 4 { + println!("rsz {} arraysz*4 {}", rsz, arraysz * 4); + return false; + } + + // data and recv 
must match their common prefix up to arraysz + if data + .iter() + .zip(recv.iter().take(arraysz)) + .any(|(d, r)| *d != *r) + { + println!("nonmatching\ndata {:?}\nrecv {:?}", data, recv); + return false; + } + + // if recv is larger than array, then tail is defaulted + if rxsize > arraysz { + assert!(defl.is_some()); + if recv[arraysz..].iter().any(|v| *v != defl.unwrap()) { + println!("nondefault tail"); + return false; + } + } + + true +} + +#[test] +fn quickcheck_array() { + quickcheck(check_array as fn(usize, usize, Vec, Option) -> bool); +} + +fn check_opaque(arraysz: usize, rxsize: usize, data: Vec) -> bool { + let mut buf = Vec::new(); + + // pack data we have into the array + let tsz = pack_opaque_array(&data[..], arraysz, &mut buf).expect("pack_array failed"); + if tsz != arraysz + padding(arraysz).len() { + println!( + "tsz {} arraysz+pad {}", + tsz, + arraysz + padding(arraysz).len() + ); + return false; + } + if buf.len() != tsz { + println!("buf.len {} tsz {}", buf.len(), tsz); + return false; + } + + // if data is shorter than array, then serialized is padded with zero + if data.len() < arraysz { + if buf[data.len()..].iter().any(|b| *b != 0) { + println!("nonzero pad"); + return false; + } + } + + let mut recv: Vec = iter::repeat(0xff).take(rxsize).collect(); + let mut cur = Cursor::new(buf); + + // unpack rxsize elements + let rsz = unpack_opaque_array(&mut cur, &mut recv[..], arraysz).expect("unpack_array failed"); + if rsz != arraysz + padding(arraysz).len() { + println!( + "rsz {} arraysz+pad {}", + rsz, + arraysz + padding(arraysz).len() + ); + return false; + } + + // data and recv must match their common prefix up to arraysz + if data + .iter() + .zip(recv.iter().take(arraysz)) + .any(|(d, r)| *d != *r) + { + println!("nonmatching\ndata {:?}\nrecv {:?}", data, recv); + return false; + } + + // if recv is larger than array, then tail is zero + if rxsize > arraysz { + if recv[arraysz..].iter().any(|v| *v != 0) { + println!("nondefault tail"); 
+ return false; + } + } + + true +} + +#[test] +fn quickcheck_opaque() { + quickcheck(check_opaque as fn(usize, usize, Vec) -> bool); +} diff --git a/src/proxy/rust-xdr/xdr-codec/tests/test-record.rs b/src/proxy/rust-xdr/xdr-codec/tests/test-record.rs new file mode 100644 index 00000000..81b10183 --- /dev/null +++ b/src/proxy/rust-xdr/xdr-codec/tests/test-record.rs @@ -0,0 +1,174 @@ +// Don't rustfmt in here to avoid trashing vec![] formatting +#![cfg_attr(rustfmt, rustfmt_skip)] + +use std::io::{Cursor, Read, Write}; + +use xdr_codec::record::{XdrRecordReader, XdrRecordWriter}; + +#[test] +fn recread_full() { + let inbuf = vec![128, 0, 0, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let cur = Cursor::new(inbuf); + + let mut recread = XdrRecordReader::new(cur); + let mut buf = vec![0; 20]; + + assert_eq!(recread.read(&mut buf[..]).unwrap(), 10); + assert_eq!( + buf, + vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ); + assert!(recread.eor()); +} + +#[test] +fn recread_short() { + let inbuf = vec![128, 0, 0, 10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]; + let cur = Cursor::new(inbuf); + + let mut recread = XdrRecordReader::new(cur); + let mut buf = vec![0; 5]; + + assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); + assert!(recread.eor()); + assert_eq!(buf, vec![0, 1, 2, 3, 4]); + + assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); + assert!(recread.eor()); + assert_eq!(buf, vec![5, 6, 7, 8, 9]); +} + +#[test] +fn recread_half() { + let inbuf = vec![0, 0, 0, 5, 0, 1, 2, 3, 4, 128, 0, 0, 5, 5, 6, 7, 8, 9]; + let cur = Cursor::new(inbuf); + + let mut recread = XdrRecordReader::new(cur); + let mut buf = vec![0; 10]; + + assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); + assert_eq!(buf, vec![0, 1, 2, 3, 4, 0, 0, 0, 0, 0]); + assert!(!recread.eor()); + + assert_eq!(recread.read(&mut buf[..]).unwrap(), 5); + assert_eq!(buf, vec![5, 6, 7, 8, 9, 0, 0, 0, 0, 0]); + assert!(recread.eor()); +} + +#[test] +fn recread_iter() { + let inbuf = vec![ + 0, + 0, + 0, + 5, + 0, 
+ 1, + 2, + 3, + 4, + 128, + 0, + 0, + 5, + 5, + 6, + 7, + 8, + 9, + 128, + 0, + 0, + 1, + 99, + ]; + let cur = Cursor::new(inbuf); + let recread = XdrRecordReader::new(cur); + + let expected = vec![vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9], vec![99]]; + let got: Vec<_> = recread.into_iter().map(|r| r.expect("IO error")).collect(); + + assert_eq!(expected, got); +} + +#[test] +fn read_zerorec() { + let inbuf = vec![0, 0, 0, 0, 0, 0, 0, 0, 128, 0, 0, 0]; + + let cur = Cursor::new(inbuf); + let mut recread = XdrRecordReader::new(cur); + + let mut buf = [0; 100]; + assert_eq!(recread.read(&mut buf).unwrap(), 0); + assert!(recread.eor()); +} + +#[test] +#[should_panic(expected = "must be non-zero")] +fn zerosz() { + let buf = Vec::new(); + let _ = XdrRecordWriter::with_buffer(buf, 0); +} + +#[test] +fn smallrec() { + let mut buf = Vec::new(); + + { + let mut xw = XdrRecordWriter::new(&mut buf); + + assert_eq!(write!(xw, "hello").unwrap(), ()); + } + + assert_eq!(buf, vec![128, 0, 0, 5, 104, 101, 108, 108, 111]) +} + +#[test] +fn largerec() { + let mut buf = Vec::new(); + + { + let mut xw = XdrRecordWriter::with_buffer(&mut buf, 3); + + assert_eq!(write!(xw, "hello").unwrap(), ()); + } + + assert_eq!(buf, vec![0, 0, 0, 3, 104, 101, 108, 128, 0, 0, 2, 108, 111]) +} + +#[test] +fn largerec_flush() { + let mut buf = Vec::new(); + + { + let mut xw = XdrRecordWriter::with_buffer(&mut buf, 10); + + assert_eq!(write!(xw, "hel").unwrap(), ()); + xw.flush().unwrap(); + assert_eq!(write!(xw, "lo").unwrap(), ()); + xw.flush().unwrap(); + } + + assert_eq!( + buf, + vec![ + 0, + 0, + 0, + 3, + 104, + 101, + 108, + 0, + 0, + 0, + 2, + 108, + 111, + 128, + 0, + 0, + 0, + ] + ) +} diff --git a/src/proxy/rust-xdr/xdrgen/Cargo.toml b/src/proxy/rust-xdr/xdrgen/Cargo.toml new file mode 100644 index 00000000..8887416d --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "xdrgen" +version = "0.4.4" +authors = ["Jeremy Fitzhardinge "] +license = 
"MIT/Apache-2.0" +description = "XDR codec generator from specification. Designed for use with xdr-codec." +repository = "https://github.com/jsgf/rust-xdr/tree/master/xdrgen" +documentation = "https://docs.rs/xdrgen/" +readme = "README.md" +keywords = ["encoding", "protocol", "xdr", "rfc4506", "serialization"] +include = [ "src/**/*.rs", "tests/**/*.rs", "*.md", "Cargo.toml" ] + +[[bin]] +name = "xdrgen" +path = "src/xdrgen.rs" +test = false +bench = false +doc = false + +[features] +unstable = [] + +[dependencies] +log = "0.3" +env_logger = "0.10" +nom = { version="3.1", features=["verbose-errors"] } +quote = "0.3" +clap = "2.24" +lazy_static = "0.2" +bitflags = "0.9" + +[dependencies.xdr-codec] +path = "../xdr-codec" +version = "0.4" + + +[dev-dependencies] +tempdir = "0.3" +error-chain = "0.10" diff --git a/src/proxy/rust-xdr/xdrgen/README.md b/src/proxy/rust-xdr/xdrgen/README.md new file mode 100644 index 00000000..cda8d20d --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/README.md @@ -0,0 +1,97 @@ +# Rust XDR library + +[![Build Status](https://travis-ci.org/jsgf/rust-xdr.svg?branch=master)](https://travis-ci.org/jsgf/rust-xdr) +[![Crates.io](https://img.shields.io/crates/v/xdrgen.svg)](https://crates.io/crates/xdrgen) +[![Coverage Status](https://coveralls.io/repos/github/jsgf/promising-future/badge.svg?branch=master)](https://coveralls.io/github/jsgf/promising-future?branch=master) + +This crate provides xdrgen, which takes an XDR specification in a .x +file, and produces Rust code to serialize and deserialize the +specified types. It is intended to be used in conjunction with +[xdr-codec](https://github.com/jsgf/rust-xdr-codec). + +The syntax of the .x file follows +[RFC4506](https://tools.ietf.org/html/rfc4506.html). This has type definitions +for XDR but does not include RPC protocol specifications. Correspondingly, +xdrgen does not support auto-generation of RPC clients/servers. 
+ +## Changes in 0.4.0 + +- Now uses the `quote` package, so it will work on stable Rust +- Detects the use of Rust keywords in XDR specifications, and appends a `_` to them. + +## Usage + +Usage is straightforward. You can generate the Rust code from a spec a build.rs: + +``` +extern crate xdrgen; + +fn main() { + xdrgen::compile("src/simple.x").expect("xdrgen simple.x failed"); +} +``` + +This code can then be included into a module: + +``` +mod simple { + use xdr_codec; + + #[allow(dead_code)] + include!(concat!(env!("OUT_DIR"), "/simple_xdr.rs")); +} +``` + +Once you have this, you can call `mytype.pack(&mut output)`, and +`let mything: MyThing = xdr_codec::unpack(&mut input)?;`. + +The serializers require your types to implement the `Pack` and `Unpack` +traits, and generate code to write to `std::io::Write` implementation, and +read from `std::io::Read`. + +All types and fields are generated public, so you can control their access +outside your module or crate. If your spec references other types which are +not defined within the spec, then you can define them within the module +as well, either by aliasing them with other defined types, or implementing +the `Pack` and `Unpack` traits yourself. + +Use can use xdr-codec's `XdrRecordReader` and `XdrRecordWriter` types as IO +filters that implement XDR-RPC record marking. + +More [documentation for xdrgen +here](https://docs.rs/xdrgen/). See the +[documentation for +xdr-codec](https://docs.rs/xdr-codec/) for more +details about using the generated types and code. + +## Limitations + +There are currently a few limitations: + * The generated code uses identifiers as specified in the .x file, so the + Rust code will not use normal formatting conventions. + * Generated code follows no formatting convention - use rustfmt if desired. + * XDR has discriminated unions, which are a good match for Rust enums. + However, it also supports a `default` case if an unknown discriminator + is encountered. 
This crate supports this for unpacking, but not for + packing, as Rust does not allow enums to have unknown values. + * The generated code uses `#[derive(Debug, Clone, ...)]` to generate + implementations for common traits. However, rustc only supports `#[derive]` + on fixed-size arrays with 0..32 elements; if you have an array larger than + this, the generated code will fail to compile. Right now, the only workaround + is to manually implement `Pack` and `Unpack` for such types. + (TODO: add an option to omit derived traits.) + +## License + +Licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](http://www.apache.org/licenses/LICENSE-2.0)) + * MIT license ([LICENSE-MIT](http://opensource.org/licenses/MIT)) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any +additional terms or conditions. diff --git a/src/proxy/rust-xdr/xdrgen/src/lib.rs b/src/proxy/rust-xdr/xdrgen/src/lib.rs new file mode 100644 index 00000000..c1e2cc08 --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/src/lib.rs @@ -0,0 +1,176 @@ +//! XDR codec generation +//! +//! This crate provides library interfaces for programatically generating Rust code to implement +//! RFC4506 XDR encoding/decoding, as well as a command line tool "xdrgen". +//! +//! It is intended to be used with the "xdr-codec" crate, which provides the runtime library for +//! encoding/decoding primitive types, strings, opaque data and arrays. 
+ +#![recursion_limit = "128"] + +extern crate xdr_codec as xdr; + +#[macro_use] +extern crate quote; + +#[macro_use] +extern crate lazy_static; + +#[macro_use] +extern crate log; + +#[macro_use] +extern crate nom; + +#[macro_use] +extern crate bitflags; + +use std::env; +use std::fmt::Display; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; +use std::result; + +use xdr::Result; + +mod spec; +use spec::{Emit, Emitpack, Symtab}; + +fn result_option(resopt: result::Result, E>) -> Option> { + match resopt { + Ok(None) => None, + Ok(Some(v)) => Some(Ok(v)), + Err(e) => Some(Err(e)), + } +} + +/// Generate Rust code from an RFC4506 XDR specification +/// +/// `infile` is simply a string used in error messages; it may be empty. `input` is a read stream of +/// the specification, and `output` is where the generated code is sent. +pub fn generate(infile: &str, mut input: In, mut output: Out) -> Result<()> +where + In: Read, + Out: Write, +{ + let mut source = String::new(); + + input.read_to_string(&mut source)?; + + let xdr = match spec::specification(&source) { + Ok(defns) => Symtab::new(&defns), + Err(e) => return Err(xdr::Error::from(format!("parse error: {}", e))), + }; + + let xdr = xdr; + + let res: Vec<_> = { + let consts = xdr + .constants() + .filter_map(|(c, &(v, ref scope))| { + if scope.is_none() { + Some(spec::Const(c.clone(), v)) + } else { + None + } + }) + .map(|c| c.define(&xdr)); + + let typespecs = xdr + .typespecs() + .map(|(n, ty)| spec::Typespec(n.clone(), ty.clone())) + .map(|c| c.define(&xdr)); + + let typesyns = xdr + .typesyns() + .map(|(n, ty)| spec::Typesyn(n.clone(), ty.clone())) + .map(|c| c.define(&xdr)); + + let packers = xdr + .typespecs() + .map(|(n, ty)| spec::Typespec(n.clone(), ty.clone())) + .filter_map(|c| result_option(c.pack(&xdr))); + + let unpackers = xdr + .typespecs() + .map(|(n, ty)| spec::Typespec(n.clone(), ty.clone())) + .filter_map(|c| result_option(c.unpack(&xdr))); + + consts + 
.chain(typespecs) + .chain(typesyns) + .chain(packers) + .chain(unpackers) + .collect::>>()? + }; + + let _ = writeln!( + output, + r#" +// GENERATED CODE +// +// Generated from {} by xdrgen. +// +// DO NOT EDIT + +"#, + infile + ); + + for it in res { + let _ = writeln!(output, "{}\n", it.as_str()); + } + + Ok(()) +} + +/// Simplest possible way to generate Rust code from an XDR specification. +/// +/// It is intended for use in a build.rs script: +/// +/// ```ignore +/// extern crate xdrgen; +/// +/// fn main() { +/// xdrgen::compile("src/simple.x").unwrap(); +/// } +/// ``` +/// +/// Output is put into OUT_DIR, and can be included: +/// +/// ```ignore +/// mod simple { +/// use xdr_codec; +/// +/// include!(concat!(env!("OUT_DIR"), "/simple_xdr.rs")); +/// } +/// ``` +/// +/// If your specification uses types which are not within the specification, you can provide your +/// own implementations of `Pack` and `Unpack` for them. +pub fn compile

(infile: P) -> Result<()> +where + P: AsRef + Display, +{ + let input = File::open(&infile)?; + + let mut outdir = PathBuf::from(env::var("OUT_DIR").unwrap_or(String::from("."))); + let outfile = PathBuf::from(infile.as_ref()) + .file_stem() + .unwrap() + .to_owned() + .into_string() + .unwrap() + .replace("-", "_"); + + outdir.push(&format!("{}_xdr.rs", outfile)); + + let output = File::create(outdir)?; + + generate( + infile.as_ref().as_os_str().to_str().unwrap_or(""), + input, + output, + ) +} diff --git a/src/proxy/rust-xdr/xdrgen/src/spec/mod.rs b/src/proxy/rust-xdr/xdrgen/src/spec/mod.rs new file mode 100644 index 00000000..e259ae08 --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/src/spec/mod.rs @@ -0,0 +1,1090 @@ +#![allow(deprecated)] + +use std::collections::btree_map::{BTreeMap, Iter}; +use std::collections::{HashMap, HashSet}; +use std::io::{stderr, Write}; + +use std::result; + +use quote::{self, ToTokens, Tokens}; + +mod xdr_nom; + +use xdr::Error; + +pub type Result = result::Result; + +pub use self::xdr_nom::specification; + +use super::result_option; + +bitflags! { + pub struct Derives: u32 { + const COPY = 1 << 0; + const CLONE = 1 << 1; + const DEBUG = 1 << 2; + const EQ = 1 << 3; + const PARTIALEQ = 1 << 4; + } +} + +impl ToTokens for Derives { + fn to_tokens(&self, toks: &mut Tokens) { + if self.is_empty() { + return; + } + + toks.append("#[derive("); + + let mut der = Vec::new(); + + if self.contains(COPY) { + der.push(quote!(Copy)) + } + if self.contains(CLONE) { + der.push(quote!(Clone)) + } + if self.contains(DEBUG) { + der.push(quote!(Debug)) + } + if self.contains(EQ) { + der.push(quote!(Eq)) + } + if self.contains(PARTIALEQ) { + der.push(quote!(PartialEq)) + } + + toks.append_separated(der, ","); + toks.append(")]"); + } +} + +lazy_static! 
{ + static ref KEYWORDS: HashSet<&'static str> = { + let kws = [ + "abstract", "alignof", "as", "become", "box", "break", "const", "continue", "crate", + "do", "else", "enum", "extern", "false", "final", "fn", "for", "if", "impl", "in", + "let", "loop", "macro", "match", "mod", "move", "mut", "offsetof", "override", "priv", + "proc", "pub", "pure", "ref", "return", "Self", "self", "sizeof", "static", "struct", + "super", "trait", "true", "type", "typeof", "unsafe", "unsized", "use", "virtual", + "where", "while", "yield", + ]; + + kws.iter().map(|x| *x).collect() + }; +} + +fn quote_ident>(id: S) -> quote::Ident { + let id = id.as_ref(); + + if (*KEYWORDS).contains(id) { + quote::Ident::new(format!("{}_", id)) + } else { + quote::Ident::new(id) + } +} + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub enum Value { + Ident(String), + Const(i64), +} + +impl Value { + fn ident>(id: S) -> Value { + Value::Ident(id.as_ref().to_string()) + } + + fn as_ident(&self) -> quote::Ident { + match self { + &Value::Ident(ref id) => quote_ident(id), + &Value::Const(val) => quote::Ident::new(format!( + "Const{}{}", + (if val < 0 { "_" } else { "" }), + val.abs() + )), + } + } + + fn as_i64(&self, symtab: &Symtab) -> Option { + symtab.value(self) + } + + fn as_token(&self, symtab: &Symtab) -> Tokens { + match self { + &Value::Const(c) => quote!(#c), + &Value::Ident(ref id) => { + let tok = quote_ident(id.as_str()); + if let Some((_, Some(ref scope))) = symtab.getconst(id) { + let scope = quote_ident(scope); + quote!(#scope :: #tok) + } else { + quote!(#tok) + } + } + } + } +} + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub enum Type { + UInt, + Int, + UHyper, + Hyper, + Float, + Double, + Quadruple, + Bool, + + // Special array elements + Opaque, // binary + String, // text + + // Compound types + Enum(Vec), + Struct(Vec), + Union(Box, Vec, Option>), + + Option(Box), + Array(Box, Value), + Flex(Box, Option), + + // Type reference (may be 
external) + Ident(String, Option), +} + +impl Type { + fn array(ty: Type, sz: Value) -> Type { + Type::Array(Box::new(ty), sz) + } + + fn flex(ty: Type, sz: Option) -> Type { + Type::Flex(Box::new(ty), sz) + } + + fn option(ty: Type) -> Type { + Type::Option(Box::new(ty)) + } + + fn union((d, c, dfl): (Decl, Vec, Option)) -> Type { + Type::Union(Box::new(d), c, dfl.map(Box::new)) + } + + fn ident>(id: S) -> Type { + Type::Ident(id.as_ref().to_string(), None) + } + + fn ident_with_derives>(id: S, derives: Derives) -> Type { + Type::Ident(id.as_ref().to_string(), Some(derives)) + } + + fn is_boxed(&self, symtab: &Symtab) -> bool { + use self::Type::*; + + match self { + _ if self.is_prim(symtab) => false, + &Array(_, _) | &Flex(_, _) | &Option(_) => false, + &Ident(ref name, _) => { + if let Some(ty) = symtab.typespec(name) { + ty.is_boxed(symtab) + } else { + true + } + } + _ => true, + } + } + + fn is_prim(&self, symtab: &Symtab) -> bool { + use self::Type::*; + + match self { + &Int | &UInt | &Hyper | &UHyper | &Float | &Double | &Quadruple | &Bool => true, + + &Ident(ref id, _) => match symtab.typespec(id) { + None => false, + Some(ref ty) => ty.is_prim(symtab), + }, + + _ => false, + } + } + + fn derivable(&self, symtab: &Symtab, memo: Option<&mut HashMap>) -> Derives { + use self::Type::*; + let mut memoset = HashMap::new(); + + let memo = match memo { + None => &mut memoset, + Some(m) => m, + }; + + if let Some(res) = memo.get(self) { + return *res; + } + + // No derives unless we can prove we have some + memo.insert(self.clone(), Derives::empty()); + + let set = match self { + &Array(ref ty, ref len) => { + let ty = ty.as_ref(); + let set = match ty { + &Opaque | &String => EQ | PARTIALEQ | COPY | CLONE | DEBUG, + ref ty => ty.derivable(symtab, Some(memo)), + }; + match len.as_i64(symtab) { + Some(v) if v <= 32 => set, + _ => Derives::empty(), // no #[derive] for arrays > 32 + } + } + &Flex(ref ty, ..) 
=> { + let set = ty.derivable(symtab, Some(memo)); + set & !COPY // no Copy, everything else OK + } + &Enum(_) => EQ | PARTIALEQ | COPY | CLONE | DEBUG, + &Option(ref ty) => ty.derivable(symtab, Some(memo)), + &Struct(ref fields) => fields + .iter() + .fold(Derives::all(), |a, f| a & f.derivable(symtab, memo)), + + &Union(_, ref cases, ref defl) => { + cases + .iter() + .map(|c| &c.1) + .fold(Derives::all(), |a, c| a & c.derivable(symtab, memo)) + & defl + .as_ref() + .map_or(Derives::all(), |d| d.derivable(symtab, memo)) + } + + &Ident(_, Some(derives)) => derives, + + &Ident(ref id, None) => { + match symtab.typespec(id) { + None => Derives::empty(), // unknown, really + Some(ref ty) => ty.derivable(symtab, Some(memo)), + } + } + + &Float | &Double => PARTIALEQ | COPY | CLONE | DEBUG, + ty if ty.is_prim(symtab) => Derives::all(), + + _ => Derives::all() & !COPY, + }; + + memo.insert(self.clone(), set); + set + } + + fn packer(&self, val: Tokens, symtab: &Symtab) -> Result { + use self::Type::*; + + let res = match self { + &Enum(_) => quote!((*#val as i32).pack(out)?), + + &Flex(ref ty, ref maxsz) => { + let ty = ty.as_ref(); + let maxsz = match maxsz { + &None => quote!(None), + &Some(ref mx) => { + let mx = mx.as_token(symtab); + quote!(Some(#mx as usize)) + } + }; + match ty { + &Opaque => quote!(xdr_codec::pack_opaque_flex(&#val, #maxsz, out)?), + &String => quote!(xdr_codec::pack_string(&#val, #maxsz, out)?), + _ => quote!(xdr_codec::pack_flex(&#val, #maxsz, out)?), + } + } + + &Array(ref ty, _) => { + let ty = ty.as_ref(); + match ty { + &Opaque | &String => { + quote!(xdr_codec::pack_opaque_array(&#val[..], #val.len(), out)?) + } + _ => quote!(xdr_codec::pack_array(&#val[..], #val.len(), out, None)?), + } + } + + _ => quote!(#val.pack(out)?), + }; + + trace!("packed {:?} val {:?} => {:?}", self, val, res); + Ok(res) + } + + fn is_syn(&self) -> bool { + use self::Type::*; + + match self { + &Opaque | &String | &Option(_) | &Ident(..) 
| &Int | &UInt | &Hyper | &UHyper + | &Float | &Double | &Quadruple | &Bool => true, + _ => false, + } + } + + fn unpacker(&self, symtab: &Symtab) -> Tokens { + use self::Type::*; + + match self { + &Array(ref ty, ref value) => { + let ty = ty.as_ref(); + let value = value.as_token(symtab); + + match ty { + &Opaque | &String => { + quote!({ + let mut buf: [u8; #value as usize] = unsafe { ::std::mem::uninitialized() }; + let sz = xdr_codec::unpack_opaque_array(input, &mut buf[..], #value as usize)?; + (buf, sz) + }) + } + ty => { + let ty = ty.as_token(symtab).unwrap(); + // Create the return array as uninitialized, since we don't know what to initialize it until + // we can deserialize values. We don't even have a guaranteed value we can populate it with, since + // the type may not implement Default (and it would be a waste anyway, since they're going to be + // replaced). + // + // However, having an uninitialized array makes for lots of awkward corner cases. + // Even in the common case, we can't simply use `unpack_array`, as it will replace each element + // by assignment, but that will Drop any existing value - but in this case that will be undefined + // as they're uninitialized. So we need to use `unpack_array_with` that allows us to specify a function + // which does the initializing assignment. In this case we use `ptr::write` which overwrites memory + // without Dropping the current contents. + // + // With that solved, we also need to deal with the error cases, where the array could be partially + // initialized. For this case, `unpack_array_with` also takes a drop function which deinitializes + // the partially initialized elements, so the array is left uninitialized in the failure case. + // We can then just use `mem::forget` to dispose of the whole thing. + // + // We also need to catch panics to make sure the buf is forgotten. It may be partially initialized then + // it may leak, but that's better than calling Drop on uninitialized elements. 
+ quote!({ + #[inline] + fn uninit_ptr_setter(p: &mut T, v: T) { + unsafe { ::std::ptr::write(p, v) } + } + #[inline] + fn uninit_ptr_dropper(p: &mut T) { + unsafe { ::std::ptr::drop_in_place(p) } + } + let mut buf: [#ty; #value as usize] = unsafe { ::std::mem::uninitialized() }; + let res = ::std::panic::catch_unwind( + ::std::panic::AssertUnwindSafe(|| + xdr_codec::unpack_array_with( + input, &mut buf[..], #value as usize, uninit_ptr_setter, uninit_ptr_dropper, None))); + + let sz = match res { + Ok(Ok(sz)) => sz, + Ok(Err(err)) => { ::std::mem::forget(buf); return Err(err); } + Err(panic) => { ::std::mem::forget(buf); ::std::panic::resume_unwind(panic); } + }; + (buf, sz) + }) + } + } + } + + &Flex(ref ty, ref maxsz) => { + let ty = ty.as_ref(); + let maxsz = match maxsz { + &None => quote!(None), + &Some(ref mx) => { + let mx = mx.as_token(symtab); + quote!(Some(#mx as usize)) + } + }; + + match ty { + &String => quote!(xdr_codec::unpack_string(input, #maxsz)?), + &Opaque => quote!(xdr_codec::unpack_opaque_flex(input, #maxsz)?), + _ => quote!(xdr_codec::unpack_flex(input, #maxsz)?), + } + } + + _ => quote!(xdr_codec::Unpack::unpack(input)?), + } + } + + fn as_token(&self, symtab: &Symtab) -> Result { + use self::Type::*; + + let ret = match self { + &Int => quote!(i32), + &UInt => quote!(u32), + &Hyper => quote!(i64), + &UHyper => quote!(u64), + &Float => quote!(f32), + &Double => quote!(f64), + &Quadruple => quote!(f128), + &Bool => quote!(bool), + + &String => quote!(String), + &Opaque => quote!(Vec), + + &Option(ref ty) => { + let ty = ty.as_ref(); + let tok = ty.as_token(symtab)?; + if ty.is_boxed(symtab) { + quote!(Option>) + } else { + quote!(Option<#tok>) + } + } + + &Array(ref ty, ref sz) => { + let ty = ty.as_ref(); + match ty { + &String | &Opaque => { + let sztok = sz.as_token(symtab); + quote!([u8; #sztok as usize]) + } + ref ty => { + let tytok = ty.as_token(symtab)?; + let sztok = sz.as_token(symtab); + quote!([#tytok; #sztok as usize]) + } + } + 
} + + &Flex(ref ty, _) => { + let ty = ty.as_ref(); + match ty { + &String => quote!(String), + &Opaque => quote!(Vec), + ref ty => { + let tok = ty.as_token(symtab)?; + quote!(Vec<#tok>) + } + } + } + + &Ident(ref name, _) => { + let id = quote_ident(name.as_str()); + quote!(#id) + } + + _ => return Err(format!("can't have unnamed type {:?}", self).into()), + }; + Ok(ret) + } +} + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub struct EnumDefn(pub String, pub Option); + +impl EnumDefn { + fn new>(id: S, val: Option) -> EnumDefn { + EnumDefn(id.as_ref().to_string(), val) + } +} + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub struct UnionCase(Value, Decl); + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub enum Decl { + Void, + Named(String, Type), +} + +impl Decl { + fn named>(id: S, ty: Type) -> Decl { + Decl::Named(id.as_ref().to_string(), ty) + } + + fn name_as_ident(&self) -> Option<(quote::Ident, &Type)> { + use self::Decl::*; + match self { + &Void => None, + &Named(ref name, ref ty) => Some((quote_ident(name), ty)), + } + } + + fn as_token(&self, symtab: &Symtab) -> Result> { + use self::Decl::*; + match self { + &Void => Ok(None), + &Named(ref name, ref ty) => { + let nametok = quote_ident(name.as_str()); + let mut tok = ty.as_token(symtab)?; + if false && ty.is_boxed(symtab) { + tok = quote!(Box<#tok>) + }; + Ok(Some((nametok, tok))) + } + } + } + + fn derivable(&self, symtab: &Symtab, memo: &mut HashMap) -> Derives { + use self::Decl::*; + match self { + &Void => Derives::all(), + &Named(_, ref ty) => ty.derivable(symtab, Some(memo)), + } + } +} + +// Specification of a named type +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub struct Typespec(pub String, pub Type); + +// Named synonym for a type +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub struct Typesyn(pub String, pub Type); + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub struct 
Const(pub String, pub i64); + +#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone)] +pub enum Defn { + Typespec(String, Type), + Typesyn(String, Type), + Const(String, i64), +} + +impl Defn { + fn typespec>(id: S, ty: Type) -> Defn { + Defn::Typespec(id.as_ref().to_string(), ty) + } + + fn typesyn>(id: S, ty: Type) -> Defn { + Defn::Typesyn(id.as_ref().to_string(), ty) + } + + fn constant>(id: S, v: i64) -> Defn { + Defn::Const(id.as_ref().to_string(), v) + } +} + +pub trait Emit { + fn define(&self, symtab: &Symtab) -> Result; +} + +pub trait Emitpack: Emit { + fn pack(&self, symtab: &Symtab) -> Result>; + fn unpack(&self, symtab: &Symtab) -> Result>; +} + +impl Emit for Const { + fn define(&self, _: &Symtab) -> Result { + let name = quote_ident(&self.0); + let val = &self.1; + + Ok(quote!(pub const #name: i64 = #val;)) + } +} + +impl Emit for Typesyn { + fn define(&self, symtab: &Symtab) -> Result { + let ty = &self.1; + let name = quote_ident(&self.0); + let tok = ty.as_token(symtab)?; + Ok(quote!(pub type #name = #tok;)) + } +} + +impl Emit for Typespec { + fn define(&self, symtab: &Symtab) -> Result { + use self::Type::*; + + let name = quote_ident(&self.0); + let ty = &self.1; + + let ret = match ty { + &Enum(ref edefs) => { + let defs: Vec<_> = edefs + .iter() + .filter_map(|&EnumDefn(ref field, _)| { + if let Some((val, Some(_))) = symtab.getconst(field) { + Some((quote_ident(field), val as isize)) + } else { + None + } + }) + .map(|(field, val)| quote!(#field = #val,)) + .collect(); + + let derive = ty.derivable(symtab, None); + quote!(#derive pub enum #name { #(#defs)* }) + } + + &Struct(ref decls) => { + let decls: Vec<_> = decls + .iter() + .filter_map(|decl| result_option(decl.as_token(symtab))) + .map(|res| res.map(|(field, ty)| quote!(pub #field: #ty,))) + .collect::>>()?; + + let derive = ty.derivable(symtab, None); + quote! 
{ + #derive + pub struct #name { #(#decls)* } + } + } + + &Union(ref selector, ref cases, ref defl) => { + let selector = selector.as_ref(); + use self::Decl::*; + use self::Value::*; + + let labelfields = false; // true - include label in enum branch + + // return true if case is compatible with the selector + let compatcase = |case: &Value| { + let seltype = match selector { + &Void => return false, + &Named(_, ref ty) => ty, + }; + + match case { + &Const(val) if val < 0 => match seltype { + &Int | &Hyper => true, + _ => false, + }, + + &Const(_) => match seltype { + &Int | &Hyper | &UInt | &UHyper => true, + _ => false, + }, + + &Ident(ref id) => { + if *seltype == Bool { + id == "TRUE" || id == "FALSE" + } else { + if let &Type::Ident(ref selname, _) = seltype { + match symtab.getconst(id) { + Some((_, Some(ref scope))) => scope == selname, + _ => false, + } + } else { + false + } + } + } + } + }; + + let mut cases: Vec<_> = cases + .iter() + .map(|&UnionCase(ref val, ref decl)| { + if !compatcase(val) { + return Err(Error::from(format!( + "incompat selector {:?} case {:?}", + selector, val + ))); + } + + let label = val.as_ident(); + + match decl { + &Void => Ok(quote!(#label,)), + &Named(ref name, ref ty) => { + let mut tok = ty.as_token(symtab)?; + if false && ty.is_boxed(symtab) { + tok = quote!(Box<#tok>) + }; + if labelfields { + let name = quote_ident(name); + Ok(quote!(#label { #name : #tok },)) + } else { + Ok(quote!(#label(#tok),)) + } + } + } + }) + .collect::>>()?; + + if let &Some(ref def_val) = defl { + let def_val = def_val.as_ref(); + match def_val { + &Named(ref name, ref ty) => { + let mut tok = ty.as_token(symtab)?; + if ty.is_boxed(symtab) { + tok = quote!(Box<#tok>) + }; + if labelfields { + let name = quote_ident(name); + cases.push(quote!(default { #name: #tok },)) + } else { + cases.push(quote!(default(#tok),)) + } + } + &Void => cases.push(quote!(default,)), + } + } + + let derive = ty.derivable(symtab, None); + quote! 
{ + #derive + pub enum #name { #(#cases)* } + } + } + + &Flex(..) | &Array(..) => { + let tok = ty.as_token(symtab)?; + let derive = ty.derivable(symtab, None); + quote! { + #derive + pub struct #name(pub #tok); + } + } + + _ => { + let tok = ty.as_token(symtab)?; + quote!(pub type #name = #tok;) + } + }; + Ok(ret) + } +} + +impl Emitpack for Typespec { + fn pack(&self, symtab: &Symtab) -> Result> { + use self::Decl::*; + use self::Type::*; + + let name = quote_ident(&self.0); + let ty = &self.1; + let mut directive = quote!(); + + let body: Tokens = match ty { + &Enum(_) => { + directive = quote!(#[inline]); + ty.packer(quote!(self), symtab)? + } + + &Struct(ref decl) => { + let decls: Vec<_> = decl + .iter() + .filter_map(|d| match d { + &Void => None, + &Named(ref name, ref ty) => Some((quote_ident(name), ty)), + }) + .map(|(field, ty)| { + let p = ty.packer(quote!(self.#field), symtab).unwrap(); + quote!(#p + ) + }) + .collect(); + quote!(#(#decls)* 0) + } + + &Union(_, ref cases, ref defl) => { + let mut matches: Vec<_> = cases + .iter() + .filter_map(|&UnionCase(ref val, ref decl)| { + let label = val.as_ident(); + let disc = val.as_token(symtab); + + let ret = match decl { + &Void => quote!(&#name::#label => (#disc as i32).pack(out)?,), + &Named(_, ref ty) => { + let pack = match ty.packer(quote!(val), symtab) { + Err(_) => return None, + Ok(p) => p, + }; + quote!(&#name::#label(ref val) => (#disc as i32).pack(out)? + #pack,) + } + }; + Some(ret) + }) + .collect(); + + if let &Some(ref decl) = defl { + let decl = decl.as_ref(); + // Can't cast a value-carrying enum to i32 + let default = match decl { + &Void => { + quote! { + &#name::default => return Err(xdr_codec::Error::invalidcase(-1)), + } + } + &Named(_, _) => { + quote! { + &#name::default(_) => return Err(xdr_codec::Error::invalidcase(-1)), + } + } + }; + + matches.push(default) + } + + quote!(match self { #(#matches)* }) + } + + // Array and Flex types are wrapped in tuple structs + &Flex(..) 
| &Array(..) => ty.packer(quote!(self.0), symtab)?, + + &Ident(_, _) => return Ok(None), + + _ => { + if ty.is_prim(symtab) { + return Ok(None); + } else { + ty.packer(quote!(self), symtab)? + } + } + }; + + trace!("body {:?}", body); + + Ok(Some(quote! { + impl xdr_codec::Pack for #name { + #directive + fn pack(&self, out: &mut Out) -> xdr_codec::Result { + Ok(#body) + } + } + })) + } + + fn unpack(&self, symtab: &Symtab) -> Result> { + use self::Decl::*; + use self::Type::*; + + let name = quote_ident(&self.0); + let ty = &self.1; + let mut directive = quote!(); + + let body = match ty { + &Enum(ref defs) => { + directive = quote!(#[inline]); + let matchdefs: Vec<_> = defs + .iter() + .filter_map(|&EnumDefn(ref name, _)| { + let tok = quote_ident(name); + if let Some((ref _val, ref scope)) = symtab.getconst(name) { + // let val = *val as i32; + if let &Some(ref scope) = scope { + let scope = quote_ident(scope); + // Some(quote!(#val => #scope :: #tok,)) + Some(quote!(x if x == #scope :: #tok as i32 => #scope :: #tok,)) + } else { + // Some(quote!(#val => #tok,)) + Some(quote!(x if x == #tok as i32 => #tok,)) + } + } else { + println!("unknown ident {}", name); + None + } + }) + .collect(); + + quote!({ + let (e, esz): (i32, _) = xdr_codec::Unpack::unpack(input)?; + sz += esz; + match e { + #(#matchdefs)* + e => return Err(xdr_codec::Error::invalidenum(e)) + } + }) + } + + &Struct(ref decls) => { + let decls: Vec<_> = decls + .iter() + .filter_map(|decl| decl.name_as_ident()) + .map(|(field, ty)| { + let unpack = ty.unpacker(symtab); + quote!(#field: { let (v, fsz) = #unpack; sz += fsz; v },) + }) + .collect(); + + quote!(#name { #(#decls)* }) + } + + &Union(ref sel, ref cases, ref defl) => { + let sel = sel.as_ref(); + let mut matches: Vec<_> = + cases.iter() + .map(|&UnionCase(ref val, ref decl)| { + let label = val.as_ident(); + let disc = match val.as_i64(symtab) { + Some(v) => v as i32, + None => return Err(Error::from(format!("discriminant value {:?} 
unknown", val))), + }; + + let ret = match decl { + //&Void => quote!(#disc => #name::#label,), + &Void => quote!(x if x == (#disc as i32) => #name::#label,), + &Named(_, ref ty) => { + let unpack = ty.unpacker(symtab); + //quote!(#disc => #name::#label({ let (v, fsz) = #unpack; sz += fsz; v }),) + quote!(x if x == (#disc as i32) => #name::#label({ let (v, fsz) = #unpack; sz += fsz; v }),) + }, + }; + Ok(ret) + }) + .collect::>>()?; + + if let &Some(ref decl) = defl { + let decl = decl.as_ref(); + let defl = match decl { + &Void => quote!(_ => #name::default), + &Named(_, ref ty) => { + let unpack = ty.unpacker(symtab); + quote!(_ => #name::default({ + let (v, csz) = #unpack; + sz += csz; + v + })) + } + }; + + matches.push(defl); + } else { + let defl = quote!(v => return Err(xdr_codec::Error::invalidcase(v as i32))); + matches.push(defl); + } + + let selunpack = match sel { + &Void => panic!("void switch selector?"), + &Named(_, ref ty) => ty.unpacker(symtab), + }; + + quote!(match { let (v, dsz): (i32, _) = #selunpack; sz += dsz; v } { #(#matches)* }) + } + + &Option(_) => ty.unpacker(symtab), + + &Flex(_, _) | &Array(_, _) => { + let unpk = ty.unpacker(symtab); + quote!({ let (v, usz) = #unpk; sz = usz; #name(v) }) + } + + &Ident(_, _) => return Ok(None), + + _ if ty.is_prim(symtab) => return Ok(None), + _ => return Err(Error::from(format!("unimplemented ty={:?}", ty))), + }; + + Ok(Some(quote! 
{ + impl xdr_codec::Unpack for #name { + #directive + fn unpack(input: &mut In) -> xdr_codec::Result<(#name, usize)> { + let mut sz = 0; + Ok((#body, sz)) + } + } + })) + } +} + +#[derive(Debug, Clone)] +pub struct Symtab { + consts: BTreeMap)>, + typespecs: BTreeMap, + typesyns: BTreeMap, +} + +impl Symtab { + pub fn new(defns: &Vec) -> Symtab { + let mut ret = Symtab { + consts: BTreeMap::new(), + typespecs: BTreeMap::new(), + typesyns: BTreeMap::new(), + }; + + ret.update_consts(&defns); + + ret + } + + fn update_consts(&mut self, defns: &Vec) { + for defn in defns { + match defn { + &Defn::Typespec(ref name, ref ty) => { + self.deftype(name, ty); + self.update_enum_consts(name, ty); + } + + &Defn::Const(ref name, val) => self.defconst(name, val, None), + + &Defn::Typesyn(ref name, ref ty) => { + self.deftypesyn(name, ty); + } + } + } + } + + fn update_enum_consts(&mut self, scope: &String, ty: &Type) { + let mut err = stderr(); + let mut prev = -1; + + if let &Type::Enum(ref edefn) = ty { + for &EnumDefn(ref name, ref maybeval) in edefn { + let v = match maybeval { + &None => prev + 1, + &Some(ref val) => match self.value(val) { + Some(c) => c, + None => { + let _ = writeln!(&mut err, "Unknown value {:?}", val); + continue; + } + }, + }; + + prev = v; + + // println!("enum {} -> {}", name, v); + self.defconst(name, v, Some(scope.clone())); + } + } + } + + fn defconst>(&mut self, name: S, val: i64, scope: Option) { + self.consts.insert(From::from(name.as_ref()), (val, scope)); + } + + fn deftype>(&mut self, name: S, ty: &Type) { + self.typespecs.insert(From::from(name.as_ref()), ty.clone()); + } + + pub fn deftypesyn>(&mut self, name: S, ty: &Type) { + self.typesyns.insert(From::from(name.as_ref()), ty.clone()); + } + + pub fn getconst(&self, name: &String) -> Option<(i64, Option)> { + match self.consts.get(name) { + None => None, + Some(c) => Some(c.clone()), + } + } + + pub fn value(&self, val: &Value) -> Option { + match val { + &Value::Const(c) => Some(c), + 
&Value::Ident(ref id) => self.getconst(id).map(|(v, _)| v), + } + } + + pub fn typespec(&self, name: &String) -> Option<&Type> { + match self.typespecs.get(name) { + None => match self.typesyns.get(name) { + None => None, + Some(ty) => Some(ty), + }, + Some(ty) => Some(ty), + } + } + + pub fn constants(&self) -> Iter)> { + self.consts.iter() + } + + pub fn typespecs(&self) -> Iter { + self.typespecs.iter() + } + + pub fn typesyns(&self) -> Iter { + self.typesyns.iter() + } +} + +#[cfg(test)] +mod test; diff --git a/src/proxy/rust-xdr/xdrgen/src/spec/test.rs b/src/proxy/rust-xdr/xdrgen/src/spec/test.rs new file mode 100644 index 00000000..31824e9a --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/src/spec/test.rs @@ -0,0 +1,252 @@ +use super::super::generate; +use super::specification; +use std::io::Cursor; + +#[test] +fn typedef_void() { + let s = specification( + r#" +typedef void; /* syntactically defined, semantically meaningless */ +"#, + ); + + println!("spec {:?}", s); + assert!(s.is_err()) +} + +#[test] +fn kwishnames() { + let kws = vec![ + "bool", + "case", + "const", + "default", + "double", + "enum", + "float", + "hyper", + "int", + "opaque", + "quadruple", + "string", + "struct", + "switch", + "typedef", + "union", + "unsigned", + "void", + ]; + let specs = vec![ + "const {}x = 1;", + "struct {}x { int i; };", + "struct foo { int {}x; };", + "typedef int {}x;", + "union {}x switch (int x) { case 1: void; };", + "union x switch (int {}x) { case 1: void; };", + "union x switch (int y) { case 1: int {}x; };", + ]; + + for sp in &specs { + for kw in &kws { + let spec = sp.replace("{}", kw); + let s = specification(&spec); + println!("spec {} => {:?}", spec, s); + assert!(s.is_ok()) + } + } +} + +#[test] +fn kwnames() { + let kws = vec![ + "bool", + "case", + "const", + "default", + "double", + "enum", + "float", + "hyper", + "int", + "opaque", + "quadruple", + "string", + "struct", + "switch", + "typedef", + "union", + "unsigned", + "void", + ]; + let specs = 
vec![ + "const {} = 1;", + "struct {} { int i; };", + "struct foo { int {}; };", + "typedef int {};", + "union {} switch (int x) { case 1: void; };", + "union x switch (int {}) { case 1: void; };", + "union x switch (int y) { case 1: int {}; };", + ]; + + for sp in &specs { + for kw in &kws { + let spec = sp.replace("{}", kw); + let s = specification(&spec); + println!("spec {} => {:?}", spec, s); + assert!(s.is_err()) + } + } +} + +#[test] +fn inline_struct() { + let spec = r#" + struct thing { + struct { int a; int b; } thing; + }; +"#; + let s = specification(spec); + + println!("spec {:?}", s); + assert!(s.is_ok()); + + let g = generate("", Cursor::new(spec.as_bytes()), Vec::new()); + assert!(g.is_err()); +} + +#[test] +fn inline_union() { + let spec = r#" + struct thing { + union switch(int x) { case 0: int a; case 1: int b; } thing; + }; +"#; + let s = specification(spec); + + println!("spec {:?}", s); + assert!(s.is_ok()); + + let g = generate("", Cursor::new(spec.as_bytes()), Vec::new()); + assert!(g.is_err()); +} + +#[test] +fn case_type() { + let specs = vec![ + "enum Foo { A, B, C }; union Bar switch (Foo x) { case A: void; case B: void; case C: void; };", + "union Bar switch (int x) { case 1: void; case 2: void; case 3: void; };", + ]; + + for sp in specs { + let s = specification(sp); + println!("spec sp \"{}\" => {:?}", sp, s); + assert!(s.is_ok()); + + let g = generate("", Cursor::new(sp.as_bytes()), Vec::new()); + assert!(g.is_ok()); + } +} + +#[test] +fn case_type_mismatch() { + let specs = vec![ + "enum Foo { A, B, C}; union Bar switch (Foo x) { case 1: void; case 2: void; case 3: void; };", + "enum Foo { A, B, C}; union Bar switch (int x) { case A: void; case B: void; case C: void; };", + ]; + + for sp in specs { + let s = specification(sp); + println!("spec sp \"{}\" => {:?}", sp, s); + assert!(s.is_ok()); + + let g = generate("", Cursor::new(sp.as_bytes()), Vec::new()); + assert!(g.is_err()); + } +} + +#[test] +fn constants() { + let specs = 
vec![ + "const A = 0;", + "const A = 0x0;", + "const A = 00;", + "const A = -0;", + "const A = 0x123;", + "const A = 0123;", + "const A = -0123;", + "const A = 123;", + "const A = -123;", + ]; + + for sp in specs { + let s = specification(sp); + println!("spec sp \"{}\" => {:?}", sp, s); + assert!(s.is_ok()); + + let g = generate("", Cursor::new(sp.as_bytes()), Vec::new()); + assert!(g.is_ok()); + } +} + +#[test] +fn union_simple() { + let s = specification( + r#" +union foo switch (int x) { +case 0: + int val; +}; +"#, + ); + println!("spec {:?}", s); + assert!(s.is_ok()) +} + +#[test] +fn union_default() { + let s = specification( + r#" +union foo switch (int x) { +case 0: + int val; +default: + void; +}; +"#, + ); + println!("spec {:?}", s); + assert!(s.is_ok()) +} + +#[test] +fn union_default_nonempty() { + let s = specification( + r#" +union foo switch (int x) { +case 0: + int val; +default: + bool bye; +}; +"#, + ); + println!("spec {:?}", s); + assert!(s.is_ok()) +} + +#[test] +fn fallthrough_case() { + let s = specification( + r#" +union foo switch (int x) { + case 0: + case 1: + int val; + case 2: + void; +}; +"#, + ); + println!("spec {:?}", s); + assert!(s.is_ok()) +} diff --git a/src/proxy/rust-xdr/xdrgen/src/spec/xdr_nom.rs b/src/proxy/rust-xdr/xdrgen/src/spec/xdr_nom.rs new file mode 100644 index 00000000..1f68dec4 --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/src/spec/xdr_nom.rs @@ -0,0 +1,953 @@ +// Grammar for a .x file specifying XDR type codecs. Does not include any RPC syntax. Should match RFC4506. 
+use nom::IResult::*; +use nom::{is_digit, is_space, not_line_ending, Err, ErrorKind, IResult, Needed}; + +use std::str; + +use super::{Decl, Defn, EnumDefn, Type, UnionCase, Value}; +use super::{CLONE, COPY, DEBUG, EQ, PARTIALEQ}; + +#[inline] +fn ignore(_: T) -> () { + () +} + +// Complete tag +fn ctag>(input: &[u8], tag: T) -> IResult<&[u8], &[u8]> { + complete!(input, tag!(tag.as_ref())) +} + +fn eof(input: &[u8]) -> IResult<&[u8], ()> { + if input.len() == 0 { + IResult::Done(input, ()) + } else { + IResult::Error(Err::Position(ErrorKind::Eof, input)) + } +} + +pub fn specification(input: &str) -> Result, String> { + match spec(input.as_bytes()) { + Done(_, spec) => Ok(spec), + Error(Err::Position(kind, input)) => Err(format!( + "{:?}: {}", + kind, + String::from(str::from_utf8(input).unwrap()) + )), + Error(err) => Err(format!("Error: {:?}", err)), + Incomplete(need) => Err(format!("Incomplete {:?}", need)), + } +} + +named!( + spec>, + do_parse!( + opt!(directive) >> + defns: many0!(definition) >> + spaces >> eof >> + (defns)) +); + +#[test] +fn test_spec() { + assert_eq!(spec(&b"#include "[..]), Done(&b""[..], vec!())); + + assert_eq!( + spec(&b"// hello\n#include "[..]), + Done(&b""[..], vec!()) + ); + + assert_eq!( + spec(&b"#include \ntypedef int foo;"[..]), + Done(&b""[..], vec!(Defn::typesyn("foo", Type::Int))) + ); + + assert_eq!( + spec( + &br#" +/* test file */ +#define foo bar +const mip = 123; +% passthrough +typedef int foo; +struct bar { + int a; + int b; +}; +#include "other" +enum bop { a = 2, b = 1 }; +"#[..] 
+ ), + Done( + &b""[..], + vec!( + Defn::constant("mip", 123), + Defn::typesyn("foo", Type::Int), + Defn::typespec( + "bar", + Type::Struct(vec!( + Decl::named("a", Type::Int), + Decl::named("b", Type::Int) + )) + ), + Defn::typespec( + "bop", + Type::Enum(vec!( + EnumDefn::new("a", Some(Value::Const(2))), + EnumDefn::new("b", Some(Value::Const(1))) + )) + ) + ) + ) + ); +} + +named!( + definition, + alt!(type_def => { |t| t } | + const_def => { |c| c }) +); + +fn is_hexdigit(ch: u8) -> bool { + match ch as char { + '0'..='9' | 'A'..='F' | 'a'..='f' => true, + _ => false, + } +} + +fn is_octdigit(ch: u8) -> bool { + match ch as char { + '0'..='7' => true, + _ => false, + } +} + +fn digit bool>(input: &[u8], isdigit: F) -> IResult<&[u8], &[u8]> { + for (idx, item) in input.iter().enumerate() { + if !isdigit(*item) { + if idx == 0 { + return Error(Err::Position(ErrorKind::Digit, input)); + } else { + return Done(&input[idx..], &input[0..idx]); + } + } + } + Incomplete(Needed::Unknown) +} + +named!(lbrace, preceded!(spaces, apply!(ctag, "{"))); +named!(rbrace, preceded!(spaces, apply!(ctag, "}"))); +named!(lbrack, preceded!(spaces, apply!(ctag, "["))); +named!(rbrack, preceded!(spaces, apply!(ctag, "]"))); +named!(lparen, preceded!(spaces, apply!(ctag, "("))); +named!(rparen, preceded!(spaces, apply!(ctag, ")"))); +named!(lt, preceded!(spaces, apply!(ctag, "<"))); +named!(gt, preceded!(spaces, apply!(ctag, ">"))); +named!(colon, preceded!(spaces, apply!(ctag, ":"))); +named!(semi, preceded!(spaces, apply!(ctag, ";"))); +named!(comma, preceded!(spaces, apply!(ctag, ","))); +named!(eq, preceded!(spaces, apply!(ctag, "="))); +named!(star, preceded!(spaces, apply!(ctag, "*"))); + +named!( + hexnumber, + do_parse!( + apply!(ctag, "0x") >> + val: map_res!(apply!(digit, is_hexdigit), str::from_utf8) >> + (i64::from_str_radix(val, 16).unwrap()) + ) +); + +named!( + octnumber, + do_parse!( + sign: opt!(apply!(ctag, "-")) >> + apply!(ctag, "0") >> + val: 
opt!(map_res!(apply!(digit, is_octdigit), str::from_utf8)) >> + (i64::from_str_radix(val.unwrap_or("0"), 8).unwrap() * (if sign.is_some() { -1 } else { 1 })) + ) +); + +named!( + decnumber, + do_parse!( + sign: opt!(apply!(ctag, "-")) >> + val: map_res!(apply!(digit, is_digit), str::from_utf8) >> + (i64::from_str_radix(val, 10).unwrap() * (if sign.is_some() { -1 } else { 1 })) + ) +); + +named!( + number, + preceded!(spaces, alt!(hexnumber | octnumber | decnumber)) +); + +#[test] +fn test_nums() { + // Complete number + assert_eq!(number(&b"0x12344+"[..]), Done(&b"+"[..], 0x12344)); + assert_eq!(number(&b"012344+"[..]), Done(&b"+"[..], 0o12344)); + assert_eq!(number(&b"-012344+"[..]), Done(&b"+"[..], -0o12344)); + assert_eq!(number(&b"12344+"[..]), Done(&b"+"[..], 12344)); + assert_eq!(number(&b"-12344+"[..]), Done(&b"+"[..], -12344)); + assert_eq!(number(&b"0+"[..]), Done(&b"+"[..], 0)); + assert_eq!(number(&b"-0+"[..]), Done(&b"+"[..], 0)); + + // Space prefix number + assert_eq!(number(&b" 0x12344+"[..]), Done(&b"+"[..], 0x12344)); + assert_eq!(number(&b" 012344+"[..]), Done(&b"+"[..], 0o12344)); + assert_eq!(number(&b" -012344+"[..]), Done(&b"+"[..], -0o12344)); + assert_eq!(number(&b" 12344+"[..]), Done(&b"+"[..], 12344)); + assert_eq!(number(&b" -12344+"[..]), Done(&b"+"[..], -12344)); + assert_eq!(number(&b" 0+"[..]), Done(&b"+"[..], 0)); + assert_eq!(number(&b" -0+"[..]), Done(&b"+"[..], 0)); + + // Incomplete number + assert_eq!(number(&b"0x12344"[..]), Incomplete(Needed::Unknown)); + assert_eq!(number(&b"012344"[..]), Incomplete(Needed::Unknown)); + assert_eq!(number(&b"-012344"[..]), Incomplete(Needed::Unknown)); + assert_eq!(number(&b"12344"[..]), Incomplete(Needed::Unknown)); + assert_eq!(number(&b"-12344"[..]), Incomplete(Needed::Unknown)); + assert_eq!(number(&b"0"[..]), Incomplete(Needed::Unknown)); + assert_eq!(number(&b"-0"[..]), Incomplete(Needed::Unknown)); +} + +fn token(input: &[u8]) -> IResult<&[u8], &[u8]> { + let input = ws(input); + + for 
(idx, item) in input.iter().enumerate() { + match *item as char { + 'a'..='z' | 'A'..='Z' | '_' => continue, + '0'..='9' if idx > 0 => continue, + _ => { + if idx > 0 { + return Done(&input[idx..], &input[0..idx]); + } else { + return Error(Err::Position(ErrorKind::AlphaNumeric, input)); + } + } + } + } + Incomplete(Needed::Unknown) +} + +macro_rules! kw { + ($fnname:ident, $kw:expr) => { + fn $fnname(input: &[u8]) -> IResult<&[u8], ()> { + match token(input) { + Done(rest, val) => { + if val == $kw { + Done(rest, ()) + } else { + Error(Err::Position(ErrorKind::Custom(0), input)) + } + } + Error(e) => Error(e), + Incomplete(_) => { + // If its either incomplete but longer that what we're looking for, or what we + // have doesn't match, then its not for us. + if input.len() > $kw.len() || input != &$kw[..input.len()] { + Error(Err::Position(ErrorKind::Custom(0), input)) + } else { + Incomplete(Needed::Size($kw.len() - input.len())) + } + } + } + } + }; +} + +kw!(kw_bool, b"bool"); +kw!(kw_case, b"case"); +kw!(kw_char, b"char"); // special case - part time keyword +kw!(kw_const, b"const"); +kw!(kw_default, b"default"); +kw!(kw_double, b"double"); +kw!(kw_enum, b"enum"); +kw!(kw_float, b"float"); +kw!(kw_hyper, b"hyper"); +kw!(kw_int, b"int"); +kw!(kw_long, b"long"); // special case - part time keyword +kw!(kw_opaque, b"opaque"); +kw!(kw_quadruple, b"quadruple"); +kw!(kw_short, b"short"); // special case - part time keyword +kw!(kw_string, b"string"); +kw!(kw_struct, b"struct"); +kw!(kw_switch, b"switch"); +kw!(kw_typedef, b"typedef"); +kw!(kw_union, b"union"); +kw!(kw_unsigned, b"unsigned"); +kw!(kw_void, b"void"); + +named!( + keyword<()>, + alt!( + kw_bool + | kw_case + | kw_const + | kw_default + | kw_double + | kw_enum + | kw_float + | kw_hyper + | kw_int + | kw_opaque + | kw_quadruple + | kw_string + | kw_struct + | kw_switch + | kw_typedef + | kw_union + | kw_unsigned + | kw_void + ) +); + +#[test] +fn test_kw() { + let kws = vec![ + "bool", + "case", + 
"const", + "default", + "double", + "enum", + "float", + "hyper", + "int", + "opaque", + "quadruple", + "string", + "struct", + "switch", + "typedef", + "union", + "unsigned", + "void", + ]; + + for k in &kws { + println!("testing \"{}\"", k); + match keyword((*k).as_bytes()) { + Incomplete(_) => (), + err => panic!("failed \"{}\": {:?}", k, err), + } + } + + for k in &kws { + println!("testing \"{} \"", k); + match keyword((String::from(*k) + " ").as_bytes()) { + Done(rest, ()) if rest == &b" "[..] => (), + err => panic!("failed \"{} \": {:?}", k, err), + } + } + + for k in &kws { + println!("testing \"{}x \"", k); + match keyword((String::from(*k) + "x ").as_bytes()) { + Error(_) => (), + err => panic!("failed \"{}x \": {:?}", k, err), + } + } + + for k in &kws { + println!("testing \"{}x \"", k); + match keyword((String::from(" ") + *k + " ").as_bytes()) { + Done(rest, ()) if rest == &b" "[..] => (), + err => panic!("failed \" {} \": {:?}", k, err), + } + } + + for nk in &vec!["boo", "in", "inx", "booll"] { + match keyword((*nk).as_bytes()) { + e @ Done(..) => panic!("{:?} => {:?}", nk, e), + e => println!("{:?} => {:?}", nk, e), + } + } +} + +fn ident(input: &[u8]) -> IResult<&[u8], &str> { + // Grab an identifier and make sure it isn't a keyword + match token(input) { + Done(rest, val) => match keyword(input) { + Done(..) => Error(Err::Position(ErrorKind::Custom(1), val)), + Error(..) | Incomplete(..) 
=> Done(rest, str::from_utf8(val).unwrap()), + }, + Error(e) => Error(e), + Incomplete(need) => Incomplete(need), + } +} + +#[test] +fn test_ident() { + assert_eq!(ident(&b"foo "[..]), Done(&b" "[..], "foo")); + assert_eq!(ident(&b" foo "[..]), Done(&b" "[..], "foo")); + assert_eq!( + ident(&b" bool "[..]), + Error(Err::Position(ErrorKind::Custom(1), &b"bool"[..])) + ); +} + +named!( + blockcomment<()>, + do_parse!(apply!(ctag, "/*") >> take_until_and_consume!(&b"*/"[..]) >> (())) +); + +// `linecomment`, and `directive` end at eol, but do not consume it +named!( + linecomment<()>, + do_parse!(apply!(ctag, "//") >> opt!(not_line_ending) >> peek!(alt!(eol | eof)) >> (())) +); + +// Directive should always follow eol unless its the first thing in the file +named!( + directive<()>, + do_parse!( + opt!(whitespace) + >> alt!(apply!(ctag, "#") | apply!(ctag, "%")) + >> opt!(not_line_ending) + >> peek!(alt!(eol | eof)) + >> (()) + ) +); + +#[test] +fn test_comments() { + assert_eq!(blockcomment(&b"/* foo */bar"[..]), Done(&b"bar"[..], ())); + assert_eq!( + blockcomment(&b"/* blip /* foo */bar"[..]), + Done(&b"bar"[..], ()) + ); + assert_eq!( + blockcomment(&b"x"[..]), + Error(Err::Position(ErrorKind::Tag, &b"x"[..])) + ); + assert_eq!(linecomment(&b"// foo\nbar"[..]), Done(&b"\nbar"[..], ())); + assert_eq!(linecomment(&b"// foo bar\n "[..]), Done(&b"\n "[..], ())); + assert_eq!( + linecomment(&b"x"[..]), + Error(Err::Position(ErrorKind::Tag, &b"x"[..])) + ); + + assert_eq!(directive(&b"#define foo bar\n "[..]), Done(&b"\n "[..], ())); + assert_eq!( + directive(&b"%#define foo bar\n "[..]), + Done(&b"\n "[..], ()) + ); + + assert_eq!( + directive(&b"x"[..]), + Error(Err::Position(ErrorKind::Alt, &b"x"[..])) + ); + + assert_eq!( + preceded!(&b"\n#define x\n"[..], eol, directive), + Done(&b"\n"[..], ()) + ); +} + +named!( + eol<()>, + map!( + alt!( + apply!(ctag, "\n") + | apply!(ctag, "\r\n") + | apply!(ctag, "\u{2028}") + | apply!(ctag, "\u{2029}") + ), + ignore + ) +); + 
+named!(whitespace<()>, map!(take_while1!(is_space), ignore)); + +// `spaces` consumes spans of space and tab characters interpolated +// with comments, c-preproc and passthrough lines. +named!( + spaces<()>, + map!( + many0!(alt!( + do_parse!(eol >> opt!(complete!(directive)) >> (())) + | whitespace + | blockcomment + | linecomment + )), + ignore + ) +); + +fn ws(input: &[u8]) -> &[u8] { + match spaces(input) { + Done(rest, _) => rest, + _ => input, + } +} + +#[test] +fn test_spaces() { + assert_eq!(eol(&b"\nx"[..]), Done(&b"x"[..], ())); + assert_eq!(eol(&b"\r\nx"[..]), Done(&b"x"[..], ())); + assert_eq!(eol(&b"\nx"[..]), Done(&b"x"[..], ())); + + assert_eq!( + whitespace(&b"x"[..]), + Error(Err::Position(ErrorKind::TakeWhile1, &b"x"[..])) + ); + assert_eq!(whitespace(&b" x"[..]), Done(&b"x"[..], ())); + assert_eq!(whitespace(&b" x"[..]), Done(&b"x"[..], ())); + assert_eq!(whitespace(&b"\tx"[..]), Done(&b"x"[..], ())); + assert_eq!(whitespace(&b" \tx"[..]), Done(&b"x"[..], ())); + assert_eq!(whitespace(&b"\t x"[..]), Done(&b"x"[..], ())); + + assert_eq!(spaces(&b"x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b"\nx"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b" x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b" x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b"\n\n x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b"\r\n x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b"//foo\n x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b"/*\n*/ x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b"\n#define a b\n x"[..]), Done(&b"x"[..], ())); + assert_eq!(spaces(&b"\n%foo a b\n x"[..]), Done(&b"x"[..], ())); +} + +named!(enum_type_spec>, preceded!(kw_enum, enum_body)); + +named!( + enum_body>, + do_parse!( + lbrace >> + b: separated_nonempty_list!(comma, enum_assign) >> + rbrace >> + (b) + ) +); + +named!( + enum_assign, + do_parse!( + id: ident >> + v: opt!(preceded!(eq, value)) >> + (EnumDefn::new(id, v)) + ) +); + +named!( + value, + 
alt!(number => { |c| Value::Const(c) } | + ident => { |id| Value::ident(id) } + ) +); + +named!( + struct_type_spec>, + preceded!(kw_struct, struct_body) +); + +named!( + struct_body>, + do_parse!( + lbrace >> + decls: many1!(terminated!(declaration, semi)) >> + rbrace >> + (decls) + ) +); + +named!( + union_type_spec<(Decl, Vec, Option)>, + do_parse!(kw_union >> body:union_body >> (body)) +); + +named!( + union_body<(Decl, Vec, Option)>, + do_parse!( + kw_switch >> lparen >> decl:declaration >> rparen >> + lbrace >> + ucss: many1!(union_case) >> + dfl: opt!(union_default) >> + rbrace >> + (decl, ucss.into_iter().flat_map(|v| v).collect(), dfl) + ) +); + +named!( + union_case>, + do_parse!( + vs: many1!(do_parse!(kw_case >> v:value >> colon >> (v))) >> + decl: declaration >> semi >> + (vs.into_iter().map(|v| UnionCase(v, decl.clone())).collect()) + ) +); + +named!( + union_default, + do_parse!( + kw_default >> colon >> + decl: declaration >> semi >> + (decl) + ) +); + +named!( + declaration, + alt!(kw_void => { |_| Decl::Void } | + nonvoid_declaration) +); + +named!( + nonvoid_declaration, + alt!( + do_parse!(ty: array_type_spec >> id: ident >> lbrack >> sz:value >> rbrack >> + (Decl::named(id, Type::array(ty, sz)))) + | do_parse!(ty: array_type_spec >> id: ident >> lt >> sz:opt!(value) >> gt >> + (Decl::named(id, Type::flex(ty, sz)))) + | do_parse!(ty: type_spec >> star >> id: ident >> + (Decl::named(id, Type::option(ty)))) + | do_parse!(ty: type_spec >> id: ident >> + (Decl::named(id, ty))) + ) +); + +named!( + array_type_spec, + alt!(kw_opaque => { |_| Type::Opaque } | + kw_string => { |_| Type::String } | + type_spec + ) +); + +#[test] +fn test_decls() { + assert_eq!(declaration(&b"void "[..]), Done(&b" "[..], Decl::Void)); + + assert_eq!( + declaration(&b"int foo;"[..]), + Done(&b";"[..], Decl::named("foo", Type::Int)) + ); + assert_eq!( + declaration(&b"int foo[123] "[..]), + Done( + &b" "[..], + Decl::named("foo", Type::Array(Box::new(Type::Int), 
Value::Const(123))) + ) + ); + + assert_eq!( + declaration(&b"int foo<123> "[..]), + Done( + &b" "[..], + Decl::named( + "foo", + Type::Flex(Box::new(Type::Int), Some(Value::Const(123))) + ) + ) + ); + assert_eq!( + declaration(&b"int foo<> "[..]), + Done( + &b" "[..], + Decl::named("foo", Type::Flex(Box::new(Type::Int), None)) + ) + ); + assert_eq!( + declaration(&b"int *foo "[..]), + Done( + &b" "[..], + Decl::named("foo", Type::Option(Box::new(Type::Int))) + ) + ); + + assert_eq!( + declaration(&b"opaque foo[123] "[..]), + Done( + &b" "[..], + Decl::named( + "foo", + Type::Array(Box::new(Type::Opaque), Value::Const(123)) + ) + ) + ); + assert_eq!( + declaration(&b"opaque foo<123> "[..]), + Done( + &b" "[..], + Decl::named( + "foo", + Type::Flex(Box::new(Type::Opaque), Some(Value::Const(123))) + ) + ) + ); + assert_eq!( + declaration(&b"opaque foo<> "[..]), + Done( + &b" "[..], + Decl::named("foo", Type::Flex(Box::new(Type::Opaque), None)) + ) + ); + + assert_eq!( + declaration(&b"string foo<123> "[..]), + Done( + &b" "[..], + Decl::named( + "foo", + Type::Flex(Box::new(Type::String), Some(Value::Const(123))) + ) + ) + ); + assert_eq!( + declaration(&b"string foo<> "[..]), + Done( + &b" "[..], + Decl::named("foo", Type::Flex(Box::new(Type::String), None)) + ) + ); +} + +named!( + type_spec, + preceded!( + spaces, + alt!( + do_parse!(kw_unsigned >> kw_int >> (Type::UInt)) | + do_parse!(kw_unsigned >> kw_long >> (Type::UInt)) | // backwards compat with rpcgen + do_parse!(kw_unsigned >> kw_char >> // backwards compat with rpcgen + (Type::ident_with_derives("u8", COPY | CLONE | EQ | PARTIALEQ | DEBUG))) | + do_parse!(kw_unsigned >> kw_short >> (Type::UInt)) | // backwards compat with rpcgen + do_parse!(kw_unsigned >> kw_hyper >> (Type::UHyper)) | + kw_unsigned => { |_| Type::UInt } | // backwards compat with rpcgen + kw_long => { |_| Type::Int } | // backwards compat with rpcgen + kw_char => { // backwards compat with rpcgen + |_| Type::ident_with_derives("i8", COPY 
| CLONE | EQ | PARTIALEQ | DEBUG) + } | + kw_short => { |_| Type::Int } | // backwards compat with rpcgen + kw_int => { |_| Type::Int } | + kw_hyper => { |_| Type::Hyper } | + kw_float => { |_| Type::Float } | + kw_double => { |_| Type::Double } | + kw_quadruple => { |_| Type::Quadruple } | + kw_bool => { |_| Type::Bool } | + enum_type_spec => { |defns| Type::Enum(defns) } | + struct_type_spec => { |defns| Type::Struct(defns) } | + do_parse!(kw_struct >> id:ident >> (Type::ident(id))) | // backwards compat with rpcgen + union_type_spec => { |u| Type::union(u) } | + ident => { |id| Type::ident(id) } + ) + ) +); + +#[test] +fn test_type() { + assert_eq!(type_spec(&b"int "[..]), Done(&b" "[..], Type::Int)); + assert_eq!( + type_spec(&b"unsigned int "[..]), + Done(&b" "[..], Type::UInt) + ); + assert_eq!( + type_spec(&b"unsigned\nint "[..]), + Done(&b" "[..], Type::UInt) + ); + assert_eq!( + type_spec(&b"unsigned/* foo */int "[..]), + Done(&b" "[..], Type::UInt) + ); + assert_eq!( + type_spec(&b"unsigned//\nint "[..]), + Done(&b" "[..], Type::UInt) + ); + + assert_eq!( + type_spec(&b"unsigned hyper "[..]), + Done(&b" "[..], Type::UHyper) + ); + + assert_eq!( + type_spec(&b"unsigned char "[..]), + Done( + &b" "[..], + Type::Ident("u8".into(), Some(COPY | CLONE | EQ | PARTIALEQ | DEBUG)) + ) + ); + assert_eq!( + type_spec(&b"unsigned short "[..]), + Done(&b" "[..], Type::UInt) + ); + + assert_eq!(type_spec(&b" hyper "[..]), Done(&b" "[..], Type::Hyper)); + assert_eq!(type_spec(&b" double "[..]), Done(&b" "[..], Type::Double)); + assert_eq!( + type_spec(&b"// thing\nquadruple "[..]), + Done(&b" "[..], Type::Quadruple) + ); + assert_eq!( + type_spec(&b"// thing\n bool "[..]), + Done(&b" "[..], Type::Bool) + ); + + assert_eq!( + type_spec(&b"char "[..]), + Done( + &b" "[..], + Type::Ident("i8".into(), Some(COPY | CLONE | EQ | PARTIALEQ | DEBUG)) + ) + ); + + assert_eq!(type_spec(&b"short "[..]), Done(&b" "[..], Type::Int)); + + assert_eq!( + type_spec(&b"struct { int a; int 
b; } "[..]), + Done( + &b" "[..], + Type::Struct(vec!( + Decl::named("a", Type::Int), + Decl::named("b", Type::Int) + )) + ) + ); + + assert_eq!( + type_spec(&b"union switch (int a) { case 1: void; case 2: int a; default: void; } "[..]), + Done( + &b" "[..], + Type::Union( + Box::new(Decl::named("a", Type::Int)), + vec!( + UnionCase(Value::Const(1), Decl::Void), + UnionCase(Value::Const(2), Decl::named("a", Type::Int)) + ), + Some(Box::new(Decl::Void)) + ) + ) + ); +} + +#[test] +fn test_enum() { + assert_eq!( + type_spec(&b"enum { a, b, c } "[..]), + Done( + &b" "[..], + Type::Enum(vec!( + EnumDefn::new("a", None), + EnumDefn::new("b", None), + EnumDefn::new("c", None) + )) + ) + ); + + assert_eq!( + type_spec(&b"enum { a = 1, b, c } "[..]), + Done( + &b" "[..], + Type::Enum(vec!( + EnumDefn::new("a", Some(Value::Const(1))), + EnumDefn::new("b", None), + EnumDefn::new("c", None) + )) + ) + ); + + assert_eq!( + type_spec(&b"enum { a = Bar, b, c } "[..]), + Done( + &b" "[..], + Type::Enum(vec!( + EnumDefn::new("a", Some(Value::ident("Bar"))), + EnumDefn::new("b", None), + EnumDefn::new("c", None) + )) + ) + ); + + assert_eq!( + type_spec(&b"enum { } "[..]), + Error(Err::Position(ErrorKind::Alt, &b"enum { } "[..])) + ); +} + +named!( + const_def, + do_parse!( + kw_const >> id:ident >> eq >> v:number >> semi >> + (Defn::constant(id, v))) +); + +#[test] +fn test_const() { + assert_eq!( + const_def(&b"const foo = 123;"[..]), + Done(&b""[..], Defn::constant("foo", 123)) + ); +} + +named!( + type_def, + alt!( + do_parse!(kw_typedef >> decl: nonvoid_declaration >> semi >> + ({ + match decl.clone() { + Decl::Named(name, ty) => { + if ty.is_syn() { + Defn::typesyn(name, ty) + } else { + Defn::typespec(name, ty) + } + }, + Decl::Void => panic!("void non-void declaration?"), + } + }) + ) | do_parse!(kw_enum >> id:ident >> e:enum_body >> semi >> (Defn::typespec(id, Type::Enum(e)))) + | do_parse!(kw_struct >> id:ident >> s:struct_body >> semi >> (Defn::typespec(id, 
Type::Struct(s)))) + | do_parse!(kw_union >> id:ident >> u:union_body >> semi >> (Defn::typespec(id, Type::union(u)))) + ) +); + +#[test] +fn test_typedef() { + assert_eq!( + type_def(&b"typedef int foo;"[..]), + Done(&b""[..], Defn::typesyn("foo", Type::Int)) + ); + assert_eq!( + type_def(&b"typedef unsigned int foo;"[..]), + Done(&b""[..], Defn::typesyn("foo", Type::UInt)) + ); + assert_eq!( + type_def(&b"typedef int foo<>;"[..]), + Done( + &b""[..], + Defn::typespec("foo", Type::Flex(Box::new(Type::Int), None)) + ) + ); + + assert_eq!( + type_def(&b"enum foo { a };"[..]), + Done( + &b""[..], + Defn::typespec("foo", Type::Enum(vec!(EnumDefn::new("a", None)))) + ) + ); + + assert_eq!( + type_def(&b"struct foo { int a; };"[..]), + Done( + &b""[..], + Defn::typespec("foo", Type::Struct(vec!(Decl::named("a", Type::Int)))) + ) + ); + + assert_eq!( + type_def(&b"union foo switch(int a) { case 1: int a; };"[..]), + Done( + &b""[..], + Defn::typespec( + "foo", + Type::Union( + Box::new(Decl::named("a", Type::Int)), + vec!(UnionCase(Value::Const(1), Decl::named("a", Type::Int))), + None + ) + ) + ) + ); +} diff --git a/src/proxy/rust-xdr/xdrgen/src/xdrgen.rs b/src/proxy/rust-xdr/xdrgen/src/xdrgen.rs new file mode 100644 index 00000000..d3a90181 --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/src/xdrgen.rs @@ -0,0 +1,42 @@ +#![crate_type = "bin"] + +extern crate clap; +extern crate env_logger; +extern crate xdrgen; + +use std::fs::File; +use std::io::{stderr, stdin, stdout}; +use std::io::{BufReader, Write}; + +use clap::App; + +use xdrgen::generate; + +fn main() { + let _ = env_logger::init(); + + let matches = App::new("XDR code generator") + .version(env!("CARGO_PKG_VERSION")) + .arg_from_usage("[FILE] 'Set .x file'") + .get_matches(); + + let output = stdout(); + let mut err = stderr(); + + let res = if let Some(fname) = matches.value_of("FILE") { + let f = match File::open(fname) { + Ok(f) => f, + Err(e) => { + let _ = writeln!(&mut err, "Failed to open {}: {}", fname, 
e); + std::process::exit(1); + } + }; + generate(fname, BufReader::new(f), output) + } else { + generate("stdin", BufReader::new(stdin()), output) + }; + + if let Err(e) = res { + let _ = writeln!(&mut err, "Failed: {}", e); + } +} diff --git a/src/proxy/rust-xdr/xdrgen/tests/lib.rs b/src/proxy/rust-xdr/xdrgen/tests/lib.rs new file mode 100644 index 00000000..527778e5 --- /dev/null +++ b/src/proxy/rust-xdr/xdrgen/tests/lib.rs @@ -0,0 +1,321 @@ +extern crate tempdir; +extern crate xdr_codec; +extern crate xdrgen; +#[macro_use] +extern crate error_chain; + +use std::fs::{create_dir_all, File}; +use std::io::{Cursor, Write}; +use std::process::Command; + +use xdr_codec::Result; +use xdrgen::generate; + +fn build_test(name: &str, xdr_spec: &str) -> Result<()> { + let tempdir = tempdir::TempDir::new("build").expect("Failed to make tempdir"); + let dir = tempdir.path(); + + println!("tempdir {:?}", dir); + let _ = create_dir_all(&dir); + + let mainfile = dir.join(format!("{}.rs", name)); + let testfile = dir.join(format!("{}_xdr.rs", name)); + let cargohome = dir.join(".cargo"); + let cargotoml = dir.join("Cargo.toml"); + + let toml = format!( + r#" +[package] +name = "test" +version = "0.0.0" +publish = false + +[lib] +name = "test" +path = "{}" + +[dependencies] +xdr-codec = {{ path = "{}" }} +"#, + mainfile.as_os_str().to_string_lossy(), + std::env::current_dir()? 
+ .join("../xdr-codec") + .as_os_str() + .to_string_lossy() + ); + + let template = format!( + r#" +#![allow(dead_code, non_camel_case_types, unused_assignments, unused_imports)] +extern crate xdr_codec; + +mod test {{ + use xdr_codec; + include!("{}"); +}} + +fn main() {{}} +"#, + testfile.as_os_str().to_string_lossy() + ); + + { + let mut main = File::create(&mainfile)?; + main.write_all(template.as_bytes())?; + } + + { + let mut cargo = File::create(&cargotoml)?; + cargo.write_all(toml.as_bytes())?; + } + + let _ = create_dir_all(&cargohome); + + { + let test = File::create(&testfile)?; + generate(name, Cursor::new(xdr_spec.as_bytes()), test)?; + } + + let compile = { + let mut cmd = Command::new("cargo"); + let cmd = cmd + .current_dir(std::env::current_dir()?) + //.env("CARGO_HOME", cargohome) + .arg("test") + .arg("--manifest-path") + .arg(cargotoml); + println!("CWD: {:?} Command: {:?}", std::env::current_dir(), cmd); + cmd.output()? + }; + + println!( + "stdout: {}\n, stderr: {}", + String::from_utf8_lossy(&compile.stdout), + String::from_utf8_lossy(&compile.stderr) + ); + + if compile.status.success() { + Ok(()) + } else { + bail!("couldn't compile") + } +} + +#[test] +fn typedef_arrays() { + let name = "typedef_arrays"; + let spec = r#" +typedef opaque buf1<20>; +typedef opaque buf2[10]; +typedef opaque buf3<>; +"#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn recursive_type() { + let name = "recursive_type"; + let spec = r#" +struct list { list *next; }; +"#; + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn union_with_default() { + let name = "union_with_default"; + let spec = r#" +union foo switch (int bar) { +case 1: + int val; +default: + void; +}; +"#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn union_default_nonempty() { + let name = "union_default_nonempty"; + let 
spec = r#" +union foo switch (int bar) { +case 1: + int val; +default: + opaque buf<>; +}; +"#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn simple() { + let name = "simple"; + let specs = vec![ + "struct foo { int bar; unsigned int blat; hyper foo; unsigned hyper hyperfoo; };", + "const blop = 123;", + "typedef opaque Ioaddr<>;", + ]; + + for (i, spec) in specs.into_iter().enumerate() { + let name = format!("{}_{}", name, i); + + if let Err(e) = build_test(&name, spec) { + panic!("test {} failed: {}", name, e); + } + } +} + +#[test] +fn rfc4506() { + let name = "rfc4506"; + let spec = r#" + + const MAXUSERNAME = 32; /* max length of a user name */ + const MAXFILELEN = 65535; /* max length of a file */ + const MAXNAMELEN = 255; /* max length of a file name */ + + /* + * Types of files: + */ + enum filekind { + TEXT = 0, /* ascii data */ + DATA = 1, /* raw data */ + EXEC = 2 /* executable */ + }; + + /* + * File information, per kind of file: + */ + union filetype switch (filekind kind) { + case TEXT: + void; /* no extra information */ + case DATA: + string creator; /* data creator */ + case EXEC: + string interpretor; /* program interpretor */ + }; + + /* + * A complete file: + */ + struct file { + string filename; /* name of file */ + filetype type; /* info about file */ + string owner; /* owner of file */ + opaque data; /* file data */ + }; +"#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn enums() { + let name = "enums"; + let spec = r#" + enum Foo { + A = 0, + B = -1 + }; + struct Bar { Foo x; }; + "#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn unions() { + let name = "unions"; + let spec = r#" + enum Foo { + A = 0, + B = -1 + }; + union foo switch (Foo bar) { + case A: int val; + case B: void; + default: int other; + }; + union foo2 switch (Foo bar) { + case A: void; + case 
B: int a; + default: int other; + }; + "#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn consts() { + let name = "consts"; + let spec = r#" + const FOO = 1; + const BAR = -1; + "#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn arrays() { + let name = "arrays"; + let spec = r#" + struct a { opaque data[15]; }; + struct b { int things[10]; }; + struct c { string decitweet[14]; }; + struct d { c tweetses[10]; }; + struct big { c tweetses[100]; }; + "#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn flex() { + let name = "flex"; + let spec = r#" + struct a { opaque data<>; opaque limdata<15>; }; + struct b { string s<>; string limstr<32>; }; + struct c { a athing<>; a alim<10>; }; + "#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} + +#[test] +fn derive_float() { + let name = "derive_float"; + let spec = r#" + struct a { float a; double b; }; + "#; + + if let Err(e) = build_test(name, spec) { + panic!("test {} failed: {}", name, e); + } +} diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index 71ef204e..c3ff1baf 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.3.3" +VERSION = "2.4.0" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" @@ -184,6 +184,12 @@ EFS_PROXY_BIN = "efs-proxy" +OPTIMIZE_READAHEAD_ITEM = "optimize_readahead" +DEFAULT_NFS_MAX_READAHEAD_MULTIPLIER = 15 +NFS_READAHEAD_CONFIG_PATH_FORMAT = "/sys/class/bdi/%s:%s/read_ahead_kb" +DEFAULT_RSIZE = 1048576 +UBUNTU_24_RELEASE = "Ubuntu 24" + def fatal_error(user_message, log_message=None): if log_message is None: @@ -1265,6 +1271,7 @@ def check_efs_mounts( # Set unmount count to 0 if there were inconsistent reads 
state["unmount_count"] = 0 rewrite_state_file(state, state_file_dir, state_file) + if "certificate" in state: check_certificate(config, state, state_file_dir, state_file) @@ -1278,6 +1285,107 @@ def check_efs_mounts( logging.warning("TLS tunnel for %s is not running", state_file) restart_tls_tunnel(child_procs, state, state_file_dir, state_file) + verify_and_update_readahead( + nfs_mounts[mount].mountpoint, config, nfs_mounts[mount] + ) + + +# This function serves as a safeguard mechanism specifically for Ubuntu 24, +# where the initial readahead setting might be overwritten due to system +# processes. It checks the current readahead value and updates it if necessary. +def verify_and_update_readahead(mount, config, mount_info): + try: + system_release_version = get_system_release_version() + if UBUNTU_24_RELEASE not in system_release_version: + return + + should_optimize_readahead = get_boolean_config_item_value( + config, MOUNT_CONFIG_SECTION, OPTIMIZE_READAHEAD_ITEM, default_value=False + ) + if not should_optimize_readahead: + return + + # Use subprocess with timeout to get device number to avoid hanging on os.stat() + # when NFS mount is stuck (e.g., security group blocks traffic) + process = subprocess.Popen( + ["stat", "-c", "%d", mount], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) + try: + stdout, _ = process.communicate(timeout=2) + device_number = int(stdout.decode().strip()) + major, minor = decode_device_number(device_number) + except subprocess.TimeoutExpired: + process.kill() + logging.warning( + "Timeout getting device number for %s, skipping readahead check", mount + ) + return + except Exception as e: + logging.warning("Failed to get device number for %s: %s", mount, e) + return + + read_ahead_kb_config_file = NFS_READAHEAD_CONFIG_PATH_FORMAT % (major, minor) + + # Use subprocess with timeout to read sysfs to avoid hanging when kernel is stuck + process = subprocess.Popen( + ["cat", read_ahead_kb_config_file], + 
stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) + try: + stdout, _ = process.communicate(timeout=2) + current_readahead_kb = int(stdout.strip()) + except subprocess.TimeoutExpired: + process.kill() + logging.warning("Timeout reading readahead for %s, skipping", mount) + return + except Exception as e: + logging.warning("Failed to read readahead for %s: %s", mount, e) + return + + opts = mount_info.options + rsize = DEFAULT_RSIZE + for opt in opts.split(","): + if opt.startswith("rsize="): + rsize = int(opt.split("=")[1]) + break + + expected_readahead_kb = int(DEFAULT_NFS_MAX_READAHEAD_MULTIPLIER * rsize / 1024) + + if current_readahead_kb != expected_readahead_kb: + logging.info( + "Readahead value incorrect for %s. Expected: %d, Current: %d. Updating...", + mount, + expected_readahead_kb, + current_readahead_kb, + ) + p = subprocess.Popen( + "echo %s > %s" % (expected_readahead_kb, read_ahead_kb_config_file), + shell=True, + stderr=subprocess.PIPE, + stdout=subprocess.DEVNULL, + ) + _, error = p.communicate() + if p.returncode != 0: + logging.warning( + 'Failed to modify read_ahead_kb: %s with returncode: %d, error: "%s".' 
+ % (expected_readahead_kb, p.returncode, error.strip()) + ) + + except Exception as e: + logging.warning("Failed to verify/update readahead for %s: %s", mount, str(e)) + + +# https://github.com/torvalds/linux/blob/master/include/linux/kdev_t.h#L48-L49 +def decode_device_number(device_number): + major = (device_number & 0xFFF00) >> 8 + minor = (device_number & 0xFF) | ((device_number >> 12) & 0xFFF00) + return major, minor + def check_stunnel_health( config, state, state_file_dir, state_file, child_procs, nfs_mounts diff --git a/test/mount_efs_test/test_get_aws_security_credentials.py b/test/mount_efs_test/test_get_aws_security_credentials.py index 3e75014e..daba5fec 100644 --- a/test/mount_efs_test/test_get_aws_security_credentials.py +++ b/test/mount_efs_test/test_get_aws_security_credentials.py @@ -583,6 +583,6 @@ def test_get_aws_security_credentials_pod_identity_invalid_token_file(mocker): mocker.patch("builtins.open", side_effect=IOError("File not found")) with pytest.raises(SystemExit) as ex: - mount_efs.get_aws_security_credentials(config, True, "us-east-1") + mount_efs.get_aws_security_credentials_from_pod_identity(config, True) assert ex.value.code == 1 diff --git a/test/mount_efs_test/test_get_nfs_mount_options.py b/test/mount_efs_test/test_get_nfs_mount_options.py index cf7d1d5d..e114103a 100644 --- a/test/mount_efs_test/test_get_nfs_mount_options.py +++ b/test/mount_efs_test/test_get_nfs_mount_options.py @@ -147,14 +147,6 @@ def test_tlsport(): assert "tls" not in nfs_opts -def test_fsap_efs_only(): - options = dict(DEFAULT_OPTIONS) - options["fsap"] = None - nfs_opts = mount_efs.get_nfs_mount_options(options, _get_config()) - - assert "fsap" not in nfs_opts - - def test_get_default_nfs_mount_options_macos(mocker): mocker.patch("mount_efs.check_if_platform_is_mac", return_value=True) nfs_opts = mount_efs.get_nfs_mount_options(dict(DEFAULT_OPTIONS), _get_config()) diff --git a/test/mount_efs_test/test_optimize_readahead_window.py 
b/test/mount_efs_test/test_optimize_readahead_window.py index a1a70712..b0ddfc5d 100644 --- a/test/mount_efs_test/test_optimize_readahead_window.py +++ b/test/mount_efs_test/test_optimize_readahead_window.py @@ -7,6 +7,7 @@ # import os import subprocess +import time import mount_efs @@ -185,6 +186,42 @@ def test_optimize_readahead_should_apply_failed_with_exception(mocker, tmpdir): ) +def test_optimize_readahead_ubuntu_24(mocker, tmpdir): + mock_config = _get_new_mock_config(enable_optimize_readahead=True) + _mock_should_revise_readahead(mocker, True) + mocker.patch( + "mount_efs.get_system_release_version", return_value="Ubuntu 24.04 LTS" + ) + mocker.patch( + "mount_efs.NFS_READAHEAD_CONFIG_PATH_FORMAT", + str(tmpdir) + "/%s:%s/read_ahead_kb", + ) + mocker.patch( + "os.stat", + return_value=generate_os_stat_result(st_dev=DEFAULT_MOUNT_DEVICE_NUMBER), + ) + + expected_major, expected_minor = mount_efs.decode_device_number( + DEFAULT_MOUNT_DEVICE_NUMBER + ) + os.mkdir(str(tmpdir) + "/%s:%s" % (expected_major, expected_minor)) + + mount_efs.optimize_readahead_window(MOUNT_POINT, DEFAULT_OPTIONS, mock_config) + + expected_readahead_kb_value = int( + mount_efs.DEFAULT_NFS_MAX_READAHEAD_MULTIPLIER + * int(DEFAULT_OPTIONS["rsize"]) + / 1024 + ) + + # Check if the value was set correctly after the delay + time.sleep(3) + with open( + str(tmpdir) + "/%s:%s/read_ahead_kb" % (expected_major, expected_minor) + ) as file: + assert expected_readahead_kb_value == int(file.read().strip()) + + def generate_os_stat_result( st_mode=0, st_ino=0, diff --git a/test/watchdog_test/test_verify_and_optimize_readahead.py b/test/watchdog_test/test_verify_and_optimize_readahead.py new file mode 100644 index 00000000..c20b1fba --- /dev/null +++ b/test/watchdog_test/test_verify_and_optimize_readahead.py @@ -0,0 +1,432 @@ +import os +import subprocess +from collections import namedtuple +from unittest.mock import MagicMock, patch + +import watchdog + +# Constants +MOUNT_POINT = "/mnt/efs" 
+UBUNTU_24_RELEASE = "Ubuntu 24" +DEFAULT_RSIZE = 1048576 +DEFAULT_MOUNT_DEVICE_NUMBER = 1048761 +DEFAULT_NFS_MAX_READAHEAD_MULTIPLIER = 15 +NFS_READAHEAD_CONFIG_PATH_FORMAT = "/sys/class/bdi/%s:%s/read_ahead_kb" +MOUNT_CONFIG_SECTION = "mount" +OPTIMIZE_READAHEAD_ITEM = "optimize_readahead" + +# Mock Mount namedtuple +Mount = namedtuple( + "Mount", ["server", "mountpoint", "type", "options", "freq", "passno"] +) + + +def test_verify_and_update_readahead_ubuntu_24(mocker, tmpdir): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + mocker.patch( + "watchdog.NFS_READAHEAD_CONFIG_PATH_FORMAT", + str(tmpdir) + "/%s:%s/read_ahead_kb", + ) + + expected_major, expected_minor = watchdog.decode_device_number( + DEFAULT_MOUNT_DEVICE_NUMBER + ) + os.mkdir(str(tmpdir) + "/%s:%s" % (expected_major, expected_minor)) + + # Set up mock file for read_ahead_kb + read_ahead_file = tmpdir.join( + "/%s:%s/read_ahead_kb" % (expected_major, expected_minor) + ) + read_ahead_file.write("128") # Initial incorrect value + + # Call the function + mock_stat_process = MagicMock() + mock_stat_process.communicate.return_value = ( + str(DEFAULT_MOUNT_DEVICE_NUMBER).encode(), + b"", + ) + mock_stat_process.returncode = 0 + + mock_cat_process = MagicMock() + mock_cat_process.communicate.return_value = (b"128", b"") + mock_cat_process.returncode = 0 + + mock_echo_process = MagicMock() + mock_echo_process.returncode = 0 + + def echo_communicate(*args, **kwargs): + read_ahead_file.write("15360") + return (b"", b"") + + mock_echo_process.communicate = echo_communicate + + def mock_popen(*args, **kwargs): + if len(args) > 0 and len(args[0]) > 0 and args[0][0] == "stat": + return mock_stat_process + elif len(args) > 0 and len(args[0]) > 0 and 
args[0][0] == "cat": + return mock_cat_process + elif kwargs.get("shell"): + return mock_echo_process + return MagicMock() + + mocker.patch("subprocess.Popen", side_effect=mock_popen) + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that the value was updated + assert read_ahead_file.read().strip() == "15360" # Expected value for 1048576 rsize + + +def test_verify_and_update_readahead_non_ubuntu_24(mocker, tmpdir): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value="Ubuntu 22.04") + mocker.patch( + "watchdog.NFS_READAHEAD_CONFIG_PATH_FORMAT", + str(tmpdir) + "/%s:%s/read_ahead_kb", + ) + + expected_major, expected_minor = watchdog.decode_device_number( + DEFAULT_MOUNT_DEVICE_NUMBER + ) + os.mkdir(str(tmpdir) + "/%s:%s" % (expected_major, expected_minor)) + + # Set up mock file for read_ahead_kb + read_ahead_file = tmpdir.join( + "/%s:%s/read_ahead_kb" % (expected_major, expected_minor) + ) + read_ahead_file.write("128") # Initial incorrect value + + # Call the function + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that the value was not updated + assert read_ahead_file.read().strip() == "128" + + +def test_verify_and_update_readahead_optimization_disabled(mocker, tmpdir): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=False) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + mocker.patch( + "watchdog.NFS_READAHEAD_CONFIG_PATH_FORMAT", + str(tmpdir) + "/%s:%s/read_ahead_kb", + ) + + expected_major, expected_minor = 
watchdog.decode_device_number( + DEFAULT_MOUNT_DEVICE_NUMBER + ) + os.mkdir(str(tmpdir) + "/%s:%s" % (expected_major, expected_minor)) + + # Set up mock file for read_ahead_kb + read_ahead_file = tmpdir.join( + "/%s:%s/read_ahead_kb" % (expected_major, expected_minor) + ) + read_ahead_file.write("128") # Initial incorrect value + + # Call the function + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that the value was not updated + assert read_ahead_file.read().strip() == "128" + + +def test_verify_and_update_readahead_exception_handling(mocker, tmpdir, caplog): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + mocker.patch( + "watchdog.NFS_READAHEAD_CONFIG_PATH_FORMAT", + str(tmpdir) + "/%s:%s/read_ahead_kb", + ) + mocker.patch("subprocess.Popen", side_effect=Exception("Test exception")) + + # Call the function + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that the exception was logged + assert ( + "Failed to verify/update readahead for /mnt/efs: Test exception" in caplog.text + ) + + +def test_verify_and_update_readahead_stat_timeout(mocker, caplog): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + + # Mock subprocess.Popen to raise TimeoutExpired for stat command + mock_process = MagicMock() + mock_process.communicate.side_effect = subprocess.TimeoutExpired("stat", 2) + mocker.patch("subprocess.Popen", return_value=mock_process) + + # Call 
the function + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that timeout was logged and process was killed + assert ( + "Timeout getting device number for /mnt/efs, skipping readahead check" + in caplog.text + ) + mock_process.kill.assert_called_once() + + +def test_verify_and_update_readahead_stat_exception(mocker, caplog): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + + # Mock subprocess.Popen to raise exception for stat command + mock_process = MagicMock() + mock_process.communicate.side_effect = ValueError("Invalid device") + mocker.patch("subprocess.Popen", return_value=mock_process) + + # Call the function + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that exception was logged + assert "Failed to get device number for /mnt/efs: Invalid device" in caplog.text + + +def test_verify_and_update_readahead_cat_timeout(mocker, tmpdir, caplog): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + + # Mock stat command to succeed, cat command to timeout + mock_stat_process = MagicMock() + mock_stat_process.communicate.return_value = ( + str(DEFAULT_MOUNT_DEVICE_NUMBER).encode(), + b"", + ) + mock_stat_process.returncode = 0 + + mock_cat_process = MagicMock() + mock_cat_process.communicate.side_effect = subprocess.TimeoutExpired("cat", 2) + + def mock_popen(*args, **kwargs): + if len(args) > 0 and len(args[0]) > 0 and args[0][0] == "stat": + 
return mock_stat_process + elif len(args) > 0 and len(args[0]) > 0 and args[0][0] == "cat": + return mock_cat_process + return MagicMock() + + mocker.patch("subprocess.Popen", side_effect=mock_popen) + + # Call the function + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that timeout was logged and process was killed + assert "Timeout reading readahead for /mnt/efs, skipping" in caplog.text + mock_cat_process.kill.assert_called_once() + + +def test_verify_and_update_readahead_cat_exception(mocker, tmpdir, caplog): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount("server", MOUNT_POINT, "nfs4", "rsize=1048576", "0", "0") + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + + # Mock stat command to succeed, cat command to raise exception + mock_stat_process = MagicMock() + mock_stat_process.communicate.return_value = ( + str(DEFAULT_MOUNT_DEVICE_NUMBER).encode(), + b"", + ) + mock_stat_process.returncode = 0 + + mock_cat_process = MagicMock() + mock_cat_process.communicate.side_effect = IOError("Permission denied") + + def mock_popen(*args, **kwargs): + if len(args) > 0 and len(args[0]) > 0 and args[0][0] == "stat": + return mock_stat_process + elif len(args) > 0 and len(args[0]) > 0 and args[0][0] == "cat": + return mock_cat_process + return MagicMock() + + mocker.patch("subprocess.Popen", side_effect=mock_popen) + + # Call the function + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that exception was logged + assert "Failed to read readahead for /mnt/efs: Permission denied" in caplog.text + + +def test_verify_and_update_readahead_custom_rsize(mocker, tmpdir): + # Mock necessary functions and objects + mock_config = MagicMock() + mock_mount_info = Mount( + "server", MOUNT_POINT, "nfs4", "rsize=2097152", "0", "0" + ) # 
Custom rsize + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + mocker.patch( + "watchdog.NFS_READAHEAD_CONFIG_PATH_FORMAT", + str(tmpdir) + "/%s:%s/read_ahead_kb", + ) + + expected_major, expected_minor = watchdog.decode_device_number( + DEFAULT_MOUNT_DEVICE_NUMBER + ) + os.mkdir(str(tmpdir) + "/%s:%s" % (expected_major, expected_minor)) + + # Set up mock file for read_ahead_kb + read_ahead_file = tmpdir.join( + "/%s:%s/read_ahead_kb" % (expected_major, expected_minor) + ) + read_ahead_file.write("128") # Initial incorrect value + + # Mock processes + mock_stat_process = MagicMock() + mock_stat_process.communicate.return_value = ( + str(DEFAULT_MOUNT_DEVICE_NUMBER).encode(), + b"", + ) + mock_stat_process.returncode = 0 + + mock_cat_process = MagicMock() + mock_cat_process.communicate.return_value = (b"128", b"") + mock_cat_process.returncode = 0 + + mock_echo_process = MagicMock() + mock_echo_process.returncode = 0 + + def echo_communicate(*args, **kwargs): + # Expected readahead for rsize=2097152: 15 * 2097152 / 1024 = 30720 + read_ahead_file.write("30720") + return (b"", b"") + + mock_echo_process.communicate = echo_communicate + + def mock_popen(*args, **kwargs): + if len(args) > 0 and len(args[0]) > 0 and args[0][0] == "stat": + return mock_stat_process + elif len(args) > 0 and len(args[0]) > 0 and args[0][0] == "cat": + return mock_cat_process + elif kwargs.get("shell"): + return mock_echo_process + return MagicMock() + + mocker.patch("subprocess.Popen", side_effect=mock_popen) + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that the value was updated to correct value for custom rsize + assert read_ahead_file.read().strip() == "30720" # Expected value for 2097152 rsize + + +def test_verify_and_update_readahead_no_rsize_option(mocker, tmpdir): + # Mock necessary functions and objects + mock_config = 
MagicMock() + mock_mount_info = Mount( + "server", MOUNT_POINT, "nfs4", "proto=tcp,vers=4.1", "0", "0" + ) # No rsize + + mocker.patch("watchdog.get_boolean_config_item_value", return_value=True) + mocker.patch("watchdog.get_system_release_version", return_value=UBUNTU_24_RELEASE) + mocker.patch( + "watchdog.NFS_READAHEAD_CONFIG_PATH_FORMAT", + str(tmpdir) + "/%s:%s/read_ahead_kb", + ) + + expected_major, expected_minor = watchdog.decode_device_number( + DEFAULT_MOUNT_DEVICE_NUMBER + ) + os.mkdir(str(tmpdir) + "/%s:%s" % (expected_major, expected_minor)) + + # Set up mock file for read_ahead_kb + read_ahead_file = tmpdir.join( + "/%s:%s/read_ahead_kb" % (expected_major, expected_minor) + ) + read_ahead_file.write("128") # Initial incorrect value + + # Mock processes + mock_stat_process = MagicMock() + mock_stat_process.communicate.return_value = ( + str(DEFAULT_MOUNT_DEVICE_NUMBER).encode(), + b"", + ) + mock_stat_process.returncode = 0 + + mock_cat_process = MagicMock() + mock_cat_process.communicate.return_value = (b"128", b"") + mock_cat_process.returncode = 0 + + mock_echo_process = MagicMock() + mock_echo_process.returncode = 0 + + def echo_communicate(*args, **kwargs): + # Expected readahead for default rsize=1048576: 15 * 1048576 / 1024 = 15360 + read_ahead_file.write("15360") + return (b"", b"") + + mock_echo_process.communicate = echo_communicate + + def mock_popen(*args, **kwargs): + if len(args) > 0 and len(args[0]) > 0 and args[0][0] == "stat": + return mock_stat_process + elif len(args) > 0 and len(args[0]) > 0 and args[0][0] == "cat": + return mock_cat_process + elif kwargs.get("shell"): + return mock_echo_process + return MagicMock() + + mocker.patch("subprocess.Popen", side_effect=mock_popen) + watchdog.verify_and_update_readahead(MOUNT_POINT, mock_config, mock_mount_info) + + # Assert that the value was updated using default rsize + assert read_ahead_file.read().strip() == "15360" # Expected value for default rsize + + +def generate_os_stat_result( 
+ st_mode=0, + st_ino=0, + st_dev=0, + st_nlink=0, + st_uid=0, + st_gid=0, + st_size=0, + st_atime=0, + st_mtime=0, + st_ctime=0, +): + return os.stat_result( + ( + st_mode, + st_ino, + st_dev, + st_nlink, + st_uid, + st_gid, + st_size, + st_atime, + st_mtime, + st_ctime, + ) + ) From de938b624391bed196d20a8ed2ef3258bb10d9c4 Mon Sep 17 00:00:00 2001 From: Anthony Tse Date: Thu, 20 Nov 2025 18:05:27 +0000 Subject: [PATCH 44/51] efs-utils v2.4.1-1 release --- amazon-efs-utils.spec | 5 ++++- build-deb.sh | 2 +- config.ini | 2 +- dist/efs-utils.conf | 2 +- src/mount_efs/__init__.py | 2 +- src/proxy/Cargo.lock | 2 +- src/proxy/Cargo.toml | 2 +- src/watchdog/__init__.py | 2 +- 8 files changed, 11 insertions(+), 8 deletions(-) diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index 5eca2886..46fe2de6 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.4.0 +Version : 2.4.1 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -196,6 +196,9 @@ fi %clean %changelog +* Thu Nov 20 2025 Anthony Tse - 2.4.1 +- Add cafile override for eusc-de-east-1 in efs-utils.conf + * Fri Oct 03 2025 Yangjinan Hu - 2.4.0 - Upgrade s2n-tls version in efs-proxy to use AWS-LC - Add ubuntu24 and macOS Tahoe support efs-utils diff --git a/build-deb.sh b/build-deb.sh index 2eef495a..93cdee72 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.4.0 +VERSION=2.4.1 RELEASE=1 ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index f2e9b000..90025d23 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.4.0 +version=2.4.1 release=1 diff --git a/dist/efs-utils.conf b/dist/efs-utils.conf index 62382885..36e0f66e 100644 --- a/dist/efs-utils.conf 
+++ b/dist/efs-utils.conf @@ -68,7 +68,7 @@ stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem [mount.eusc-de-east-1] dns_name_suffix = amazonaws.eu - +stunnel_cafile = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem [mount.us-iso-east-1] dns_name_suffix = c2s.ic.gov diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 968c2738..3b20238f 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -86,7 +86,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.4.0" +VERSION = "2.4.1" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" diff --git a/src/proxy/Cargo.lock b/src/proxy/Cargo.lock index 8c7a20a1..66cf7425 100644 --- a/src/proxy/Cargo.lock +++ b/src/proxy/Cargo.lock @@ -337,7 +337,7 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "efs-proxy" -version = "2.4.0" +version = "2.4.1" dependencies = [ "anyhow", "async-trait", diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index 32930c61..cb469919 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. 
-version = "2.4.0" +version = "2.4.1" publish = false license = "MIT" diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index c3ff1baf..f66e7fb4 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.4.0" +VERSION = "2.4.1" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" From 413b60de02c706684d28af48aa033d460bf058ef Mon Sep 17 00:00:00 2001 From: Zachary Maguire Date: Wed, 3 Dec 2025 20:55:27 +0000 Subject: [PATCH 45/51] Fix log retention logic commenting out "retention_in_days = 14" in config should prevent log deletion, but was not behaving that way Github issue: https://github.com/aws/efs-utils/issues/318 --- src/mount_efs/__init__.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 3b20238f..e32f3836 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -107,7 +107,6 @@ CLOUDWATCH_LOG_SECTION = "cloudwatch-log" DEFAULT_CLOUDWATCH_LOG_GROUP = "/aws/efs/utils" DEFAULT_FALLBACK_ENABLED = True -DEFAULT_RETENTION_DAYS = 14 DEFAULT_UNKNOWN_VALUE = "unknown" # 50ms DEFAULT_TIMEOUT = 0.05 @@ -3539,7 +3538,7 @@ def get_cloudwatchlog_config(config, fs_id=None): ) logging.debug("Pushing logs to log group named %s in Cloudwatch.", log_group_name) - retention_days = DEFAULT_RETENTION_DAYS + retention_days = None if config.has_option(CLOUDWATCH_LOG_SECTION, "retention_in_days"): retention_days = config.get(CLOUDWATCH_LOG_SECTION, "retention_in_days") @@ -3547,7 +3546,7 @@ def get_cloudwatchlog_config(config, fs_id=None): return { "log_group_name": log_group_name, - "retention_days": int(retention_days), + "retention_days": None if retention_days is None else int(retention_days), "log_stream_name": log_stream_name, } @@ -3644,11 +3643,13 @@ def create_cloudwatch_log_group(cloudwatchlog_client, log_group_name): def 
cloudwatch_put_retention_policy_helper( cloudwatchlog_client, log_group_name, retention_days ): - cloudwatchlog_client.put_retention_policy( - logGroupName=log_group_name, retentionInDays=retention_days - ) - logging.debug("Set cloudwatch log group retention days to %s" % retention_days) - + if retention_days is not None: + cloudwatchlog_client.put_retention_policy( + logGroupName=log_group_name, retentionInDays=retention_days + ) + logging.debug("Set cloudwatch log group retention days to %s" % retention_days) + else: + cloudwatchlog_client.delete_retention_policy(logGroupName=log_group_name) def put_cloudwatch_log_retention_policy( cloudwatchlog_client, log_group_name, retention_days From fde8027accd4f70d7339329b7dda8bd3b54127b2 Mon Sep 17 00:00:00 2001 From: Zachary Maguire Date: Wed, 3 Dec 2025 21:35:45 +0000 Subject: [PATCH 46/51] Fix whitespace for circleCI --- src/mount_efs/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index e32f3836..faba243a 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -3651,6 +3651,7 @@ def cloudwatch_put_retention_policy_helper( else: cloudwatchlog_client.delete_retention_policy(logGroupName=log_group_name) + def put_cloudwatch_log_retention_policy( cloudwatchlog_client, log_group_name, retention_days ): From 4183473650d78a18befc9a103e447c34d93c59c4 Mon Sep 17 00:00:00 2001 From: David Xu Date: Tue, 23 Dec 2025 21:09:19 +0000 Subject: [PATCH 47/51] Add slack notification workflow for issues and PRs --- .github/workflows/notify-slack.yaml | 65 +++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 .github/workflows/notify-slack.yaml diff --git a/.github/workflows/notify-slack.yaml b/.github/workflows/notify-slack.yaml new file mode 100644 index 00000000..f6d7d403 --- /dev/null +++ b/.github/workflows/notify-slack.yaml @@ -0,0 +1,65 @@ +name: Slack Notifications + +on: + issues: + types: [opened, reopened, edited] + 
issue_comment: + types: [created, edited] + pull_request_target: + types: [opened, reopened, synchronize] + +permissions: + contents: read + +jobs: + notify: + runs-on: ubuntu-latest + steps: + - name: Send issue notification to Slack + if: github.event_name == 'issues' + uses: slackapi/slack-github-action@v2.1.1 + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + { + "type": "issue_update", + "action": "${{ github.event.action }}", + "number": ${{ github.event.issue.number }}, + "title": "${{ github.event.issue.title }}", + "url": "${{ github.event.issue.html_url }}", + "user": "${{ github.event.issue.user.login }}" + } + + - name: Send pull request notification to Slack + if: github.event_name == 'pull_request_target' + uses: slackapi/slack-github-action@v2.1.1 + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + { + "type": "pr_update", + "action": "${{ github.event.action }}", + "number": ${{ github.event.pull_request.number }}, + "title": "${{ github.event.pull_request.title }}", + "url": "${{ github.event.pull_request.html_url }}", + "user": "${{ github.event.pull_request.user.login }}" + } + + - name: Send issue comments notification to Slack + if: github.event_name == 'issue_comment' && !github.event.issue.pull_request + uses: slackapi/slack-github-action@v2.1.1 + with: + webhook: ${{ secrets.SLACK_WEBHOOK_URL }} + webhook-type: incoming-webhook + payload: | + { + "type": "issue_comment", + "action": "${{ github.event.action }}", + "number": ${{ github.event.issue.number }}, + "title": "${{ github.event.issue.title }}", + "url": "${{ github.event.comment.html_url }}", + "body": "${{ github.event.comment.body }}", + "user": "${{ github.event.comment.user.login }}" + } From be26e892fd19e0c542cc3007e2e609ad5bae6c20 Mon Sep 17 00:00:00 2001 From: Arron Norwell Date: Mon, 23 Feb 2026 17:01:02 +0000 Subject: [PATCH 48/51] Fix EFS_FQDN_RE to support ADC DNS suffixes with 
hyphens The dns_name_suffix capture group in EFS_FQDN_RE used [a-z0-9.] which does not match hyphens. ADC regions like NCL use DNS suffix 'cloud.adc-e.uk' which contains a hyphen, causing mount.efs to reject the FQDN with 'did not resolve to a valid DNS name for an EFS mount target'. Add hyphen to the character class: [a-z0-9.-] --- src/mount_efs/__init__.py | 2 +- test/mount_efs_test/test_match_device.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index faba243a..9b5fe51b 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -190,7 +190,7 @@ FS_ID_RE = re.compile("^(?Pfs-[0-9a-f]+)$") EFS_FQDN_RE = re.compile( r"^((?P[a-z0-9-]+)\.)?(?Pfs-[0-9a-f]+)\.(?:[a-z-]+\.)+" - r"(?P[a-z0-9-]+)\.(?P[a-z0-9.]+)$" + r"(?P[a-z0-9-]+)\.(?P[a-z0-9.-]+)$" ) AP_ID_RE = re.compile("^fsap-[0-9a-f]{17}$") diff --git a/test/mount_efs_test/test_match_device.py b/test/mount_efs_test/test_match_device.py index 9eccea4f..af06baf4 100644 --- a/test/mount_efs_test/test_match_device.py +++ b/test/mount_efs_test/test_match_device.py @@ -118,6 +118,26 @@ def test_match_device_correct_descriptors_cname_dns_suffix_override_region(mocke utils.assert_called(gethostbyname_ex_mock) +def test_match_device_correct_descriptors_cname_dns_adc_suffix(mocker): + """ADC regions use DNS suffixes with hyphens (e.g. 
cloud.adc-e.uk)""" + adc_dns_name = "fs-deadbeef.efs.eu-isoe-west-1.cloud.adc-e.uk" + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=(adc_dns_name, None), + ) + gethostbyname_ex_mock = mocker.patch( + "socket.gethostbyname_ex", + return_value=(adc_dns_name, [], None), + ) + config = _get_mock_config(dns_name_suffix="cloud.adc-e.uk") + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(gethostbyname_ex_mock) + + def test_match_device_correct_descriptors_cname_dns_primary(mocker): get_dns_name_mock = mocker.patch( "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", From 1567e44df4690826609ee60ec73ecffd20fe0edc Mon Sep 17 00:00:00 2001 From: samuhale Date: Wed, 4 Mar 2026 15:26:20 +0000 Subject: [PATCH 49/51] efs-utils v2.4.2-1 release --- amazon-efs-utils.spec | 9 +- build-deb.sh | 2 +- config.ini | 2 +- src/mount_efs/__init__.py | 33 +- src/proxy/Cargo.lock | 2 +- src/proxy/Cargo.toml | 3 +- src/proxy/build.rs | 2 - src/proxy/rust-xdr/xdrgen/src/spec/mod.rs | 6 +- src/proxy/src/config_parser.rs | 12 + src/proxy/src/efs_prot.x | 7 - src/proxy/src/lib.rs | 1 + src/proxy/src/log_encoder.rs | 132 +++ src/proxy/src/logger.rs | 215 ++++- src/proxy/src/main.rs | 5 +- src/proxy/src/proxy_identifier.rs | 8 +- src/watchdog/__init__.py | 11 +- test/common.py | 8 + test/mount_efs_test/test_match_device.py | 905 ++++++++++-------- test/mount_efs_test/test_mount_nfs.py | 44 + .../test_write_stunnel_config_file.py | 21 + 20 files changed, 949 insertions(+), 479 deletions(-) create mode 100644 src/proxy/src/log_encoder.rs diff --git a/amazon-efs-utils.spec b/amazon-efs-utils.spec index 46fe2de6..d2c20c77 100644 --- a/amazon-efs-utils.spec +++ b/amazon-efs-utils.spec @@ -41,7 +41,7 @@ %{?!include_vendor_tarball:%define 
include_vendor_tarball true} Name : amazon-efs-utils -Version : 2.4.1 +Version : 2.4.2 Release : 1%{platform} Summary : This package provides utilities for simplifying the use of EFS file systems @@ -196,6 +196,13 @@ fi %clean %changelog +* Tue Dec 23 2025 Samuel Hale - 2.4.2 +- Skip stunnel binary invocation when efs-proxy mode is enabled +- Retry "access denied" only for access point mounting +- Fix issue for missing PATH in env when check stunnel lib +- Fix EFS_FQDN_RE to support ADC DNS suffixes with hyphens +- Fix IPv6-only mount target FQDN resolution in match_device + * Thu Nov 20 2025 Anthony Tse - 2.4.1 - Add cafile override for eusc-de-east-1 in efs-utils.conf diff --git a/build-deb.sh b/build-deb.sh index 93cdee72..a62a3944 100755 --- a/build-deb.sh +++ b/build-deb.sh @@ -11,7 +11,7 @@ set -ex BASE_DIR=$(pwd) BUILD_ROOT=${BASE_DIR}/build/debbuild -VERSION=2.4.1 +VERSION=2.4.2 RELEASE=1 ARCH=$(dpkg --print-architecture) DEB_SYSTEM_RELEASE_PATH=/etc/os-release diff --git a/config.ini b/config.ini index 90025d23..19ddb2e0 100644 --- a/config.ini +++ b/config.ini @@ -7,5 +7,5 @@ # [global] -version=2.4.1 +version=2.4.2 release=1 diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 9b5fe51b..30aa5336 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -86,7 +86,7 @@ BOTOCORE_PRESENT = False -VERSION = "2.4.1" +VERSION = "2.4.2" SERVICE = "elasticfilesystem" AMAZON_LINUX_2_RELEASE_ID = "Amazon Linux release 2 (Karoo)" @@ -1425,10 +1425,15 @@ def find_command_path(command, install_method): # For more information, see https://brew.sh/2021/02/05/homebrew-3.0.0/ else: env_path = "/opt/homebrew/bin:/usr/local/bin" - os.putenv("PATH", env_path) + + existing_path = os.environ.get("PATH", "") + search_path = env_path + ":" + existing_path if existing_path else env_path + + env = os.environ.copy() + env["PATH"] = search_path try: - path = subprocess.check_output(["which", command]) + path = subprocess.check_output(["which", 
command], env=env) return path.strip().decode() except subprocess.CalledProcessError as e: fatal_error( @@ -1479,7 +1484,7 @@ def write_stunnel_config_file( hand-serialize it. """ - stunnel_options = get_stunnel_options() + stunnel_options = [] if efs_proxy_enabled else get_stunnel_options() mount_filename = get_mount_specific_filename(fs_id, mountpoint, tls_port) system_release_version = get_system_release_version() @@ -2223,9 +2228,15 @@ def backoff_function(i): out, err = proc.communicate(timeout=retry_nfs_mount_command_timeout_sec) rc = proc.poll() if rc != 0: + is_access_point_mount = "accesspoint" in options continue_retry = any( error_string in str(err) for error_string in RETRYABLE_ERRORS ) + + # Only retry "access denied" for access point mounts, handles race condition that can occur during AP backend provisioning + if not continue_retry and "access denied by server" in str(err): + continue_retry = is_access_point_mount + if continue_retry: logging.error( 'Mounting %s to %s failed, return code=%s, stdout="%s", stderr="%s", mount attempt %d/%d, ' @@ -3223,8 +3234,18 @@ def match_device(config, device, options): return remote, path, None try: - primary, secondaries, _ = socket.gethostbyname_ex(remote) - hostnames = list(filter(lambda e: e is not None, [primary] + secondaries)) + addrinfo = socket.getaddrinfo( + remote, None, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_CANONNAME + ) + hostnames = list( + set( + filter( + lambda e: e is not None and e != "", [info[3] for info in addrinfo] + ) + ) + ) + if not hostnames: + hostnames = [remote] except socket.gaierror: create_default_cloudwatchlog_agent_if_not_exist(config, options) fatal_error( diff --git a/src/proxy/Cargo.lock b/src/proxy/Cargo.lock index 66cf7425..202db8f4 100644 --- a/src/proxy/Cargo.lock +++ b/src/proxy/Cargo.lock @@ -337,7 +337,7 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "efs-proxy" -version = "2.4.1" +version = "2.4.2" 
dependencies = [ "anyhow", "async-trait", diff --git a/src/proxy/Cargo.toml b/src/proxy/Cargo.toml index cb469919..b5c020f2 100644 --- a/src/proxy/Cargo.toml +++ b/src/proxy/Cargo.toml @@ -3,7 +3,7 @@ name = "efs-proxy" edition = "2021" build = "build.rs" # The version of efs-proxy is tied to efs-utils. -version = "2.4.1" +version = "2.4.2" publish = false license = "MIT" @@ -34,6 +34,7 @@ xdr-codec = { path = "rust-xdr/xdr-codec"} test-case = "*" tokio = { version = "1.29.0", features = ["test-util"] } tempfile = "3.10.1" +regex = "1.10.2" [build-dependencies] xdrgen = { path = "rust-xdr/xdrgen" } diff --git a/src/proxy/build.rs b/src/proxy/build.rs index dbc10216..81bcde27 100644 --- a/src/proxy/build.rs +++ b/src/proxy/build.rs @@ -1,5 +1,3 @@ -use xdrgen; - fn main() { xdrgen::compile("src/efs_prot.x").expect("xdrgen efs_prot.x failed"); } diff --git a/src/proxy/rust-xdr/xdrgen/src/spec/mod.rs b/src/proxy/rust-xdr/xdrgen/src/spec/mod.rs index e259ae08..15b6bb67 100644 --- a/src/proxy/rust-xdr/xdrgen/src/spec/mod.rs +++ b/src/proxy/rust-xdr/xdrgen/src/spec/mod.rs @@ -1073,15 +1073,15 @@ impl Symtab { } } - pub fn constants(&self) -> Iter)> { + pub fn constants(&self) -> Iter<'_, String, (i64, Option)> { self.consts.iter() } - pub fn typespecs(&self) -> Iter { + pub fn typespecs(&self) -> Iter<'_, String, Type> { self.typespecs.iter() } - pub fn typesyns(&self) -> Iter { + pub fn typesyns(&self) -> Iter<'_, String, Type> { self.typesyns.iter() } } diff --git a/src/proxy/src/config_parser.rs b/src/proxy/src/config_parser.rs index 0a49fb14..0a367a3a 100644 --- a/src/proxy/src/config_parser.rs +++ b/src/proxy/src/config_parser.rs @@ -20,6 +20,10 @@ where } } +fn default_log_format() -> Option { + Some("file".to_string()) +} + #[derive(Default, Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct ProxyConfig { #[serde(alias = "fips", deserialize_with = "deserialize_bool")] @@ -33,6 +37,11 @@ pub struct ProxyConfig { #[serde(alias = "output")] pub output: 
Option, + /// The format to use for logging. Values can be "file", "stdout" + /// Default is "file" if not specified. + #[serde(alias = "log_format", default = "default_log_format")] + pub log_format: Option, + /// The proxy process is responsible for writing it's PID into this file so that the Watchdog /// process can monitor it #[serde(alias = "pid")] @@ -136,6 +145,7 @@ checkHost = fs-12341234.efs.us-east-1.amazonaws.com output: Some(String::from( "/var/log/amazon/efs/fs-12341234.home.ec2-user.efs.21036.efs-proxy.log", )), + log_format: Some(String::from("file")), nested_config: EfsConfig { listen_addr: String::from("127.0.0.1:21036"), mount_target_addr: String::from("fs-12341234.efs.us-east-1.amazonaws.com:2049"), @@ -162,6 +172,7 @@ socket = a:SO_BINDTODEVICE=lo pid = /var/run/efs/fs-12341234.home.ec2-user.efs.21036+/stunnel.pid port = 8081 initial_partition_ip = 127.0.0.1:2049 +log_format = stdout [efs] accept = 127.0.0.1:21036 @@ -187,6 +198,7 @@ checkHost = fs-12341234.efs.us-east-1.amazonaws.com ), debug: DEFAULT_LOG_LEVEL.to_string(), output: None, + log_format: Some(String::from("stdout")), nested_config: EfsConfig { listen_addr: String::from("127.0.0.1:21036"), mount_target_addr: String::from("fs-12341234.efs.us-east-1.amazonaws.com:2049"), diff --git a/src/proxy/src/efs_prot.x b/src/proxy/src/efs_prot.x index d0faeb4f..eac14ae9 100644 --- a/src/proxy/src/efs_prot.x +++ b/src/proxy/src/efs_prot.x @@ -48,10 +48,3 @@ struct BindClientResponse { BindResponse bind_response; ScaleUpConfig scale_up_config; }; - -union OperationResponse switch (OperationType operation_type) { - case OP_BIND_CLIENT_TO_PARTITION: - BindClientResponse response; - default: - void; -}; diff --git a/src/proxy/src/lib.rs b/src/proxy/src/lib.rs index 42111954..ee41164e 100644 --- a/src/proxy/src/lib.rs +++ b/src/proxy/src/lib.rs @@ -10,6 +10,7 @@ pub mod connections; pub mod controller; pub mod efs_rpc; pub mod error; +pub mod log_encoder; pub mod logger; pub mod proxy; pub mod 
proxy_identifier; diff --git a/src/proxy/src/log_encoder.rs b/src/proxy/src/log_encoder.rs new file mode 100644 index 00000000..e0529bf5 --- /dev/null +++ b/src/proxy/src/log_encoder.rs @@ -0,0 +1,132 @@ +use anyhow::Result; +use chrono::Utc; +use log4rs::encode::{Encode, Write}; +use std::fmt; + +/// Custom encoder that replaces newlines with spaces to keep multi-line logs on a single line +pub struct SingleLineEncoder; + +impl Encode for SingleLineEncoder { + fn encode(&self, w: &mut dyn Write, record: &log::Record<'_>) -> Result<()> { + let timestamp = Utc::now().format("%Y-%m-%dT%H:%M:%S%.3fZ"); + let level = record.level(); + let module = record.module_path().unwrap_or("-"); + let message = format!("{}", record.args()); + let single_line_message = message.replace('\n', " "); + + writeln!( + w, + "{} {} {} {} {}", + timestamp, + std::process::id(), + level, + module, + single_line_message + ) + .map_err(|e| anyhow::anyhow!(e)) + } +} + +impl fmt::Debug for SingleLineEncoder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SingleLineEncoder").finish() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use log::{Level, Record}; + use regex::Regex; + use std::io; + + struct BufferWriter<'a>(&'a mut Vec); + + impl<'a> io::Write for BufferWriter<'a> { + fn write(&mut self, buf: &[u8]) -> io::Result { + self.0.extend_from_slice(buf); + Ok(buf.len()) + } + + fn flush(&mut self) -> io::Result<()> { + Ok(()) + } + } + + impl<'a> Write for BufferWriter<'a> { + // This trait is implemented automatically because BufferWriter implements io::Write + } + + #[test] + fn test_format_log_message() { + let encoder = SingleLineEncoder; + + let record = Record::builder() + .args(format_args!("Test message")) + .level(Level::Info) + .target("test_target") + .module_path(Some("test_module")) + .file(Some("test_file.rs")) + .line(Some(42)) + .build(); + + let mut buffer = Vec::new(); + + let mut writer = BufferWriter(&mut buffer); + 
encoder.encode(&mut writer, &record).unwrap(); + + let output = String::from_utf8_lossy(&buffer); + + let timestamp_regex = r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z"; + let pid_regex = r"\d+"; + let level_regex = r"INFO"; + let module_regex = r"test_module"; + let message_regex = r"Test message"; + + let pattern = format!( + "^{} {} {} {} {}$", + timestamp_regex, pid_regex, level_regex, module_regex, message_regex + ); + + let regex = Regex::new(&pattern).unwrap(); + assert!( + regex.is_match(output.trim()), + "Output format doesn't match expected pattern. Got: {}", + output + ); + } + + #[test] + fn test_multiline_message() { + let encoder = SingleLineEncoder; + + let record = Record::builder() + .args(format_args!("Test\nmultiline\nmessage")) + .level(Level::Warn) + .target("test_target") + .module_path(Some("test_module")) + .file(Some("test_file.rs")) + .line(Some(42)) + .build(); + + let mut buffer = Vec::new(); + + let mut writer = BufferWriter(&mut buffer); + encoder.encode(&mut writer, &record).unwrap(); + + let output = String::from_utf8_lossy(&buffer); + + assert!( + output.contains("Test multiline message"), + "Multiline message not properly formatted. Got: {}", + output + ); + + let newline_count = output.chars().filter(|&c| c == '\n').count(); + assert_eq!( + newline_count, 1, + "Expected only one newline at the end. 
Got: {}", + output + ); + } +} diff --git a/src/proxy/src/logger.rs b/src/proxy/src/logger.rs index 2b7a3c70..a82b504a 100644 --- a/src/proxy/src/logger.rs +++ b/src/proxy/src/logger.rs @@ -16,50 +16,187 @@ use log4rs::{ use std::{path::Path, str::FromStr}; use crate::config_parser::ProxyConfig; +use crate::log_encoder::SingleLineEncoder; const LOG_FILE_MAX_BYTES: u64 = 1048576; const LOG_FILE_COUNT: u32 = 10; -pub fn init(config: &ProxyConfig) { - let log_file_path_string = config - .output - .clone() - .expect("config value `output` is not set"); - let log_file_path = Path::new(&log_file_path_string); +pub fn create_config(config: &ProxyConfig) -> Config { let level_filter = LevelFilter::from_str(&config.debug).expect("config value for `debug` is invalid"); - let stderr = ConsoleAppender::builder().target(Target::Stderr).build(); - - let trigger = SizeTrigger::new(LOG_FILE_MAX_BYTES); - let mut pattern = log_file_path_string.clone(); - pattern.push_str(".{}"); - let roller = FixedWindowRoller::builder() - .build(&pattern, LOG_FILE_COUNT) - .expect("Unable to create roller"); - let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller)); - - let log_file = RollingFileAppender::builder() - .encoder(Box::new(PatternEncoder::new( - "{d(%Y-%m-%dT%H:%M:%S%.3fZ)(utc)} {P} {l} {M} {m}{n}", - ))) - .build(log_file_path, Box::new(policy)) - .expect("Unable to create log file"); - - let config = Config::builder() - .appender(Appender::builder().build("logfile", Box::new(log_file))) - .appender( - Appender::builder() - .filter(Box::new(ThresholdFilter::new(LevelFilter::Error))) - .build("stderr", Box::new(stderr)), - ) - .build( - Root::builder() - .appender("logfile") - .appender("stderr") - .build(level_filter), - ) - .expect("Invalid logger config"); - - let _ = log4rs::init_config(config).expect("Unable to initialize logger"); + let log_format = config.log_format.as_deref().unwrap_or("file"); + + let mut config_builder = Config::builder(); + let mut 
root_builder = Root::builder(); + + match log_format { + "file" => { + let log_file_path_string = config + .output + .clone() + .expect("config value `output` is not set"); + + let log_file_path = Path::new(&log_file_path_string); + + let stderr = ConsoleAppender::builder().target(Target::Stderr).build(); + + config_builder = config_builder.appender( + Appender::builder() + .filter(Box::new(ThresholdFilter::new(LevelFilter::Error))) + .build("stderr", Box::new(stderr)), + ); + + let trigger = SizeTrigger::new(LOG_FILE_MAX_BYTES); + let mut pattern = log_file_path_string.clone(); + pattern.push_str(".{}"); + let roller = FixedWindowRoller::builder() + .build(&pattern, LOG_FILE_COUNT) + .expect("Unable to create roller"); + let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller)); + + let log_file = RollingFileAppender::builder() + .encoder(Box::new(PatternEncoder::new( + "{d(%Y-%m-%dT%H:%M:%S%.3fZ)(utc)} {P} {l} {M} {m}{n}", + ))) + .build(log_file_path, Box::new(policy)) + .expect("Unable to create log file"); + + config_builder = + config_builder.appender(Appender::builder().build("logfile", Box::new(log_file))); + + root_builder = root_builder.appender("logfile").appender("stderr"); + } + "stdout" => { + let stderr = ConsoleAppender::builder() + .target(Target::Stderr) + .encoder(Box::new(SingleLineEncoder)) + .build(); + + config_builder = config_builder.appender( + Appender::builder() + .filter(Box::new(ThresholdFilter::new(LevelFilter::Error))) + .build("stderr", Box::new(stderr)), + ); + + let stdout = ConsoleAppender::builder() + .target(Target::Stdout) + .encoder(Box::new(SingleLineEncoder)) + .build(); + + config_builder = + config_builder.appender(Appender::builder().build("stdout", Box::new(stdout))); + + root_builder = root_builder.appender("stderr").appender("stdout"); + } + _ => panic!("Invalid `log_format` value. 
Must be either 'file' or 'stdout'"), + } + + config_builder + .build(root_builder.build(level_filter)) + .expect("Invalid logger config") +} + +pub fn init(config: &ProxyConfig) { + let log_format = config.log_format.as_deref().unwrap_or("file"); + if log_format == "file" && config.output.is_none() { + return; + } + + let log_config = create_config(config); + let _ = log4rs::init_config(log_config).expect("Unable to initialize logger"); +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::config_parser::ProxyConfig; + use std::panic; + use tempfile::tempdir; + + #[test] + fn test_logger_init_with_file() { + let temp_dir = tempdir().expect("Failed to create temporary directory"); + let log_path = temp_dir.path().join("test.log"); + let log_path_str = log_path.to_str().expect("Failed to convert path to string"); + + let config = ProxyConfig { + fips: false, + debug: "info".to_string(), + output: Some(log_path_str.to_string()), + log_format: Some("file".to_string()), + pid_file_path: "".to_string(), + nested_config: Default::default(), + }; + + let result = panic::catch_unwind(|| { + init(&config); + }); + + let _ = temp_dir.close(); + + assert!( + result.is_ok(), + "Logger initialization panicked with valid config" + ); + } + + #[test] + fn test_create_config_with_file() { + let temp_dir = tempdir().expect("Failed to create temporary directory"); + let log_path = temp_dir.path().join("test.log"); + let log_path_str = log_path.to_str().expect("Failed to convert path to string"); + + let config = ProxyConfig { + fips: false, + debug: "info".to_string(), + output: Some(log_path_str.to_string()), + log_format: Some("file".to_string()), + pid_file_path: "".to_string(), + nested_config: Default::default(), + }; + + let log_config = create_config(&config); + + assert_eq!(log_config.root().level(), LevelFilter::Info); + + let _ = temp_dir.close(); + } + + #[test] + fn test_create_config_with_stdout() { + let config = ProxyConfig { + fips: false, + debug: 
"debug".to_string(), + output: None, + log_format: Some("stdout".to_string()), + pid_file_path: "".to_string(), + nested_config: Default::default(), + }; + + let log_config = create_config(&config); + + assert_eq!(log_config.root().level(), LevelFilter::Debug); + } + + #[test] + fn test_init_skips_when_output_none() { + let config = ProxyConfig { + fips: false, + debug: "info".to_string(), + output: None, + log_format: Some("file".to_string()), + pid_file_path: "".to_string(), + nested_config: Default::default(), + }; + + let result = panic::catch_unwind(|| { + init(&config); + }); + + assert!( + result.is_ok(), + "Logger initialization should not panic when output is None" + ); + } } diff --git a/src/proxy/src/main.rs b/src/proxy/src/main.rs index 92d4d1e4..65ecd409 100644 --- a/src/proxy/src/main.rs +++ b/src/proxy/src/main.rs @@ -18,6 +18,7 @@ mod connections; mod controller; mod efs_rpc; mod error; +mod log_encoder; mod logger; mod proxy; mod proxy_identifier; @@ -46,9 +47,7 @@ async fn main() { Err(e) => panic!("Failed to read configuration. 
{}", e), }; - if let Some(_log_file_path) = &proxy_config.output { - logger::init(&proxy_config) - } + logger::init(&proxy_config); info!("Running with configuration: {:?}", proxy_config); diff --git a/src/proxy/src/proxy_identifier.rs b/src/proxy/src/proxy_identifier.rs index 0e986864..75c71f22 100644 --- a/src/proxy/src/proxy_identifier.rs +++ b/src/proxy/src/proxy_identifier.rs @@ -8,6 +8,12 @@ pub struct ProxyIdentifier { pub incarnation: i64, } +impl Default for ProxyIdentifier { + fn default() -> Self { + Self::new() + } +} + impl ProxyIdentifier { pub fn new() -> Self { ProxyIdentifier { @@ -32,7 +38,7 @@ mod tests { #[test] fn test_increment() { - let mut proxy_id = ProxyIdentifier::new(); + let mut proxy_id = ProxyIdentifier::default(); let proxy_id_original = proxy_id; for i in 0..5 { assert_eq!(i, proxy_id.incarnation); diff --git a/src/watchdog/__init__.py b/src/watchdog/__init__.py index f66e7fb4..3a196979 100755 --- a/src/watchdog/__init__.py +++ b/src/watchdog/__init__.py @@ -56,7 +56,7 @@ AMAZON_LINUX_2_RELEASE_ID, AMAZON_LINUX_2_PRETTY_NAME, ] -VERSION = "2.4.1" +VERSION = "2.4.2" SERVICE = "elasticfilesystem" CONFIG_FILE = "/etc/amazon/efs/efs-utils.conf" @@ -992,10 +992,15 @@ def find_command_path(command, install_method): # For more information, see https://brew.sh/2021/02/05/homebrew-3.0.0/ else: env_path = "/opt/homebrew/bin:/usr/local/bin" - os.putenv("PATH", env_path) + + existing_path = os.environ.get("PATH", "") + search_path = env_path + ":" + existing_path if existing_path else env_path + + env = os.environ.copy() + env["PATH"] = search_path try: - path = subprocess.check_output(["which", command]) + path = subprocess.check_output(["which", command], env=env) return path.strip().decode() except subprocess.CalledProcessError as e: fatal_error( diff --git a/test/common.py b/test/common.py index 474746dd..d0dd9820 100644 --- a/test/common.py +++ b/test/common.py @@ -47,6 +47,14 @@ def _create_mock(self): communicate_return_value=(b"", 
b"mount.nfs4: Connection reset by peer"), ) DEFAULT_NON_RETRYABLE_FAILURE_POPEN = PopenMock( + return_code=1, + poll_result=1, + communicate_return_value=( + b"", + b"mount.nfs4: Protocol not supported", + ), +) +ACCESS_DENIED_FAILURE_POPEN = PopenMock( return_code=1, poll_result=1, communicate_return_value=( diff --git a/test/mount_efs_test/test_match_device.py b/test/mount_efs_test/test_match_device.py index af06baf4..d74401f1 100644 --- a/test/mount_efs_test/test_match_device.py +++ b/test/mount_efs_test/test_match_device.py @@ -1,123 +1,131 @@ -# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. -# -# Licensed under the MIT License. See the LICENSE accompanying this file -# for the specific language governing permissions and limitations under -# the License. - -import socket - -import pytest - -import mount_efs - -from .. import utils - -try: - import ConfigParser -except ImportError: - from configparser import ConfigParser - -DEFAULT_AZ = "us-east-1a" -CORRECT_DEVICE_DESCRIPTORS_FS_ID = [ - ("fs-deadbeef", ("fs-deadbeef", "/", None)), - ("fs-deadbeef:/", ("fs-deadbeef", "/", None)), - ("fs-deadbeef:/some/subpath", ("fs-deadbeef", "/some/subpath", None)), - ( - "fs-deadbeef:/some/subpath/with:colons", - ("fs-deadbeef", "/some/subpath/with:colons", None), - ), -] -CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS = [ - ("custom-cname.example.com", ("fs-deadbeef", "/", None)), - ("custom-cname.example.com:/", ("fs-deadbeef", "/", None)), - ("custom-cname.example.com:/some/subpath", ("fs-deadbeef", "/some/subpath", None)), - ( - "custom-cname.example.com:/some/subpath/with:colons", - ("fs-deadbeef", "/some/subpath/with:colons", None), - ), -] -CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS_WITH_AZ = [ - ("custom-cname.example.com", ("fs-deadbeef", "/", DEFAULT_AZ)), - ("custom-cname.example.com:/", ("fs-deadbeef", "/", DEFAULT_AZ)), - ( - "custom-cname.example.com:/some/subpath", - ("fs-deadbeef", "/some/subpath", DEFAULT_AZ), - ), - ( - 
"custom-cname.example.com:/some/subpath/with:colons", - ("fs-deadbeef", "/some/subpath/with:colons", DEFAULT_AZ), - ), -] -DEFAULT_REGION = "us-east-1" -DEFAULT_NFS_OPTIONS = {} -FS_ID = "fs-deadbeef" -OPTIONS_WITH_AZ = {"az": DEFAULT_AZ} -TEST_SOCKET_GET_ADDR_INFO_RETURN = [ - (socket.AF_INET, socket.SOCK_STREAM, 6, "", ("93.184.216.34", 80)) -] - - -@pytest.fixture(autouse=True) -def setup(mocker): - mocker.patch("mount_efs.get_target_region", return_value=DEFAULT_REGION) - mocker.patch( - "socket.getaddrinfo", - return_value=TEST_SOCKET_GET_ADDR_INFO_RETURN, - ) - - -def _get_mock_config( - dns_name_format="{az}.{fs_id}.efs.{region}.{dns_name_suffix}", - dns_name_suffix="amazonaws.com", - cloudwatch_enabled="false", - has_fallback_to_mount_target_ip_address_item=True, - fallback_to_mount_target_ip_address=False, -): - try: - config = ConfigParser.SafeConfigParser() - except AttributeError: - config = ConfigParser() - config.add_section(mount_efs.CONFIG_SECTION) - config.add_section(mount_efs.CLOUDWATCH_LOG_SECTION) - config.set(mount_efs.CONFIG_SECTION, "dns_name_format", dns_name_format) - config.set(mount_efs.CONFIG_SECTION, "dns_name_suffix", dns_name_suffix) - config.set(mount_efs.CLOUDWATCH_LOG_SECTION, "enabled", cloudwatch_enabled) - if has_fallback_to_mount_target_ip_address_item: - config.set( - mount_efs.CONFIG_SECTION, - mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, - str(fallback_to_mount_target_ip_address), - ) - - return config - - -def test_match_device_correct_descriptors_fs_id(mocker): - config = _get_mock_config() - for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_FS_ID: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, DEFAULT_NFS_OPTIONS - ) - - -def test_match_device_correct_descriptors_cname_dns_suffix_override_region(mocker): - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=("fs-deadbeef.efs.cn-north-1.amazonaws.com.cn", None), - ) - 
gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=("fs-deadbeef.efs.cn-north-1.amazonaws.com.cn", [], None), - ) - config = _get_mock_config() - for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, DEFAULT_NFS_OPTIONS - ) - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - +# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved. +# +# Licensed under the MIT License. See the LICENSE accompanying this file +# for the specific language governing permissions and limitations under +# the License. + +import socket + +import pytest + +import mount_efs + +from .. import utils + +try: + import ConfigParser +except ImportError: + from configparser import ConfigParser + +DEFAULT_AZ = "us-east-1a" +CORRECT_DEVICE_DESCRIPTORS_FS_ID = [ + ("fs-deadbeef", ("fs-deadbeef", "/", None)), + ("fs-deadbeef:/", ("fs-deadbeef", "/", None)), + ("fs-deadbeef:/some/subpath", ("fs-deadbeef", "/some/subpath", None)), + ( + "fs-deadbeef:/some/subpath/with:colons", + ("fs-deadbeef", "/some/subpath/with:colons", None), + ), +] +CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS = [ + ("custom-cname.example.com", ("fs-deadbeef", "/", None)), + ("custom-cname.example.com:/", ("fs-deadbeef", "/", None)), + ("custom-cname.example.com:/some/subpath", ("fs-deadbeef", "/some/subpath", None)), + ( + "custom-cname.example.com:/some/subpath/with:colons", + ("fs-deadbeef", "/some/subpath/with:colons", None), + ), +] +CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS_WITH_AZ = [ + ("custom-cname.example.com", ("fs-deadbeef", "/", DEFAULT_AZ)), + ("custom-cname.example.com:/", ("fs-deadbeef", "/", DEFAULT_AZ)), + ( + "custom-cname.example.com:/some/subpath", + ("fs-deadbeef", "/some/subpath", DEFAULT_AZ), + ), + ( + "custom-cname.example.com:/some/subpath/with:colons", + ("fs-deadbeef", "/some/subpath/with:colons", DEFAULT_AZ), + ), +] +DEFAULT_REGION = 
"us-east-1" +DEFAULT_NFS_OPTIONS = {} +FS_ID = "fs-deadbeef" +OPTIONS_WITH_AZ = {"az": DEFAULT_AZ} +TEST_SOCKET_GET_ADDR_INFO_RETURN = [ + (socket.AF_INET, socket.SOCK_STREAM, 6, "", ("93.184.216.34", 80)) +] + + +@pytest.fixture(autouse=True) +def setup(mocker): + mocker.patch("mount_efs.get_target_region", return_value=DEFAULT_REGION) + mocker.patch( + "socket.getaddrinfo", + return_value=TEST_SOCKET_GET_ADDR_INFO_RETURN, + ) + + +def _get_mock_config( + dns_name_format="{az}.{fs_id}.efs.{region}.{dns_name_suffix}", + dns_name_suffix="amazonaws.com", + cloudwatch_enabled="false", + has_fallback_to_mount_target_ip_address_item=True, + fallback_to_mount_target_ip_address=False, +): + try: + config = ConfigParser.SafeConfigParser() + except AttributeError: + config = ConfigParser() + config.add_section(mount_efs.CONFIG_SECTION) + config.add_section(mount_efs.CLOUDWATCH_LOG_SECTION) + config.set(mount_efs.CONFIG_SECTION, "dns_name_format", dns_name_format) + config.set(mount_efs.CONFIG_SECTION, "dns_name_suffix", dns_name_suffix) + config.set(mount_efs.CLOUDWATCH_LOG_SECTION, "enabled", cloudwatch_enabled) + if has_fallback_to_mount_target_ip_address_item: + config.set( + mount_efs.CONFIG_SECTION, + mount_efs.FALLBACK_TO_MOUNT_TARGET_IP_ADDRESS_ITEM, + str(fallback_to_mount_target_ip_address), + ) + + return config + + +def test_match_device_correct_descriptors_fs_id(mocker): + config = _get_mock_config() + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_FS_ID: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + + +def _mock_getaddrinfo_return(canonname, family=socket.AF_INET, ip="93.184.216.34"): + """Helper to build a mock getaddrinfo return value with AI_CANONNAME.""" + sockaddr = (ip, 0) if family == socket.AF_INET else (ip, 0, 0, 0) + return [(family, socket.SOCK_STREAM, 6, canonname, sockaddr)] + + +def test_match_device_correct_descriptors_cname_dns_suffix_override_region(mocker): + get_dns_name_mock = 
mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=("fs-deadbeef.efs.cn-north-1.amazonaws.com.cn", None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + "fs-deadbeef.efs.cn-north-1.amazonaws.com.cn" + ), + ) + config = _get_mock_config() + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + def test_match_device_correct_descriptors_cname_dns_adc_suffix(mocker): """ADC regions use DNS suffixes with hyphens (e.g. cloud.adc-e.uk)""" adc_dns_name = "fs-deadbeef.efs.eu-isoe-west-1.cloud.adc-e.uk" @@ -125,9 +133,9 @@ def test_match_device_correct_descriptors_cname_dns_adc_suffix(mocker): "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", return_value=(adc_dns_name, None), ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=(adc_dns_name, [], None), + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(adc_dns_name), ) config = _get_mock_config(dns_name_suffix="cloud.adc-e.uk") for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: @@ -135,290 +143,367 @@ def test_match_device_correct_descriptors_cname_dns_adc_suffix(mocker): config, device, DEFAULT_NFS_OPTIONS ) utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_correct_descriptors_cname_dns_primary(mocker): - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", [], None), - ) - config = _get_mock_config() - for device, 
(fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, DEFAULT_NFS_OPTIONS - ) - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_correct_descriptors_cname_dns_secondary(mocker): - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=(None, ["fs-deadbeef.efs.us-east-1.amazonaws.com"], None), - ) - config = _get_mock_config() - for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, DEFAULT_NFS_OPTIONS - ) - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_correct_descriptors_cname_dns_tertiary(mocker): - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=(None, [None, "fs-deadbeef.efs.us-east-1.amazonaws.com"], None), - ) - config = _get_mock_config() - for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, DEFAULT_NFS_OPTIONS - ) - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_correct_descriptors_cname_dns_amongst_invalid(mocker): - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=( - "fs-deadbeef.efs.us-west-1.amazonaws.com", - 
["fs-deadbeef.efs.us-east-1.amazonaws.com", "invalid-efs-name.example.com"], - None, - ), - ) - config = _get_mock_config() - for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, DEFAULT_NFS_OPTIONS - ) - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_unresolvable_domain(mocker, capsys): - mocker.patch("socket.gethostbyname_ex", side_effect=socket.gaierror) - config = _get_mock_config() - with pytest.raises(SystemExit) as ex: - mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) - - assert 0 != ex.value.code - out, err = capsys.readouterr() - assert "Failed to resolve" in err - - -def test_match_device_no_hostnames(mocker, capsys): - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(None, [], None) - ) - config = _get_mock_config() - with pytest.raises(SystemExit) as ex: - mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) - - assert 0 != ex.value.code - out, err = capsys.readouterr() - assert "did not resolve to an EFS mount target" in err - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_no_hostnames2(mocker, capsys): - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(None, [None, None], None) - ) - config = _get_mock_config() - with pytest.raises(SystemExit) as ex: - mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) - - assert 0 != ex.value.code - out, err = capsys.readouterr() - assert "did not resolve to an EFS mount target" in err - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_resolve_to_invalid_efs_dns_name(mocker, capsys): - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=("invalid-efs-name.example.com", [], None), - ) - config = _get_mock_config() - with 
pytest.raises(SystemExit) as ex: - mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) - - assert 0 != ex.value.code - out, err = capsys.readouterr() - assert "did not resolve to a valid DNS name" in err - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_resolve_to_unexpected_efs_dns_name(mocker, capsys): - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=("fs-deadbeef.efs.us-west-1.amazonaws.com", None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", - return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", [], None), - ) - config = _get_mock_config() - with pytest.raises(SystemExit) as ex: - mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) - - assert 0 != ex.value.code - out, err = capsys.readouterr() - assert "did not resolve to a valid DNS name" in err - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_fqdn_same_as_dns_name(mocker, capsys): - dns_name = "%s.efs.us-east-1.amazonaws.com" % FS_ID - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(dns_name, [], None) - ) - efs_fqdn_match = mount_efs.EFS_FQDN_RE.match(dns_name) - assert efs_fqdn_match - assert FS_ID == efs_fqdn_match.group("fs_id") - - config = _get_mock_config() - ( - expected_dns_name, - ip_address, - ) = mount_efs.get_dns_name_and_fallback_mount_target_ip_address( - config, FS_ID, DEFAULT_NFS_OPTIONS - ) - assert dns_name == expected_dns_name - assert None == ip_address - - for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, DEFAULT_NFS_OPTIONS - ) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_fqdn_same_as_dns_name_with_az(mocker, capsys): - dns_name = "%s.%s.efs.us-east-1.amazonaws.com" % (DEFAULT_AZ, FS_ID) - 
gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(dns_name, [], None) - ) - efs_fqdn_match = mount_efs.EFS_FQDN_RE.match(dns_name) - assert efs_fqdn_match - assert FS_ID == efs_fqdn_match.group("fs_id") - - config = _get_mock_config() - ( - expected_dns_name, - ip_address, - ) = mount_efs.get_dns_name_and_fallback_mount_target_ip_address( - config, FS_ID, OPTIONS_WITH_AZ - ) - assert dns_name == expected_dns_name - assert None == ip_address - for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS_WITH_AZ: - assert (fs_id, path, az) == mount_efs.match_device( - config, device, OPTIONS_WITH_AZ - ) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_with_az_dns_name_mount_az_not_in_option(mocker): - # When dns_name is provided for mounting, if the az is not provided in the mount option, also dns_name contains az - # info, verify that the az info returned is equal to the az info in the dns name - dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com" - config = _get_mock_config() - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=(dns_name, None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(dns_name, [], None) - ) - fsid, path, az = mount_efs.match_device(config, dns_name, DEFAULT_NFS_OPTIONS) - - assert az == "us-east-1a" - - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_with_az_dns_name_mount_az_in_option(mocker): - # When dns_name is provided for mounting, if the az is provided in the mount option, also dns_name contains az - # info, verify that the az info returned is equal to the az info in the dns name - dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com" - config = _get_mock_config() - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - 
return_value=(dns_name, None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(dns_name, [], None) - ) - fsid, path, az = mount_efs.match_device(config, dns_name, OPTIONS_WITH_AZ) - - assert az == "us-east-1a" - - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_with_dns_name_mount_az_in_option(mocker): - # When dns_name is mapping to the az_dns_name, and the az field is provided to the option, verify that the az info returned is - # equal to the az info in the dns name - dns_name = "example.random.com" - az_dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com" - config = _get_mock_config() - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=(az_dns_name, None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(az_dns_name, [], None) - ) - fsid, path, az = mount_efs.match_device(config, dns_name, OPTIONS_WITH_AZ) - - assert az == "us-east-1a" - - utils.assert_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) - - -def test_match_device_with_dns_name_mount_az_in_option_not_match(mocker, capsys): - # When dns_name is mapping to the az_dns_name, and the az field is provided to the option, while the two az value is not - # the same, verify that exception is thrown - dns_name = "example.random.com" - az_dns_name = "us-east-1b.fs-deadbeef.efs.us-east-1.amazonaws.com" - config = _get_mock_config() - get_dns_name_mock = mocker.patch( - "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", - return_value=(az_dns_name, None), - ) - gethostbyname_ex_mock = mocker.patch( - "socket.gethostbyname_ex", return_value=(az_dns_name, [], None) - ) - - with pytest.raises(SystemExit) as ex: - mount_efs.match_device(config, dns_name, OPTIONS_WITH_AZ) - - assert 0 != ex.value.code - out, err = capsys.readouterr() - assert "does not match the az 
provided" in err - utils.assert_not_called(get_dns_name_mock) - utils.assert_called(gethostbyname_ex_mock) + utils.assert_called(getaddrinfo_mock) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_correct_descriptors_cname_dns_primary(mocker): + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + "fs-deadbeef.efs.us-east-1.amazonaws.com" + ), + ) + config = _get_mock_config() + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_correct_descriptors_cname_dns_secondary(mocker): + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + "fs-deadbeef.efs.us-east-1.amazonaws.com" + ), + ) + config = _get_mock_config() + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_correct_descriptors_cname_dns_tertiary(mocker): + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + "fs-deadbeef.efs.us-east-1.amazonaws.com" + ), + ) + config = 
_get_mock_config() + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_correct_descriptors_cname_dns_amongst_invalid(mocker): + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=("fs-deadbeef.efs.us-east-1.amazonaws.com", None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + "fs-deadbeef.efs.us-east-1.amazonaws.com" + ), + ) + config = _get_mock_config() + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_unresolvable_domain(mocker, capsys): + mocker.patch("socket.getaddrinfo", side_effect=socket.gaierror) + config = _get_mock_config() + with pytest.raises(SystemExit) as ex: + mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) + + assert 0 != ex.value.code + out, err = capsys.readouterr() + assert "Failed to resolve" in err + + +def test_match_device_no_hostnames(mocker, capsys): + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(""), + ) + config = _get_mock_config() + with pytest.raises(SystemExit) as ex: + mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) + + assert 0 != ex.value.code + out, err = capsys.readouterr() + assert "did not resolve" in err + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_no_hostnames2(mocker, capsys): + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=[ + (socket.AF_INET6, socket.SOCK_STREAM, 6, "", ("::1", 0, 0, 0)), + 
(socket.AF_INET6, socket.SOCK_STREAM, 6, "", ("::2", 0, 0, 0)), + ], + ) + config = _get_mock_config() + with pytest.raises(SystemExit) as ex: + mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) + + assert 0 != ex.value.code + out, err = capsys.readouterr() + assert "did not resolve" in err + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_resolve_to_invalid_efs_dns_name(mocker, capsys): + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return("invalid-efs-name.example.com"), + ) + config = _get_mock_config() + with pytest.raises(SystemExit) as ex: + mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) + + assert 0 != ex.value.code + out, err = capsys.readouterr() + assert "did not resolve to a valid DNS name" in err + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_resolve_to_unexpected_efs_dns_name(mocker, capsys): + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=("fs-deadbeef.efs.us-west-1.amazonaws.com", None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + "fs-deadbeef.efs.us-east-1.amazonaws.com" + ), + ) + config = _get_mock_config() + with pytest.raises(SystemExit) as ex: + mount_efs.match_device(config, "custom-cname.example.com", DEFAULT_NFS_OPTIONS) + + assert 0 != ex.value.code + out, err = capsys.readouterr() + assert "did not resolve to a valid DNS name" in err + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_fqdn_same_as_dns_name(mocker, capsys): + dns_name = "%s.efs.us-east-1.amazonaws.com" % FS_ID + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(dns_name), + ) + efs_fqdn_match = mount_efs.EFS_FQDN_RE.match(dns_name) + assert efs_fqdn_match + assert FS_ID == efs_fqdn_match.group("fs_id") 
+ + config = _get_mock_config() + ( + expected_dns_name, + ip_address, + ) = mount_efs.get_dns_name_and_fallback_mount_target_ip_address( + config, FS_ID, DEFAULT_NFS_OPTIONS + ) + assert dns_name == expected_dns_name + assert None == ip_address + + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_fqdn_same_as_dns_name_with_az(mocker, capsys): + dns_name = "%s.%s.efs.us-east-1.amazonaws.com" % (DEFAULT_AZ, FS_ID) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(dns_name), + ) + efs_fqdn_match = mount_efs.EFS_FQDN_RE.match(dns_name) + assert efs_fqdn_match + assert FS_ID == efs_fqdn_match.group("fs_id") + + config = _get_mock_config() + ( + expected_dns_name, + ip_address, + ) = mount_efs.get_dns_name_and_fallback_mount_target_ip_address( + config, FS_ID, OPTIONS_WITH_AZ + ) + assert dns_name == expected_dns_name + assert None == ip_address + for device, (fs_id, path, az) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS_WITH_AZ: + assert (fs_id, path, az) == mount_efs.match_device( + config, device, OPTIONS_WITH_AZ + ) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_with_az_dns_name_mount_az_not_in_option(mocker): + # When dns_name is provided for mounting, if the az is not provided in the mount option, also dns_name contains az + # info, verify that the az info returned is equal to the az info in the dns name + dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com" + config = _get_mock_config() + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=(dns_name, None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(dns_name), + ) + fsid, path, az = mount_efs.match_device(config, dns_name, 
DEFAULT_NFS_OPTIONS) + + assert az == "us-east-1a" + + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_with_az_dns_name_mount_az_in_option(mocker): + # When dns_name is provided for mounting, if the az is provided in the mount option, also dns_name contains az + # info, verify that the az info returned is equal to the az info in the dns name + dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com" + config = _get_mock_config() + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=(dns_name, None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(dns_name), + ) + fsid, path, az = mount_efs.match_device(config, dns_name, OPTIONS_WITH_AZ) + + assert az == "us-east-1a" + + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_with_dns_name_mount_az_in_option(mocker): + # When dns_name is mapping to the az_dns_name, and the az field is provided to the option, verify that the az info returned is + # equal to the az info in the dns name + dns_name = "example.random.com" + az_dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com" + config = _get_mock_config() + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=(az_dns_name, None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(az_dns_name), + ) + fsid, path, az = mount_efs.match_device(config, dns_name, OPTIONS_WITH_AZ) + + assert az == "us-east-1a" + + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_with_dns_name_mount_az_in_option_not_match(mocker, capsys): + # When dns_name is mapping to the az_dns_name, and the az field is provided to the option, while the two az value is not + # the same, verify that 
exception is thrown + dns_name = "example.random.com" + az_dns_name = "us-east-1b.fs-deadbeef.efs.us-east-1.amazonaws.com" + config = _get_mock_config() + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=(az_dns_name, None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return(az_dns_name), + ) + + with pytest.raises(SystemExit) as ex: + mount_efs.match_device(config, dns_name, OPTIONS_WITH_AZ) + + assert 0 != ex.value.code + out, err = capsys.readouterr() + assert "does not match the az provided" in err + utils.assert_not_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_ipv6_only_mount_target_resolves_via_fqdn(mocker): + """When an FQDN resolves to an IPv6-only mount target, match_device should + succeed using getaddrinfo (AF_INET6) instead of failing like gethostbyname_ex would. + """ + dns_name = "fs-deadbeef.efs.us-east-1.amazonaws.com" + ipv6_addr = "2600:1f16:1090:8802:228c:6404:76f8:e3c5" + config = _get_mock_config() + get_dns_name_mock = mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=(dns_name, None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + dns_name, family=socket.AF_INET6, ip=ipv6_addr + ), + ) + + for device, ( + expected_fs_id, + expected_path, + _, + ) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS: + assert (expected_fs_id, expected_path, None) == mount_efs.match_device( + config, device, DEFAULT_NFS_OPTIONS + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) + + +def test_match_device_ipv6_only_mount_target_with_az(mocker): + """IPv6-only mount target with AZ in the resolved FQDN.""" + dns_name = "us-east-1a.fs-deadbeef.efs.us-east-1.amazonaws.com" + ipv6_addr = "2600:1f16:1090:8802:228c:6404:76f8:e3c5" + config = _get_mock_config() + get_dns_name_mock = 
mocker.patch( + "mount_efs.get_dns_name_and_fallback_mount_target_ip_address", + return_value=(dns_name, None), + ) + getaddrinfo_mock = mocker.patch( + "socket.getaddrinfo", + return_value=_mock_getaddrinfo_return( + dns_name, family=socket.AF_INET6, ip=ipv6_addr + ), + ) + + for device, ( + expected_fs_id, + expected_path, + _, + ) in CORRECT_DEVICE_DESCRIPTORS_CNAME_DNS_WITH_AZ: + assert (expected_fs_id, expected_path, DEFAULT_AZ) == mount_efs.match_device( + config, device, OPTIONS_WITH_AZ + ) + utils.assert_called(get_dns_name_mock) + utils.assert_called(getaddrinfo_mock) diff --git a/test/mount_efs_test/test_mount_nfs.py b/test/mount_efs_test/test_mount_nfs.py index d9070846..d3f06dd6 100644 --- a/test/mount_efs_test/test_mount_nfs.py +++ b/test/mount_efs_test/test_mount_nfs.py @@ -331,6 +331,50 @@ def test_mount_nfs_not_retry_on_non_retryable_failure(mocker): utils.assert_not_called(optimize_readahead_window_mock) +def test_mount_nfs_not_retry_access_denied_without_access_point(mocker): + optimize_readahead_window_mock = mocker.patch("mount_efs.optimize_readahead_window") + + mocker.patch( + "subprocess.Popen", side_effect=[common.ACCESS_DENIED_FAILURE_POPEN.mock] + ) + + with pytest.raises(SystemExit) as ex: + mount_efs.mount_nfs( + _get_config(), + DNS_NAME, + "/", + "/mnt", + DEFAULT_OPTIONS, + ) + + assert 0 != ex.value.code + utils.assert_not_called(optimize_readahead_window_mock) + + +def test_mount_nfs_retry_access_denied_with_access_point(mocker): + optimize_readahead_window_mock = mocker.patch("mount_efs.optimize_readahead_window") + + mocker.patch( + "subprocess.Popen", return_value=common.ACCESS_DENIED_FAILURE_POPEN.mock + ) + + options = dict(DEFAULT_OPTIONS) + options["accesspoint"] = "fsap-12345" + + with pytest.raises(SystemExit) as ex: + mount_efs.mount_nfs( + _get_config(), + DNS_NAME, + "/", + "/mnt", + options, + ) + + assert 0 != ex.value.code + assert subprocess.Popen.call_count > 1 + 
utils.assert_not_called(optimize_readahead_window_mock) + + def test_mount_nfs_failure_after_all_attempts_fail(mocker): optimize_readahead_window_mock = mocker.patch("mount_efs.optimize_readahead_window") mocker.patch( diff --git a/test/mount_efs_test/test_write_stunnel_config_file.py b/test/mount_efs_test/test_write_stunnel_config_file.py index feaaa83d..b8606d0c 100644 --- a/test/mount_efs_test/test_write_stunnel_config_file.py +++ b/test/mount_efs_test/test_write_stunnel_config_file.py @@ -850,3 +850,24 @@ def test_write_stunnel_config_with_ipv6_and_legacy_stunnel(mocker, tmpdir): efs_proxy_enabled=False, ), ) + + +def test_write_stunnel_config_efs_proxy_skips_stunnel_options(mocker, tmpdir): + get_stunnel_options_mock = mocker.patch("mount_efs.get_stunnel_options") + mocker.patch("mount_efs.add_tunnel_ca_options") + + mount_efs.write_stunnel_config_file( + _get_config(mocker), + str(tmpdir), + FS_ID, + MOUNT_POINT, + PORT, + DNS_NAME, + VERIFY_LEVEL, + OCSP_ENABLED, + _get_mount_options_tls(), + DEFAULT_REGION, + efs_proxy_enabled=True, + ) + + get_stunnel_options_mock.assert_not_called() From f0c6c453f81437e894105903b6b56303c5e762ab Mon Sep 17 00:00:00 2001 From: Daniel Fajmon Date: Thu, 2 Apr 2026 14:28:27 +0200 Subject: [PATCH 50/51] UPSTREAM: : Add OpenShift files Add OpenShift specific: OWNERS & OWNER_ALIASES .ci-operator.yaml Dockerfile Botocore specifics: requirements.txt.ocp install-python-deps-ocp.sh Removed ci configs: .github .circleci --- .ci-operator.yaml | 4 + .circleci/config.yml | 377 ---------------------------- .github/PULL_REQUEST_TEMPLATE.md | 6 - .github/workflows/notify-slack.yaml | 65 ----- Dockerfile | 35 +++ OWNERS | 4 + OWNERS_ALIASES | 10 + install-python-deps-ocp.sh | 31 +++ requirements.txt.ocp | 8 + 9 files changed, 92 insertions(+), 448 deletions(-) create mode 100644 .ci-operator.yaml delete mode 100644 .circleci/config.yml delete mode 100644 .github/PULL_REQUEST_TEMPLATE.md delete mode 100644 .github/workflows/notify-slack.yaml 
create mode 100644 Dockerfile create mode 100644 OWNERS create mode 100644 OWNERS_ALIASES create mode 100755 install-python-deps-ocp.sh create mode 100644 requirements.txt.ocp diff --git a/.ci-operator.yaml b/.ci-operator.yaml new file mode 100644 index 00000000..a3628cf2 --- /dev/null +++ b/.ci-operator.yaml @@ -0,0 +1,4 @@ +build_root_image: + name: release + namespace: openshift + tag: rhel-9-release-golang-1.25-openshift-4.22 diff --git a/.circleci/config.yml b/.circleci/config.yml deleted file mode 100644 index b7920761..00000000 --- a/.circleci/config.yml +++ /dev/null @@ -1,377 +0,0 @@ -version: 2.1 -executors: - python: - parameters: - image: - type: string - docker: - - image: << parameters.image >> - linux: - parameters: - image: - type: string - docker: - - image: << parameters.image >> -commands: - runtest: - steps: - - checkout - - run: - name: Install Python dependencies in a virtual env - command: | - pip install --upgrade pip - pip install virtualenv - virtualenv -p $(which python) ~/efs-utils-virtualenv - source ~/efs-utils-virtualenv/bin/activate - pip install -r requirements.txt - - run: - name: Run all tests - command: | - source ~/efs-utils-virtualenv/bin/activate - make test - - store_artifacts: - path: build - build-deb: - steps: - - run: - name: Repo update - command: | - apt-get update - - run: - name: Install curl - command: | - apt-get -y install curl - - run: - name: Install latest Rust - command: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y - . 
"$HOME/.cargo/env" - - run: - name: Install golang - command: | - apt-get -y install wget - ARCH=$(dpkg --print-architecture) - VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) - wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - - echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - - run: - name: Install dependencies - command: | - DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata - if grep -q 'Ubuntu 20.04' /etc/os-release 2>/dev/null; then - apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext cmake gcc-10 g++-10 - update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-10 100 - update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-10 100 - elif grep -q 'trixie' /etc/os-release 2>/dev/null; then - apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext cmake gcc-13 g++-13 - else - apt-get -y install binutils git rustc cargo pkg-config libssl-dev gettext cmake gcc g++ - fi - - run: - name: Add local build repo as safe git directory - command: | - git config --global --add safe.directory /tmp/_circleci_local_build_repo - - checkout - - run: - name: Build DEB - command: | - . 
"$HOME/.cargo/env" - export PATH=$PATH:/usr/local/go/bin - if grep -q 'trixie' /etc/os-release 2>/dev/null; then - export CC=gcc-13 - export CXX=g++-13 - fi - rustc --version - cargo --version - go version - cmake --version - ./build-deb.sh - - run: - name: Install package - command: | - apt-get update - DEBIAN_FRONTEND=noninteractive apt-get -y install --fix-missing ./build/amazon-efs-utils*deb - - run: - name: Check installed successfully - command: | - mount.efs --version - build-rpm: - steps: - - checkout - - run: - name: Install golang - command: | - yum -y install wget tar gzip - ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/') - VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) - wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - - echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - - run: - name: Install dependencies - command: | - if grep -q '^ID="amzn"' /etc/os-release && grep -q '^VERSION_ID="2"' /etc/os-release 2>/dev/null; then - yum -y install rpm-build make systemd rust cargo openssl-devel cmake3 gcc gcc-c++ perl binutils - if [ ! 
-e /usr/bin/cmake ]; then - ln -sf /usr/bin/cmake3 /usr/bin/cmake - fi - else - yum -y install rpm-build make systemd rust cargo openssl-devel cmake gcc gcc-c++ perl binutils - fi - - run: - name: Build RPM - command: | - export PATH=$PATH:/usr/local/go/bin - go version - cmake --version - make rpm - - run: - name: Install package - command: | - yum -y install build/amazon-efs-utils*rpm - - run: - name: Check installed successfully - command: | - mount.efs --version - - run: - name: Check changelog - command: | - rpm -q --changelog amazon-efs-utils - build-rpm-rustup: - steps: - - run: - name: Install dependencies - command: | - yum install --skip-broken -y curl - - run: - name: Install latest Rust - command: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y - - run: - name: Install golang - command: | - yum -y install wget tar gzip - ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/') - VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) - wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - - echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - - checkout - - run: - name: Install dependencies - command: | - yum -y install rpm-build make systemd rust cargo openssl-devel cmake gcc gcc-c++ gcc13 gcc13-c++ perl binutils - - run: - name: Build RPM - command: | - . 
"$HOME/.cargo/env" - export PATH=$PATH:/usr/local/go/bin - export CC=gcc-13 - export CXX=g++-13 - rustc --version - go version - cmake --version - make rpm - - run: - name: Install package - command: | - yum -y install build/amazon-efs-utils*rpm - - run: - name: Check installed successfully - command: | - mount.efs --version - - run: - name: Check changelog - command: | - rpm -q --changelog amazon-efs-utils - - build-suse-rpm: - steps: - - checkout - - run: - name: Refresh source - command: | - zypper refresh - - run: - name: Install curl - command: | - zypper install -y curl - - run: - name: Install latest Rust - command: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y - - run: - name: Install golang - command: | - zypper install -y wget tar gzip - ARCH=$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/') - VERSION=$(wget -qO- https://go.dev/VERSION?m=text | head -1) - wget -qO- https://go.dev/dl/${VERSION}.linux-${ARCH}.tar.gz | tar -C /usr/local -xzf - - echo 'export PATH=$PATH:/usr/local/go/bin' >> $HOME/.bashrc - - run: - name: Install dependencies - command: | - zypper install -y --force-resolution rpm-build - if grep -q 'Tumbleweed' /etc/os-release 2>/dev/null; then - zypper install -y make systemd rust cargo openssl-devel cmake gcc13 gcc13-c++ perl binutils - else - zypper install -y make systemd rust cargo openssl-devel cmake gcc gcc-c++ perl binutils - fi - - run: - name: Build RPM - command: | - . 
"$HOME/.cargo/env" - export PATH=$PATH:/usr/local/go/bin - if grep -q 'Tumbleweed' /etc/os-release 2>/dev/null; then - export CC=gcc-13 - export CXX=g++-13 - fi - rustc --version - go version - cmake --version - make rpm - - run: - name: Install package - command: | - zypper --no-gpg-checks install -y build/amazon-efs-utils*rpm - - run: - name: Check installed successfully - command: | - mount.efs --version - - run: - name: Check changelog - command: | - rpm -q --changelog amazon-efs-utils - build-centos-repo: - steps: - - run: - name: change the mirrors to vault.centos.org - command: | - sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* -jobs: - test: - parameters: - image: - type: string - executor: - name: python - image: << parameters.image >> - steps: - - runtest - build-deb-package: - parameters: - image: - type: string - executor: - name: linux - image: << parameters.image >> - steps: - - build-deb - build-rpm-package: - parameters: - image: - type: string - executor: - name: linux - image: << parameters.image >> - steps: - - build-rpm - build-rpm-package-rustup: - parameters: - image: - type: string - executor: - name: linux - image: << parameters.image >> - steps: - - build-rpm-rustup - build-suse-rpm-package: - parameters: - image: - type: string - executor: - name: linux - image: << parameters.image >> - steps: - - build-suse-rpm - build-centos-rpm-package: - parameters: - image: - type: string - executor: - name: linux - image: << parameters.image >> - steps: - - build-centos-repo - - build-rpm-rustup -workflows: - workflow: - jobs: - - test: - name: python3_8 - image: python:3.8.13 - - test: - name: python3_9 - image: python:3.9.13 - - test: - name: python3_10 - image: python:3.10.13 - - test: - name: python3_11 - image: python:3.11.9 - - test: - name: python3_12 - image: python:3.12.4 - - build-deb-package: - name: ubuntu-latest - 
image: ubuntu:latest - - build-deb-package: - name: ubuntu18 - image: ubuntu:18.04 - - build-deb-package: - name: ubuntu20 - image: ubuntu:20.04 - - build-deb-package: - name: ubuntu22 - image: ubuntu:22.04 - - build-deb-package: - name: ubuntu24 - image: ubuntu:24.04 - - build-deb-package: - name: debian11 - image: debian:bullseye - - build-deb-package: - name: debian12 - image: debian:bookworm - - build-deb-package: - name: debian13 - image: debian:trixie - - build-rpm-package: - name: rocky8 - image: rockylinux/rockylinux:8 - - build-rpm-package: - name: amazon-linux-latest - image: amazonlinux:latest - - build-rpm-package: - name: amazon-linux-2 - image: amazonlinux:2 - - build-rpm-package-rustup: - name: fedora41 - image: fedora:41 - - build-suse-rpm-package: - name: opensuse-leap15.1 - image: opensuse/leap:15.1 - - build-suse-rpm-package: - name: opensuse-leap15.2 - image: opensuse/leap:15.2 - - build-suse-rpm-package: - name: opensuse-leap15.3 - image: opensuse/leap:15.3 - - build-suse-rpm-package: - name: opensuse-leap15.4 - image: opensuse/leap:15.4 - - build-suse-rpm-package: - name: opensuse-leap-latest - image: opensuse/leap:latest - - build-suse-rpm-package: - name: opensuse-tumbleweed - image: opensuse/tumbleweed \ No newline at end of file diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md deleted file mode 100644 index 6bdaa999..00000000 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ /dev/null @@ -1,6 +0,0 @@ -*Issue #, if available:* - -*Description of changes:* - - -By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 
diff --git a/.github/workflows/notify-slack.yaml b/.github/workflows/notify-slack.yaml deleted file mode 100644 index f6d7d403..00000000 --- a/.github/workflows/notify-slack.yaml +++ /dev/null @@ -1,65 +0,0 @@ -name: Slack Notifications - -on: - issues: - types: [opened, reopened, edited] - issue_comment: - types: [created, edited] - pull_request_target: - types: [opened, reopened, synchronize] - -permissions: - contents: read - -jobs: - notify: - runs-on: ubuntu-latest - steps: - - name: Send issue notification to Slack - if: github.event_name == 'issues' - uses: slackapi/slack-github-action@v2.1.1 - with: - webhook: ${{ secrets.SLACK_WEBHOOK_URL }} - webhook-type: incoming-webhook - payload: | - { - "type": "issue_update", - "action": "${{ github.event.action }}", - "number": ${{ github.event.issue.number }}, - "title": "${{ github.event.issue.title }}", - "url": "${{ github.event.issue.html_url }}", - "user": "${{ github.event.issue.user.login }}" - } - - - name: Send pull request notification to Slack - if: github.event_name == 'pull_request_target' - uses: slackapi/slack-github-action@v2.1.1 - with: - webhook: ${{ secrets.SLACK_WEBHOOK_URL }} - webhook-type: incoming-webhook - payload: | - { - "type": "pr_update", - "action": "${{ github.event.action }}", - "number": ${{ github.event.pull_request.number }}, - "title": "${{ github.event.pull_request.title }}", - "url": "${{ github.event.pull_request.html_url }}", - "user": "${{ github.event.pull_request.user.login }}" - } - - - name: Send issue comments notification to Slack - if: github.event_name == 'issue_comment' && !github.event.issue.pull_request - uses: slackapi/slack-github-action@v2.1.1 - with: - webhook: ${{ secrets.SLACK_WEBHOOK_URL }} - webhook-type: incoming-webhook - payload: | - { - "type": "issue_comment", - "action": "${{ github.event.action }}", - "number": ${{ github.event.issue.number }}, - "title": "${{ github.event.issue.title }}", - "url": "${{ github.event.comment.html_url }}", - "body": 
"${{ github.event.comment.body }}", - "user": "${{ github.event.comment.user.login }}" - } diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..cde05df7 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,35 @@ +# Dockefile for OpenShift base image named "aws-efs-utils-base" +# +# The image contains: +# - /sbin/mount.efs +# - /usr/bin/amazon-efs-mount-watchdog +# - /etc/amazon/* +# - /var/log/amazon/efs/* +# - botocore3 (to be able to use zonal EFS volumes) + +FROM registry.ci.openshift.org/ocp/4.22:base-rhel9 + +# create log file +RUN mkdir -p /var/log/amazon/efs +RUN touch /var/log/amazon/efs/mount.log + +# certs +COPY ./dist/efs-utils.crt /etc/amazon/efs/efs-utils.crt +RUN chmod 644 /etc/amazon/efs/efs-utils.crt +COPY ./dist/efs-utils.conf /etc/amazon/efs/efs-utils.conf +RUN chmod 444 /etc/amazon/efs/efs-utils.conf +COPY ./src/mount_efs/__init__.py /sbin/mount.efs +RUN chmod 755 /sbin/mount.efs +COPY ./src/watchdog/__init__.py /usr/bin/amazon-efs-mount-watchdog +RUN chmod 755 /usr/bin/amazon-efs-mount-watchdog + +# Copy cachito / hermeto files used in the build pipeline. Copy an innocent file if the env. vars are not set. +ARG REMOTE_SOURCES +ARG REMOTE_SOURCES_DIR +ENV REMOTE_SOURCES_SRC=${REMOTE_SOURCES:-"requirements.txt"} +ENV REMOTE_SOURCES_DST=${REMOTE_SOURCES_DIR:-"/remote_sources_dir/"} +COPY "$REMOTE_SOURCES_SRC" "$REMOTE_SOURCES_DST" + +# Install python dependencies (i.e. botocore). 
+COPY requirements.txt.ocp install-python-deps-ocp.sh /src/ +RUN /src/install-python-deps-ocp.sh diff --git a/OWNERS b/OWNERS new file mode 100644 index 00000000..c3584925 --- /dev/null +++ b/OWNERS @@ -0,0 +1,4 @@ +approvers: +- openshift-storage-maintainers +component: "Storage" +subcomponent: Kubernetes External Components diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES new file mode 100644 index 00000000..8cb9d2c2 --- /dev/null +++ b/OWNERS_ALIASES @@ -0,0 +1,10 @@ +aliases: + openshift-storage-maintainers: + - jsafrane + - tsmetana + - gnufied + - dobsonj + - RomanBednar + - mpatlasov + - dfajmon + - rhrmo diff --git a/install-python-deps-ocp.sh b/install-python-deps-ocp.sh new file mode 100755 index 00000000..a0543d72 --- /dev/null +++ b/install-python-deps-ocp.sh @@ -0,0 +1,31 @@ +#!/usr/bin/bash + +# Install all efs-utils deps. Prefer RPMs, but in the end install the +# remaining ones (= botocore) using pip. +# Make sure pip installs from a local cache in the Red Hat build pipeline. +# Everywhere else, let pip install from the internet. + +set -euxo pipefail + +yum update -y +yum install --setopt=tsflags=nodocs -y nfs-utils stunnel python3 openssl util-linux which make python3-pip python3-jmespath python3-urllib3 python3-six.noarch python3-wheel python3-dateutil +yum clean all +rm -rf /var/cache/yum/* + +REQS=/src/requirements.txt.ocp + +if [[ -v REMOTE_SOURCES ]]; then + echo "Red Hat build pipeline detected" + ls -lR "${REMOTE_SOURCES_DIR}/" # DEBUG + + # Load cachito variables and requirements.txt. + # This re-configures pip to use the build system cache. + if [[ -d "${REMOTE_SOURCES_DIR}/cachito-gomod-with-deps" ]]; then + source "${REMOTE_SOURCES_DIR}/cachito-gomod-with-deps/cachito.env" + REQS="${REMOTE_SOURCES_DIR}/cachito-gomod-with-deps/app/requirements.txt.ocp" + fi +fi + +# Finally, install all remaining requirements, ideally just botocore. +# --no-build-isolation: use system setuptools and /bin/wheel from RPM packages. 
+python3 -m pip install -r "${REQS}" --no-build-isolation diff --git a/requirements.txt.ocp b/requirements.txt.ocp new file mode 100644 index 00000000..f9c8292f --- /dev/null +++ b/requirements.txt.ocp @@ -0,0 +1,8 @@ +# OpenShift carry: requirements.txt tailored for OpenShift image build. + +# WARNING: to keep nr. of deps small, we remove most of unit-test dependencies! +# `make test` won't work in the image build env. + +# All versions except botocore are set to the versions that are available in RHEL. +# botocore is not in RHEL9, download it from the internet (for local development) / cache (for real image builds). +botocore==1.34.140 From 1117c13f1b573243f344c6e5cea2018b7cb76666 Mon Sep 17 00:00:00 2001 From: Daniel Fajmon Date: Thu, 2 Apr 2026 14:29:18 +0200 Subject: [PATCH 51/51] UPSTREAM: : Enable stunnel permanently This needs to be communicated with upstream to have better solution. --- src/mount_efs/__init__.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/mount_efs/__init__.py b/src/mount_efs/__init__.py index 30aa5336..e68cf7ce 100755 --- a/src/mount_efs/__init__.py +++ b/src/mount_efs/__init__.py @@ -2059,11 +2059,14 @@ def check_if_nfsvers_is_compatible_with_macos(options): # Use stunnel instead of efs-proxy for tls mounts, # and attach non-tls mounts directly to the mount target. def legacy_stunnel_mode_enabled(options, config): - return ( - LEGACY_STUNNEL_MOUNT_OPTION in options - or check_if_platform_is_mac() - or is_ocsp_enabled(config, options) - ) + # OpenShift CARRY: Enable stunnel for all connections + return True + + # return ( + # LEGACY_STUNNEL_MOUNT_OPTION in options + # or check_if_platform_is_mac() + # or is_ocsp_enabled(config, options) + # ) def get_nfs_mount_options(options, config):