├── .gitignore
├── LICENSE
├── README.md
├── bind9
│   ├── Dockerfile
│   ├── README.md
│   ├── ext_sync.sh
│   ├── run_named.sh
│   └── ssh_config
├── isc-dhcp
│   ├── Dockerfile
│   ├── README.md
│   ├── ext_sync.sh
│   ├── run_isc_dhcp.sh
│   └── ssh_config
└── s3-backed-ftp
    ├── Dockerfile
    ├── README.md
    ├── add_users_in_container.sh
    ├── env.list.example
    ├── s3-fuse.sh
    ├── sshd_config
    ├── supervisord.conf
    ├── users.sh
    └── vsftpd.conf

/.gitignore:
--------------------------------------------------------------------------------
env.list

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright (c) 2015 Factual

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Open Dockerfiles
A repository for Factual's open source Dockerfiles

--------------------------------------------------------------------------------
/bind9/Dockerfile:
--------------------------------------------------------------------------------
FROM factual/docker-base
RUN apt-get -y update && apt-get -y install bind9 git-core wget

RUN rm -rf /etc/bind

ADD ssh_config /root/.ssh/config
ADD run_named.sh /etc/my_init.d/99_named

RUN mkdir -p /var/run/named /var/log/named
RUN chown -R bind:bind /var/run/named /var/log/named

ADD ext_sync.sh /

VOLUME ["/conf"]

EXPOSE 53 53/udp 953

--------------------------------------------------------------------------------
/bind9/README.md:
--------------------------------------------------------------------------------
# bind9 (named) in a Docker container

This image fetches its config from GitHub or a URL path and does a graceful reload when the config changes. As a result, it can be part of a highly available but independent set of DNS servers.
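
The synced directory must provide a complete BIND config rooted at `named.conf`: at start-up the container links the synced config to `/etc/bind` and runs `named -c /etc/bind/named.conf`. For illustration only, a hypothetical config layout (the zone and file names are made up; only `named.conf` itself is required):

```
bind9/
├── named.conf          # required: named is started against this file
├── named.conf.options
└── db.example.com
```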

## Running

### GitHub Checkout Example

```bash
docker run --restart=always --name bind9 -e GITHUB_REPO="git@github.com:Factual/some-repo-where-we-keep-configs.git" -e GITHUB_SSH_KEY="`cat ~/.ssh/my_deploy_key`" -e GIT_PATH="/bind9" -e REFRESH=60 factual/bind9
```

### URL Example

```bash
docker run --restart=always --name bind9 -e URL="https://my-config-files.factual.com/services/bind9/" -e REFRESH=60 factual/bind9
```

## Environment Variables

### GitHub Sync

- GITHUB_REPO
- GITHUB_SSH_KEY
- GIT_PATH
- GIT_BRANCH (default: master)

### URL Sync

- URL

### Common

- REFRESH (default: 300)

--------------------------------------------------------------------------------
/bind9/ext_sync.sh:
--------------------------------------------------------------------------------
#!/bin/bash

REFRESH=${REFRESH:-300}

function config_changed() {
    res=$(diff -qr /conf/pending/ /conf/active/)
    if [ -n "$res" ]; then
        return 0
    else
        return 1
    fi
}

function get_updates() {
    if [ -n "$GITHUB_REPO" ]; then
        get_updates_from_github && sync_and_reload
    elif [ -n "$URL" ]; then
        get_updates_from_url && sync_and_reload
    fi
}

function sync_and_reload() {
    mv /conf/active /conf/backup
    mkdir -p /conf/active
    cp -r /conf/pending/"$GIT_PATH"/* /conf/active/
    echo "Validating config and reloading..."
    if config_valid; then
        echo "Config valid. Reloading..."
        rndc reload
        rm -rf /conf/backup
    else
        echo "Config NOT valid. Reverting..."
        rm -rf /conf/active
        mv /conf/backup /conf/active
    fi
}

function get_updates_from_github() {
    git_result=$(echo -n "$(git pull)")
    if [ "$git_result" = "Already up-to-date." ]; then
        # already current, return false
        return 1
    else
        # time to update, return true
        return 0
    fi
}

function config_valid() {
    named-checkconf -z /etc/bind/named.conf
}

function get_updates_from_url() {
    wget -nd -r -nc -np -e robots=off -R "index.html*" "$URL"
    if config_changed; then
        return 0
    else
        return 1
    fi
}

sleep 20

while true; do
    cd /conf/pending/
    get_updates
    sleep "$REFRESH"
done

--------------------------------------------------------------------------------
/bind9/run_named.sh:
--------------------------------------------------------------------------------
#!/bin/bash
GIT_BRANCH=${GIT_BRANCH:-"master"}

function prepare_once() {
    mkdir -p /conf/active/
    mkdir -p /conf/pending/
    ln -s /conf/active /etc/bind
    cd /conf/pending/
    if [ -n "$GITHUB_REPO" ]; then
        prepare_once_github
    elif [ -n "$URL" ]; then
        prepare_once_url
    else
        echo "No repo or URL supplied" && exit 1
    fi
    rsync -arz --delete-after /conf/pending/"$GIT_PATH"/* /conf/active/
    return 0
}

function prepare_once_github() {
    echo "$GITHUB_SSH_KEY" > ~/.ssh/github_key && chmod 600 ~/.ssh/github_key
    if [ -z "$GIT_PATH" ]; then
        git clone "$GITHUB_REPO" --branch "$GIT_BRANCH" --single-branch .
    else
        git clone -n "$GITHUB_REPO" --branch "$GIT_BRANCH" --single-branch .
        git config core.sparseCheckout true
        echo "$GIT_PATH" >> .git/info/sparse-checkout
        git checkout "$GIT_BRANCH"
    fi
}

function prepare_once_url() {
    return 0
}

echo "Initializing..."
prepare_once && echo "Done with initialization."
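
# ext_sync.sh polls the config source every $REFRESH seconds in the
# background; named itself runs in the foreground (-g) as the container's
# main process.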

/ext_sync.sh &

echo "Starting named"
/usr/sbin/named -g -c /etc/bind/named.conf -u bind

--------------------------------------------------------------------------------
/bind9/ssh_config:
--------------------------------------------------------------------------------
Host github.com
  HostName github.com
  User git
  IdentityFile ~/.ssh/github_key
  StrictHostKeyChecking no

--------------------------------------------------------------------------------
/isc-dhcp/Dockerfile:
--------------------------------------------------------------------------------
FROM factual/docker-base
MAINTAINER Factual Sysops

RUN export DEBIAN_FRONTEND=noninteractive && \
    apt-get -q update && \
    apt-get install -y -q git-core wget isc-dhcp-server

COPY ssh_config /root/.ssh/config
COPY run_isc_dhcp.sh /etc/my_init.d/99_isc-dhcp
COPY ext_sync.sh /etc/service/isc-dhcp-sync/run

# Remove sample dhcp config directory/files
RUN rm -rf /etc/dhcp && \
    mkdir -p /var/run/dhcpd /var/log/dhcpd && \
    chown -R dhcpd:dhcpd /var/run/dhcpd /var/log/dhcpd

# Need to check and see if all these ports are needed
# Or if only 67 is needed
# DHCP server
EXPOSE 67/udp
# DHCPv6 server
EXPOSE 547/udp
# DHCP failover protocol
EXPOSE 647
# DHCP failover protocol
EXPOSE 847

CMD [ "/sbin/my_init" ]

--------------------------------------------------------------------------------
/isc-dhcp/README.md:
--------------------------------------------------------------------------------
# isc-dhcp in a Docker container

This image fetches all DHCP config files from GitHub or a URL path and reloads when it detects changes.

If checking out from GitHub you can perform a sparse checkout of the repo by supplying a `GIT_PATH` environment variable. This is the directory that will be checked out from the GitHub repo; see the sketch below.

To make the server listen on a single interface, set the `INTERFACE` environment variable to the interface you want. Otherwise dhcpd will listen on all available interfaces.
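
Under the hood the `GIT_PATH` checkout is a plain git sparse checkout; this is roughly what `run_isc_dhcp.sh` runs:

```
git clone -n "$GITHUB_REPO" --branch "$GIT_BRANCH" --single-branch .
git config core.sparseCheckout true
echo "$GIT_PATH" >> .git/info/sparse-checkout
git checkout "$GIT_BRANCH"
```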

# Running
## GitHub Checkout Example
```
docker run --restart=always --name isc-dhcp -e GITHUB_REPO="git@github.com:Factual/some-repo-where-we-keep-configs.git" -e GITHUB_SSH_KEY="`cat ~/.ssh/my_deploy_key`" -e GIT_PATH="/isc-dhcp" -e INTERFACE="eth0" -e REFRESH=300 factual/isc-dhcp
```

## URL Example
```
docker run --restart=always --name isc-dhcp -e URL="https://my-config-files.factual.com/services/isc-dhcp/" -e REFRESH=300 factual/isc-dhcp
```

# Env Variables

### GitHub Sync
- GITHUB_REPO
- GITHUB_SSH_KEY
- GIT_PATH
- GIT_BRANCH (default: master)

### URL Sync
- URL

### Common
- REFRESH (default: 300)
- INTERFACE (optional; if unset, dhcpd listens on all interfaces)

--------------------------------------------------------------------------------
/isc-dhcp/ext_sync.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

REFRESH=${REFRESH:-300}

function config_changed() {
    res=$(diff -qr /conf/pending/ /conf/active/)
    if [ -n "$res" ]; then
        return 0
    else
        return 1
    fi
}

function get_updates() {
    if [ -n "$GITHUB_REPO" ]; then
        get_updates_from_github && sync_and_reload
    elif [ -n "$URL" ]; then
        get_updates_from_url && sync_and_reload
    fi
}

function sync_and_reload() {
    mv /conf/active /conf/backup
    mkdir -p /conf/active
    cp -r /conf/pending/"$GIT_PATH"/* /conf/active
    echo "Validating config and reloading..."
    if config_valid; then
        echo "Config valid. Reloading..."
        /etc/init.d/isc-dhcp-server restart
        rm -r /conf/backup
    else
        echo "Config NOT valid. Reverting..."
        rm -rf /conf/active
        mv /conf/backup /conf/active
    fi
}

function get_updates_from_github() {
    git_result=$(echo -n "$(git pull)")
    if [ "$git_result" = "Already up-to-date." ]; then
        return 1
    else
        return 0
    fi
}

function config_valid() {
    /usr/sbin/dhcpd -t -cf /etc/dhcp/dhcpd.conf
}

function get_updates_from_url() {
    wget -nd -r -nc -e robots=off -R "index.html*" "$URL"
    if config_changed; then
        return 0
    else
        return 1
    fi
}

sleep 20

while true; do
    cd /conf/pending
    # get_updates returns non-zero when there is nothing new;
    # don't let set -e kill the loop in that case
    get_updates || true
    sleep "$REFRESH"
done

--------------------------------------------------------------------------------
/isc-dhcp/run_isc_dhcp.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

GIT_BRANCH="${GIT_BRANCH:-master}"

function prepare_once() {
    mkdir -p /conf/active/
    mkdir -p /conf/pending/
    ln -s /conf/active /etc/dhcp
    cd /conf/pending/
    if [ -n "$GITHUB_REPO" ]; then
        # Use github repo
        prepare_once_github
    elif [ -n "$URL" ]; then
        # Use a url
        prepare_once_url
    else
        echo "No repo or URL supplied" && exit 1
    fi
    rsync -arz --delete-after /conf/pending/"$GIT_PATH"/* /conf/active/
    return 0
}

function prepare_once_github() {
    echo "$GITHUB_SSH_KEY" > ~/.ssh/github_key && chmod 600 ~/.ssh/github_key
    if [ -z "$GIT_PATH" ]; then
        # If no git_path is set checkout whole repo
        git clone "$GITHUB_REPO" --branch "$GIT_BRANCH" --single-branch .
    else
        # If git_path is set do sparse checkout of directory we want
        git clone -n "$GITHUB_REPO" --branch "$GIT_BRANCH" --single-branch .
        git config core.sparseCheckout true
        echo "$GIT_PATH" >> .git/info/sparse-checkout
        git checkout "$GIT_BRANCH"
    fi
}

function prepare_once_url() {
    return 0
}

echo "Initializing..."
if [ -z "$GIT_PATH" ]; then
    echo "GIT_PATH not set...checking out whole git repo"
else
    echo "GIT_PATH set...checking out directory $GIT_PATH in $GITHUB_REPO"
fi
prepare_once && echo "Done with initialization"
chown -R dhcpd:dhcpd /etc/dhcp

if [ -n "$INTERFACE" ]; then
    echo "Interface $INTERFACE given"
    /usr/sbin/dhcpd -cf /etc/dhcp/dhcpd.conf "$INTERFACE"
else
    echo "No interface given"
    /usr/sbin/dhcpd -cf /etc/dhcp/dhcpd.conf
fi

--------------------------------------------------------------------------------
/isc-dhcp/ssh_config:
--------------------------------------------------------------------------------
Host github.com
  HostName github.com
  User git
  IdentityFile ~/.ssh/github_key
  StrictHostKeyChecking no

--------------------------------------------------------------------------------
/s3-backed-ftp/Dockerfile:
--------------------------------------------------------------------------------
FROM factual/docker-base

RUN apt-get -y update && apt-get -y install --no-install-recommends \
    automake \
    autotools-dev \
    g++ \
    git \
    libcurl4-gnutls-dev \
    libfuse-dev \
    libssl-dev \
    libxml2-dev \
    make \
    pkg-config \
    python3-pip \
    vsftpd \
    openssh-server \
    supervisor \
    && rm -rf /var/lib/apt/lists/*

RUN pip3 install awscli

RUN git clone https://github.com/s3fs-fuse/s3fs-fuse.git && \
    cd s3fs-fuse && \
    ./autogen.sh && \
    ./configure && \
    make && \
    make install

RUN mkdir -p /home/aws/s3bucket/

ADD s3-fuse.sh /usr/local/

ADD vsftpd.conf /etc/vsftpd.conf

RUN chown root:root /etc/vsftpd.conf

ADD sshd_config /etc/ssh/sshd_config

ADD users.sh /usr/local/

ADD add_users_in_container.sh /usr/local/

RUN echo "/usr/sbin/nologin" >> /etc/shells

COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf

EXPOSE 21 22

CMD ["/usr/bin/supervisord"]

--------------------------------------------------------------------------------
/s3-backed-ftp/README.md:
--------------------------------------------------------------------------------
# S3-Backed-FTP Server

An FTP/SFTP server that uses s3fs to mount an external S3 bucket as the FTP/SFTP storage.

More info [here](http://cloudacademy.com/blog/s3-ftp-server/).

## Usage

To run:

1. Replace the `env.list.example` file with a real `env.list` file with the correct variables filled in.
   - Add users to the `USERS` environment variable. These should be listed in the form `username:hashedpassword`, each separated by a space.
   - Passwords for those users should be hashed. There are several ways to hash a user password. A common way is to execute a command like the following: `openssl passwd -crypt {your_password}`. Substitute `{your_password}` with the one you want to hash.
   - You may also use non-hashed passwords if storing passwords in plaintext is fine.
     To do this, change the line `echo $u | chpasswd -e` to `echo $u | chpasswd` in the `users.sh` file.
   - Specify the S3 buckets where the files (`FTP_BUCKET`) and configs (`CONFIG_BUCKET`) will be stored.
   - If you are running this container inside an AWS EC2 instance with an assigned IAM role, then specify its name in the `IAM_ROLE` environment variable.
   - If you do not have an IAM role attached to your EC2 instance or wherever you are running this, then you have to specify the AWS credentials that will be used to access S3. These are the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` keys.

2. If you have changed other files aside from the `env.list` file, then you have to build the Docker image using:

   - `docker build --rm -t <image-name> path/to/dockerfile/folder`

3. Then after building the image (if necessary), run using:

   - `docker run --rm -p 21:21 -p 222:22 -p 30000-30100:30000-30100 --name <container-name> --cap-add SYS_ADMIN --device /dev/fuse --env-file env.list <image-name>`
   - If you would like the container to restart after a reboot then use:
     * `docker run --restart=always -p 21:21 -p 222:22 -p 30000-30100:30000-30100 --name <container-name> --cap-add SYS_ADMIN --device /dev/fuse --env-file env.list <image-name>`
   - If the `env.list` file is named differently, change accordingly.
   - If you don't want to use the cap-add and device options you could also just use the privileged option instead:
     * `docker run --restart=always -p 21:21 -p 222:22 -p 30000-30100:30000-30100 --privileged --env-file env.list <image-name>`

Note that the published passive port range 30000-30100 matches `pasv_min_port`/`pasv_max_port` in `vsftpd.conf`; if you change one, change the other.

## Environment Variables

1. `USERS` = List of users to add to the FTP/SFTP server. Listed in the form `username:hashedpassword`, each separated by a space.
2. `FTP_BUCKET` = S3 bucket where the FTP/SFTP users' data will be stored.
3. `CONFIG_BUCKET` = S3 bucket where the config data (the `env.list` file) will be stored.
4. `IAM_ROLE` = Name of the IAM role linked to the EC2 instance the container is running in.

### Optional Environment Variables
These two environment variables only need to be set if there is no IAM role linked to the EC2 instance.

1. `AWS_ACCESS_KEY_ID` = IAM user account access key.
2. `AWS_SECRET_ACCESS_KEY` = IAM user account secret access key.

**Enjoy!**

--------------------------------------------------------------------------------
/s3-backed-ftp/add_users_in_container.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# This script will update the env.list file (the file containing the USERS environment variable) and add the new users if there are any.
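# It runs continuously under supervisord, alongside sshd and vsftpd (see supervisord.conf).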
# Will check for new users at a given time interval (adjust SLEEP_DURATION below)

FTP_DIRECTORY="/home/aws/s3bucket/ftp-users"
CONFIG_FILE="env.list" # May need to modify config file name to reflect future changes in env file location/name
SLEEP_DURATION=60
# Change these next two variables to set different permissions for files/directories
# These were the vsftpd defaults, so change them if necessary
FILE_PERMISSIONS=644
DIRECTORY_PERMISSIONS=755

add_users() {
    aws s3 cp "s3://$CONFIG_BUCKET/$CONFIG_FILE" ~/"$CONFIG_FILE"
    USERS=$(grep '^USERS' ~/"$CONFIG_FILE" | cut -d '=' -f2)

    for u in $USERS; do
        IFS=: read -r username passwd <<< "$u"

        # If the account exists set the password again,
        # in case the password changed in the env file
        if getent passwd "$username" >/dev/null 2>&1; then
            echo "$u" | chpasswd -e

            # Fix for issue when pulling files that were uploaded directly to S3 (through the AWS web console)
            # Permissions when uploaded directly through the S3 web client were set as:
            # 000 root:root
            # This would not allow ftp users to read the files

            # Search for files and directories not owned correctly
            find "$FTP_DIRECTORY"/"$username"/files/* \( \! -user "$username" \! -group "$username" \) -print0 | xargs -0 -r chown "$username:$username"

            # Search for files with incorrect permissions
            find "$FTP_DIRECTORY"/"$username"/files/* -type f \! -perm "$FILE_PERMISSIONS" -print0 | xargs -0 -r chmod "$FILE_PERMISSIONS"

            # Search for directories with incorrect permissions
            find "$FTP_DIRECTORY"/"$username"/files/* -type d \! -perm "$DIRECTORY_PERMISSIONS" -print0 | xargs -0 -r chmod "$DIRECTORY_PERMISSIONS"

        fi

        # If the user account doesn't exist create it
        # as well as their home directory
        if ! getent passwd "$username" >/dev/null 2>&1; then
            useradd -d "$FTP_DIRECTORY/$username" -s /usr/sbin/nologin "$username"
            usermod -G ftpaccess "$username"

            mkdir -p "$FTP_DIRECTORY/$username"
            chown root:ftpaccess "$FTP_DIRECTORY/$username"
            chmod 750 "$FTP_DIRECTORY/$username"

            mkdir -p "$FTP_DIRECTORY/$username/files"
            chown "$username:ftpaccess" "$FTP_DIRECTORY/$username/files"
            chmod 750 "$FTP_DIRECTORY/$username/files"
        fi
    done
}

while true; do
    add_users
    sleep "$SLEEP_DURATION"
done

--------------------------------------------------------------------------------
/s3-backed-ftp/env.list.example:
--------------------------------------------------------------------------------
USERS=user:hashedpassword user2:hashedpassword2

FTP_BUCKET=

CONFIG_BUCKET=

IAM_ROLE=

# If an IAM role is not linked with the EC2 instance the docker container is running on, set these variables as well
AWS_ACCESS_KEY_ID=

AWS_SECRET_ACCESS_KEY=

--------------------------------------------------------------------------------
/s3-backed-ftp/s3-fuse.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Check first if the required FTP_BUCKET variable was provided; if not, abort.
if [ -z "$FTP_BUCKET" ]; then
    echo "You need to set the FTP_BUCKET environment variable. Aborting!"
    exit 1
fi

# Then check if there is an IAM_ROLE provided; if not, check if the AWS credentials were provided.
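# (s3fs needs either an instance IAM role, passed via its iam_role option
# below, or an access key pair written to ~/.passwd-s3fs.)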
if [ -z "$IAM_ROLE" ]; then
    echo "You did not set an IAM_ROLE environment variable. Checking if AWS access keys were provided ..."
fi

# Abort if the AWS_ACCESS_KEY_ID was not provided and an IAM_ROLE was not provided either.
if [ -z "$IAM_ROLE" ] && [ -z "$AWS_ACCESS_KEY_ID" ]; then
    echo "You need to set the AWS_ACCESS_KEY_ID environment variable. Aborting!"
    exit 1
fi

# Abort if the AWS_SECRET_ACCESS_KEY was not provided and an IAM_ROLE was not provided either.
if [ -z "$IAM_ROLE" ] && [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
    echo "You need to set the AWS_SECRET_ACCESS_KEY environment variable. Aborting!"
    exit 1
fi

# If there is no IAM_ROLE but the AWS credentials were provided, then set them as the s3fs credentials.
if [ -z "$IAM_ROLE" ] && [ -n "$AWS_ACCESS_KEY_ID" ] && [ -n "$AWS_SECRET_ACCESS_KEY" ]; then
    # set the aws access credentials from environment variables
    echo "$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY" > ~/.passwd-s3fs
    chmod 600 ~/.passwd-s3fs
fi

# Update the vsftpd.conf file to include the public IP address if running on an EC2 instance
if curl -s http://instance-data.ec2.internal > /dev/null ; then
    IP=$(curl -s http://169.254.169.254/latest/meta-data/public-ipv4)
    sed -i "s/^pasv_address=/pasv_address=$IP/" /etc/vsftpd.conf
else
    # Not on EC2: no public IP to discover, so bail out
    exit 1
fi

# start s3 fuse
# The credentials code above is not needed if an IAM role is attached to the EC2 instance:
# s3fs provides the iam_role option to grab those credentials automatically
/usr/local/bin/s3fs "$FTP_BUCKET" /home/aws/s3bucket -o allow_other -o mp_umask="0022" -o iam_role="$IAM_ROLE" #-d -d -f -o f2 -o curldbg
/usr/local/users.sh

--------------------------------------------------------------------------------
/s3-backed-ftp/sshd_config:
--------------------------------------------------------------------------------
# Change to no to disable tunnelled clear text passwords
PasswordAuthentication no

#Port 22

# Kerberos options
#KerberosAuthentication no
#KerberosGetAFSToken no
#KerberosOrLocalPasswd yes
#KerberosTicketCleanup yes

# GSSAPI options
#GSSAPIAuthentication no
#GSSAPICleanupCredentials yes

X11Forwarding yes
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
#UseLogin no

#MaxStartups 10:30:60
#Banner /etc/issue.net

# Allow client to pass locale environment variables
AcceptEnv LANG LC_*

#Subsystem sftp /usr/lib/openssh/sftp-server

# Set this to 'yes' to enable PAM authentication, account processing,
# and session processing. If this is enabled, PAM authentication will
# be allowed through the ChallengeResponseAuthentication and
# PasswordAuthentication. Depending on your PAM configuration,
# PAM authentication via ChallengeResponseAuthentication may bypass
# the setting of "PermitRootLogin without-password".
# If you just want the PAM account and session checks to run without
# PAM authentication, then enable this but set PasswordAuthentication
# and ChallengeResponseAuthentication to 'no'.
UsePAM yes

Subsystem sftp internal-sftp
#Subsystem sftp internal-sftp -u 0002

Match Group ftpaccess
    PasswordAuthentication yes
    ChrootDirectory %h
    X11Forwarding no
    AllowTcpForwarding no
    ForceCommand internal-sftp

--------------------------------------------------------------------------------
/s3-backed-ftp/supervisord.conf:
--------------------------------------------------------------------------------
[supervisord]
nodaemon=true
logfile=/var/log/supervisord.log

[program:s3-fuse]
command=/usr/local/s3-fuse.sh
autorestart=false
priority=1

[program:sshd]
command=/usr/sbin/sshd -D
autorestart=true

[program:vsftpd]
command=/usr/sbin/vsftpd
autorestart=true

[program:add_users_in_container]
command=/usr/local/add_users_in_container.sh
autorestart=true

--------------------------------------------------------------------------------
/s3-backed-ftp/users.sh:
--------------------------------------------------------------------------------
#!/bin/bash

FTP_DIRECTORY="/home/aws/s3bucket/ftp-users"

# Create a group for ftp users
groupadd ftpaccess

# Create a directory where all ftp/sftp users' home directories will go
mkdir -p "$FTP_DIRECTORY"
chown root:root "$FTP_DIRECTORY"
chmod 755 "$FTP_DIRECTORY"

# Expecting an environment variable called USERS to look like "bob:hashedbobspassword steve:hashedstevespassword"
for u in $USERS; do

    IFS=: read -r username passwd <<< "$u"

    # Validate the entry before touching the system
    if [ -z "$username" ] || [ -z "$passwd" ]; then
        echo "Invalid username:password combination '$u': please fix to create '$username'"
        continue
    fi

    # Users need to be created every time since stopping the docker container gets rid of them.
    useradd -d "$FTP_DIRECTORY/$username" -s /usr/sbin/nologin "$username"
    usermod -G ftpaccess "$username"

    # set the user's password
    echo "$u" | chpasswd -e

    if [ -d "$FTP_DIRECTORY/$username" ]; then
        echo "Skipping creation of '$username' directory: already exists"

        # Directory exists but permissions for it have to be set up anyway.
        chown root:ftpaccess "$FTP_DIRECTORY/$username"
        chmod 750 "$FTP_DIRECTORY/$username"
        chown "$username:ftpaccess" "$FTP_DIRECTORY/$username/files"
        chmod 750 "$FTP_DIRECTORY/$username/files"
    else
        echo "Creating '$username' directory..."

        # Root must own all directories leading up to and including the user's home directory
        mkdir -p "$FTP_DIRECTORY/$username"
        chown root:ftpaccess "$FTP_DIRECTORY/$username"
        chmod 750 "$FTP_DIRECTORY/$username"

        # Need a files sub-directory for the SFTP chroot
        mkdir -p "$FTP_DIRECTORY/$username/files"
        chown "$username:ftpaccess" "$FTP_DIRECTORY/$username/files"
        chmod 750 "$FTP_DIRECTORY/$username/files"
    fi

done

--------------------------------------------------------------------------------
/s3-backed-ftp/vsftpd.conf:
--------------------------------------------------------------------------------
# The default compiled in settings are fairly paranoid. This sample file
# loosens things up a bit, to make the ftp daemon more usable.
# Please see vsftpd.conf.5 for all compiled in defaults.
#
# READ THIS: This example file is NOT an exhaustive list of vsftpd options.
# Please read the vsftpd.conf.5 manual page to get a full idea of vsftpd's
# capabilities.
#
#
# Run standalone? vsftpd can run either from an inetd or as a standalone
# daemon started from an initscript.
listen=YES

# Run standalone with IPv6?
# Like the listen parameter, except vsftpd will listen on an IPv6 socket
# instead of an IPv4 one. This parameter and the listen parameter are mutually
# exclusive.
#listen_ipv6=YES

# Allow anonymous FTP? (Disabled by default)
anonymous_enable=NO

# Uncomment this to allow local users to log in.
local_enable=YES

# Uncomment this to enable any form of FTP write command.
write_enable=YES

# Default umask for local users is 077. You may wish to change this to 022,
# if your users expect that (022 is used by most other ftpd's)
local_umask=022

# Activate directory messages - messages given to remote users when they
# go into a certain directory.
dirmessage_enable=YES

# If enabled, vsftpd will display directory listings with the time
# in your local time zone. The default is to display GMT. The
# times returned by the MDTM FTP command are also affected by this
# option.
use_localtime=YES

# Activate logging of uploads/downloads.
#xferlog_enable=YES

# Make sure PORT transfer connections originate from port 20 (ftp-data).
connect_from_port_20=YES

# You may override where the log file goes if you like. The default is shown
# below.
#xferlog_file=/var/log/vsftpd.log

# If you want, you can have your log file in standard ftpd xferlog format.
# Note that the default log file location is /var/log/xferlog in this case.
#xferlog_std_format=YES

# You may change the default value for timing out an idle session.
#idle_session_timeout=600

# You may change the default value for timing out a data connection.
#data_connection_timeout=120

# It is recommended that you define on your system a unique user which the
# ftp server can use as a totally isolated and unprivileged user.
#nopriv_user=ftpsecure

# Enable this and the server will recognise asynchronous ABOR requests. Not
# recommended for security (the code is non-trivial). Not enabling it,
# however, may confuse older FTP clients.
#async_abor_enable=YES

# By default the server will pretend to allow ASCII mode but in fact ignore
# the request. Turn on the below options to have the server actually do ASCII
# mangling on files when in ASCII mode.
# Beware that on some FTP servers, ASCII support allows a denial of service
# attack (DoS) via the command "SIZE /big/file" in ASCII mode. vsftpd
# predicted this attack and has always been safe, reporting the size of the
# raw file.
# ASCII mangling is a horrible feature of the protocol.
#ascii_upload_enable=YES
#ascii_download_enable=YES

# You may fully customise the login banner string:
#ftpd_banner=Welcome to blah FTP service.

# You may specify a file of disallowed anonymous e-mail addresses. Apparently
# useful for combatting certain DoS attacks.
#deny_email_enable=YES
# (default follows)
#banned_email_file=/etc/vsftpd.banned_emails

# You may restrict local users to their home directories. See the FAQ for
# the possible risks in this before using chroot_local_user or
# chroot_list_enable below.
#chroot_local_user=YES

# You may specify an explicit list of local users to chroot() to their home
# directory. If chroot_local_user is YES, then this list becomes a list of
# users to NOT chroot().
# (Warning! chroot'ing can be very dangerous. If using chroot, make sure that
# the user does not have write access to the top level directory within the
# chroot)

chroot_local_user=YES
chroot_list_enable=NO
allow_writeable_chroot=YES

# This option should be the name of a directory which is empty. Also, the
# directory should not be writable by the ftp user. This directory is used
# as a secure chroot() jail at times vsftpd does not require filesystem
# access.
secure_chroot_dir=/home/aws/s3bucket/ftp-users

#chroot_list_enable=YES
# (default follows)
#chroot_list_file=/etc/vsftpd.chroot_list

# You may activate the "-R" option to the builtin ls. This is disabled by
# default to avoid remote users being able to cause excessive I/O on large
# sites. However, some broken FTP clients such as "ncftp" and "mirror" assume
# the presence of the "-R" option, so there is a strong case for enabling it.
#ls_recurse_enable=YES

# Customization

# Some of vsftpd's settings don't fit the filesystem layout by
# default.

# This string is the name of the PAM service vsftpd will use.
pam_service_name=vsftpd

# This option specifies the location of the RSA certificate to use for SSL
# encrypted connections.
rsa_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
# This option specifies the location of the RSA key to use for SSL
# encrypted connections.
rsa_private_key_file=/etc/ssl/private/ssl-cert-snakeoil.key

# pasv_address is filled in at start-up by s3-fuse.sh with the instance's public IP
pasv_address=
pasv_enable=YES
pasv_min_port=30000
pasv_max_port=30100
port_enable=YES

# Change ftp port from default 21
#listen_port=21

# Set a custom location for vsftpd log file
#vsftpd_log_file=/var/log/vsftpd.log

# Set up better log output
xferlog_enable=YES
xferlog_std_format=NO
dual_log_enable=YES
log_ftp_protocol=YES

--------------------------------------------------------------------------------