├── examples ├── imagesize │ ├── Dockerfile.small │ └── Dockerfile.large ├── shifter-in-a-box │ ├── config │ │ ├── test.squashfs │ │ ├── test.sh │ │ ├── start.sh │ │ ├── imagemanager.json │ │ └── udiRoot.conf │ ├── up │ ├── Dockerfile │ └── README.md └── shifter │ ├── batch.sl │ ├── helloworld.c │ └── Dockerfile ├── 02-shifter.md ├── 00-intro.md ├── 05-use-cases.md ├── LICENSE ├── checklist.md ├── 06-wrap-up.md ├── setup.md ├── README.md ├── 03-hands-on.md ├── 04-optimizing.md └── 01-hands-on.md /examples/imagesize/Dockerfile.small: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN dd if=/dev/zero of=/largefile bs=4k count=10000 && rm /largefile 4 | -------------------------------------------------------------------------------- /examples/imagesize/Dockerfile.large: -------------------------------------------------------------------------------- 1 | FROM alpine:3.3 2 | 3 | RUN dd if=/dev/zero of=/largefile bs=4k count=10000 4 | 5 | RUN rm /largefile 6 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/config/test.squashfs: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NERSC/ecp-container-tutorial/master/examples/shifter-in-a-box/config/test.squashfs -------------------------------------------------------------------------------- /examples/shifter/batch.sl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH -N 1 -C haswell -p regular 3 | #SBATCH --reservation=ecp19containers 4 | #SBATCH --image docker:ubuntu 5 | 6 | srun -N 1 shifter /app/app.py 7 | -------------------------------------------------------------------------------- /02-shifter.md: -------------------------------------------------------------------------------- 1 | # Intro to Shifter and how it is different 2 | 3 | A brief presentation on how Shifter works and how images should be prepared. 4 | 5 | Slides can be found in the Google Docs folder. 6 | -------------------------------------------------------------------------------- /00-intro.md: -------------------------------------------------------------------------------- 1 | # Welcome and intro to Containers 2 | 3 | ## Setup! 4 | 5 | If you haven't already installed Docker on your laptop and created a DockerHub account, please do it now. Instructions are in [Setup](setup.md). 6 | -------------------------------------------------------------------------------- /05-use-cases.md: -------------------------------------------------------------------------------- 1 | # Uses Cases in Scientific Computing 2 | 3 | We will present some examples and best practices from real world use cases. 4 | 5 | 6 | ## Science Use Cases 7 | Slides can be found in the Google Docs folder. 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2017, The Regents of the University of California, through 2 | Lawrence Berkeley National Laboratory (subject to receipt of any required 3 | approvals from the U.S. Dept. of Energy). All rights reserved. 
4 | -------------------------------------------------------------------------------- /checklist.md: -------------------------------------------------------------------------------- 1 | - Get reservation and training accounts 2 | - Run through material 3 | - update reservation info 4 | - add some material on singularity 5 | - run through material using play with docker 6 | 7 | - Prep training accounts 8 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/up: -------------------------------------------------------------------------------- 1 | docker tag scanon/shifterbox:17.11 scanon/shifterbox 2 | docker push scanon/shifterbox:17.11 3 | docker push scanon/shifterbox 4 | 5 | docker tag scanon/shifterbox:17.11 nersc/shifterbox:17.11 6 | docker push nersc/shifterbox:17.11 7 | docker push nersc/shifterbox 8 | 9 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/config/test.sh: -------------------------------------------------------------------------------- 1 | echo "Testing munge" 2 | munge -n > /dev/null 3 | if [ $? -ne 0 ] ; then 4 | echo "Munge not working" 5 | exit 6 | fi 7 | 8 | echo "Test pull" 9 | shifterimg pull busybox 10 | if [ $(shifterimg images|wc -l) -eq 0 ] ; then 11 | echo "Pull Failed" 12 | exit 1 13 | fi 14 | -------------------------------------------------------------------------------- /06-wrap-up.md: -------------------------------------------------------------------------------- 1 | ## Further activities at ECP 2 | - Container Breakout - 15:30 Founders I & II 3 | 4 | 5 | ## Bring us your problem 6 | - Have questions about Dockerizing your application? 7 | - Want to know more about installing Shifter at scale? 8 | - Come talk with us now, or email us at: 9 | - Shane Canon: scanon@lbl.gov 10 | -------------------------------------------------------------------------------- /examples/shifter/helloworld.c: -------------------------------------------------------------------------------- 1 | // Hello World MPI app 2 | #include <mpi.h> 3 | #include <unistd.h> 4 | 5 | int main(int argc, char** argv) { 6 | int size, rank; 7 | char buffer[1024]; 8 | 9 | MPI_Init(&argc, &argv); 10 | 11 | MPI_Comm_size(MPI_COMM_WORLD, &size); 12 | MPI_Comm_rank(MPI_COMM_WORLD, &rank); 13 | 14 | gethostname(buffer, 1024); 15 | 16 | printf("hello from %d of %d on %s\n", rank, size, buffer); 17 | 18 | MPI_Barrier(MPI_COMM_WORLD); 19 | 20 | MPI_Finalize(); 21 | return 0; 22 | } 23 | -------------------------------------------------------------------------------- /examples/shifter/Dockerfile: -------------------------------------------------------------------------------- 1 | # This example makes use of an Ubuntu-based NERSC base image 2 | # that already has MPI built and installed. 3 | # 4 | # This means that you just need to add your app code in and compile it. 5 | # 6 | # To build this example do: 7 | # docker build -t <your_dockerid>/hellompi:latest .
8 | # 9 | # And to test: 10 | # docker run -it --rm /hellompi:latest /app/hello 11 | 12 | FROM nersc/ubuntu-mpi:14.04 13 | 14 | ADD helloworld.c /app/ 15 | 16 | RUN cd /app && mpicc helloworld.c -o /app/hello 17 | 18 | ENV PATH=/usr/bin:/bin:/app:/usr/local/bin 19 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos 2 | 3 | RUN \ 4 | yum -y install epel-release && \ 5 | yum -y install mongodb-server mongodb wget 6 | 7 | RUN \ 8 | URL=http://portal.nersc.gov/project/bigdata/shifter/ && \ 9 | yum -y install $URL/shifter-18.03.0-1.nersc.el7.centos.x86_64.rpm \ 10 | $URL/shifter-imagegw-18.03.0-1.nersc.el7.centos.x86_64.rpm \ 11 | $URL/shifter-fasthash-18.03.0-1.nersc.el7.centos.x86_64.rpm \ 12 | $URL/shifter-runtime-18.03.0-1.nersc.el7.centos.x86_64.rpm 13 | 14 | 15 | RUN \ 16 | useradd auser -m && \ 17 | getent passwd >> /etc/shifter/shifter_etc_files/passwd 18 | 19 | ADD ./config/ /src/ 20 | RUN \ 21 | cp /src/imagemanager.json /src/udiRoot.conf /etc/shifter/ 22 | 23 | RUN yum -y install git && \ 24 | sed -i "s|'r:gz'|'r:gz', errors='replace'|" /usr/lib64/python2.7/site-packages/shifter_imagegw/dockerv2.py 25 | 26 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/config/start.sh: -------------------------------------------------------------------------------- 1 | if [ $(grep -c squash /proc/filesystems) -eq 0 ] ;then 2 | echo "Please load the squashfs module" 3 | exit 4 | fi 5 | 6 | [ -e /data/db ] || mkdir /data/db 7 | 8 | [ -e /data/images/cache ] || mkdir -p /data/images/cache 9 | 10 | echo "Starting services" 11 | mongod --smallfiles --fork --logpath=/var/log/mongodb/mongo.log --pidfilepath /mongo.pid & 12 | sleep 2 13 | if [ ! -e "/proc/$(cat /mongo.pid)/" ] ; then 14 | echo "Mongo not running" 15 | echo "Check for a stale lock file" 16 | exit 1 17 | fi 18 | 19 | dd if=/dev/urandom of=/etc/munge/munge.key count=1 20 | chown munge /etc/munge/munge.key 21 | chmod 600 /etc/munge/munge.key 22 | runuser -u munge -- /usr/sbin/munged --force -F & 23 | 24 | echo "Starting Shifter Services" 25 | /usr/bin/gunicorn -b 0.0.0.0:5000 --backlog 2048 --access-logfile=/var/log/shifter_imagegw/access.log --log-file=/var/log/shifter_imagegw/error.log shifter_imagegw.api:app >> shifter.log 2>&1 & 26 | 27 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/README.md: -------------------------------------------------------------------------------- 1 | # Shifter In A Box 2 | 3 | Shifter-in-a-box is intended to provide a functional installation of shifter that can be used for testing and experimentation of Shifter without 4 | installing it on a real system. 5 | 6 | The install uses a Centos:7 base os. The install steps can be seen in the Dockerfile. 7 | 8 | # Usage 9 | 10 | docker run -it --rm --privileged -v shifter:/data scanon/shifterbox 11 | 12 | At the prompt, you can start the services. 13 | 14 | [root@xxxxx /]$ /src/start.sh 15 | 16 | And test the services 17 | 18 | [root@xxxxx /]$ /src/test.sh 19 | 20 | To run a shifter container, you need to become the testuser (auser). 21 | 22 | [root@xxxxx /]$ su - auser 23 | [auser@xxxx /]$ shifterimg pull busybox 24 | [auser@xxxx /]$ shifter --image=busybox sh 25 | 26 | # Docker4Mac Users 27 | 28 | If you are using Docker4Mac then you will need to install the squashfs module. 
There 29 | is a separate Docker image for this. 30 | 31 | docker run -it --rm --privileged scanon/squashfs 32 | -------------------------------------------------------------------------------- /setup.md: -------------------------------------------------------------------------------- 1 | # Setup instructions 2 | 3 | ## Docker 4 | 5 | Docker has versions for Mac, Windows, and many flavors of Linux at https://www.docker.com/community-edition#/download 6 | 7 | ### Check if everything is working 8 | 9 | Run the following commands: 10 | 11 | ```bash 12 | $ docker run hello-world 13 | $ docker pull ubuntu:14.04 14 | $ docker pull nersc/ubuntu-mpi:14.04 15 | ``` 16 | 17 | ## Docker hub account 18 | 19 | Register for an account at https://cloud.docker.com/ 20 | (take note of your Docker ID and password). 21 | With docker installed, run 22 | ```bash 23 | $ docker login -u 24 | ``` 25 | and enter your password. 26 | 27 | ## SSH access to NERSC 28 | 29 | If you're using Linux or macOS, 30 | you already have the `ssh` client installed. 31 | 32 | On Windows, install [PuTTY](http://www.chiark.greenend.org.uk/~sgtatham/putty/latest.html) 33 | 34 | 35 | ## If you have problems installing Docker 36 | 37 | You can use http://play-with-docker.com/ to try Docker. 38 | We suggest using pay-with-docker only as a last resort. 39 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/config/imagemanager.json: -------------------------------------------------------------------------------- 1 | { 2 | "WorkerThreads":8, 3 | "DefaultLustreReplication": 1, 4 | "DefaultOstCount": 16, 5 | "DefaultImageLocation": "registry-1.docker.io", 6 | "DefaultImageFormat": "squashfs", 7 | "PullUpdateTimeout": 300, 8 | "ImageExpirationTimeout": "90:00:00:00", 9 | "MongoDBURI":"mongodb://localhost/", 10 | "MongoDB":"Shifter", 11 | "Broker":"redis://localhost/", 12 | "CacheDirectory": "/data/images/cache/", 13 | "ExpandDirectory": "/data/images/expand/", 14 | "admins":["root"], 15 | "ImportUsers":["root"], 16 | "Locations": { 17 | "registry-1.docker.io": { 18 | "remotetype": "dockerv2", 19 | "authentication": "http" 20 | } 21 | }, 22 | 23 | "Platforms": { 24 | "mycluster": { 25 | "mungeSocketPath": "/var/run/munge/munge.socket.2", 26 | "accesstype": "local", 27 | "admins": ["root"], 28 | "usergroupService": "local", 29 | "local": { 30 | "imageDir": "/data/images" 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Container Computing for HPC and Scientific Workflows - ECP Tutorial 2 | 3 | Container computing is revolutionizing the way applications are developed and delivered. It offers opportunities that never existed before for significantly improving efficiency of scientific workflows and easily moving these workflows from the laptop to the supercomputer. Tools like Docker, Shifter, Singularity and Charliecloud enable a new paradigm for scientific and technical computing. However, to fully unlock its potential, users and administrators need to understand how to utilize these new approaches. This tutorial will introduce attendees to the basics of creating container images, explain best practices, and cover more advanced topics such as creating images to be run on HPC platforms using three HPC Container runtimes. 
The tutorial will also explain how research scientists can utilize container-based computing to accelerate their research and how these tools can boost the impact of their research by enabling better reproducibility and sharing of their scientific process without compromising security. 4 | 5 | The content for the handouts will be posted and updated at [https://github.com/nersc/ecp-container-tutorial](https://github.com/nersc/ecp-container-tutorial). 6 | 7 | ## Prerequisites 8 | 9 | This is hands-on tutorial. Participants should bring a laptop and pre-install Docker in advance to make the best use of time during the tutorial (see the [Setup](setup.md) section for details). Users can also create a docker account in advance at [https://cloud.docker.com/](https://cloud.docker.com/). This account will be needed to create images on docker cloud and dockerhub. In addition, users should install an ssh client for their operating system so they can access the HPC resources we will use for the Shifter portion of the tutorials. 10 | 11 | For more detailed instructions, see [Setup](setup.md). 12 | 13 | ## Slides 14 | 15 | Slides can be found in the following [Google Drive Folder](https://drive.google.com/open?id=17ibGRs7HAZ8AOnLQUo1CTNFgShB-6Nph). 16 | 17 | 18 | ## Communication 19 | Please raise your hand if you need assistance. You can also ask questions on this [Google Doc](https://docs.google.com/document/d/1thz5sp7upnhy5PKi6SpS-C9NuGRHoY5lz0NSJy7_mJM/edit?usp=sharing). 20 | 21 | ## Feedback 22 | Please add comments to the Google Doc about this tutorial at the end of the session. 23 | 24 | ## Agenda 25 | 26 | - 14:00: [Welcome and Intro to Containers](00-intro.md) 27 | - 14:30: [First hands-on](01-hands-on.md) 28 | - 15:30: Break 29 | - Distribute NERSC logins. **Please obtain a NERSC login from tutorial staff during the break** 30 | 31 | - 15:45: Container runtimes and HPC 32 | - Overview of HPC runtimes (Shifter, Singularity and Charliecloud) 33 | - 16:00: Second hands-on - HPC Runtimes and SDK 34 | - [Shifter at NERSC](03-hands-on.md) 35 | - Charliecloud - Tim 36 | - Singularity Examples - Sameer 37 | - ECP SDK Demo - Sameer 38 | - 17:00: Optimizations, Advanced Use Cases and User Success Stories 39 | - [Optimizing Images](04-optimizing.md) 40 | - Considerations for running on GPUs and non-x86 Architectures - Adam Carlyle 41 | - [Science Use Cases](05-use-cases.md) 42 | - 17:30: [Wrap-Up](06-wrap-up.md) 43 | -------------------------------------------------------------------------------- /03-hands-on.md: -------------------------------------------------------------------------------- 1 | # Second hands-on - Shifter 2 | 3 | ## Logging in to NERSC 4 | 5 | Use ssh to connect to Cori. The username and password will be on the training account sheet. 6 | 7 | ```bash 8 | ssh @cori.nersc.gov 9 | ``` 10 | 11 | ## Pulling an image 12 | 13 | Pull an image using shifterimg. You can pull a standard image such as Ubuntu or an image you pushed to dockerhub in the previous session. 14 | 15 | ```bash 16 | shifterimg pull ubuntu:14.04 17 | # OR 18 | shifterimg pull scanon/shanetest:latest 19 | ``` 20 | 21 | ## Running an image interactively 22 | 23 | Use salloc and shifter to test the image. 24 | 25 | ```bash 26 | salloc -N 1 -C haswell -q regular --reservation=ecp19containers --image ubuntu:14.04 -A ntrain 27 | ... wait for prompt ... 28 | shifter bash 29 | ``` 30 | 31 | You should be able to browse inside the image and confirm that it matches what you pushed to dockerhub earlier. 
32 | 33 | ```bash 34 | lsb_release -a 35 | ``` 36 | 37 | Once you are done exploring, exit out. 38 | ```bash 39 | exit 40 | exit 41 | ``` 42 | 43 | ## Submitting a Shifter batch job 44 | 45 | Now create a batch submission script and try running a batch job with shifter. Use vi or your favorite editor to create the submission script, or cat the contents into a file. 46 | 47 | ```bash 48 | cat << EOF > submit.sl 49 | #!/bin/bash 50 | #SBATCH -N 1 -C haswell 51 | #SBATCH --reservation=ecp19containers 52 | #SBATCH -q regular 53 | #SBATCH -A ntrain 54 | #SBATCH --image ubuntu:latest 55 | 56 | srun -N 1 shifter /app/app.py 57 | EOF 58 | ``` 59 | Use the Slurm sbatch command to submit the script. 60 | 61 | ```bash 62 | sbatch ./submit.sl 63 | ``` 64 | 65 | ## Running a parallel MPI job 66 | 67 | It is possible to run MPI jobs in Shifter and obtain native performance. There are several ways to achieve this. We will demonstrate one approach here. 68 | 69 | If you did not do so earlier, tag and push the MPI image you created earlier. 70 | 71 | ```bash 72 | docker tag hellompi mydockerid/hellompi 73 | docker push mydockerid/hellompi 74 | ``` 75 | 76 | Now, return to your Cori login, pull your image down and run it. 77 | 78 | ```bash 79 | shifterimg pull mydockerid/hellompi:latest 80 | #Wait for it to complete 81 | salloc -N 2 -C haswell -q regular -A ntrain --reservation=ecp19containers --image mydockerid/hellompi:latest 82 | # Wait for prepare_compilation_report 83 | # Cori has 32 physical cores per node with 2 hyper-threads per core. 84 | # So you can run up to 64 tasks per node. 85 | srun -N 2 -n 128 shifter /app/hello 86 | exit 87 | ``` 88 | 89 | If you have your own MPI applications, you can attempt to Docker-ize them using the steps above and run them on Cori. As a courtesy, limit your job sizes to leave sufficient resources for other participants. _Don't forget to exit from any "salloc" shells once you are done testing._ 90 | 91 | ## Using Volume mounts 92 | 93 | Like Docker, Shifter allows you to mount directories into your container. 94 | The syntax is similar to Docker but uses "--volume". Here we will mount a 95 | scratch directory into the container as /data. 96 | 97 | ```bash 98 | mkdir $SCRATCH/input 99 | echo 3.141592 > $SCRATCH/input/data.txt 100 | shifter --volume $SCRATCH/input:/data --image=ubuntu bash 101 | cat /data/data.txt 102 | ``` 103 | -------------------------------------------------------------------------------- /04-optimizing.md: -------------------------------------------------------------------------------- 1 | # Optimizing your images 2 | 3 | ## Controlling layers and making builds faster 4 | How you construct your Dockerfile can have a big impact on your image sizes. Keeping images compact decreases the time to pull an image down or convert images for use by Shifter. Here are a few tips to reduce image sizes. 5 | 6 | ### Cleanup within the construction of a layer 7 | Each RUN statement will result in a new layer. For example, let's look at the following. 8 | 9 | ``` 10 | RUN wget http://hostname.com/mycode.tgz 11 | RUN tar xzf mycode.tgz 12 | RUN cd mycode ; make; make install 13 | RUN rm -rf mycode.tgz mycode 14 | ``` 15 | 16 | This will result in four layers. Unfortunately, the cleanup line in the last RUN line will not reduce the amount of data that must be pulled from the Docker registry. Instead, it will mask or "white-out" the files so they don't appear when executing the image. 17 | 18 | In contrast, let's examine the following.
19 | 20 | ``` 21 | RUN wget http://hostname.com/mycode.tgz && \ 22 | tar xzf mycode.tgz && \ 23 | cd mycode && make && make install && \ 24 | rm -rf mycode.tgz mycode 25 | ``` 26 | 27 | This image size will be much smaller since the cleanup happens inside the layer construction. Also, notice the use of "&&" between commands. This is good practice so that failures are detected and will stop the build. Using a semicolon, as was done in the first example above, means that the make could fail but the build would continue on. 28 | 29 | ## Multi-stage Builds (new in Docker 17.05) 30 | 31 | Multi-stage builds allow a build to have multiple phases that create generated files which can be copied into the final image. 32 | For example, one phase could install compilers that are used to generate compiled binaries and then those binaries could 33 | be copied into a more compact runtime image that wouldn't need to contain all the tools required for the build. This can 34 | result in smaller images. It can also be used to generate public images that make use of commercial tools such as compilers. 35 | 36 | Using this is fairly easy. The Dockerfile will contain multiple FROM stanzas and files can be copied from one stage to the 37 | next by adding a --from argument to the COPY command. For example: 38 | 39 | ``` 40 | FROM centos:7 as build 41 | 42 | RUN yum -y install gcc make 43 | ADD code.c /src/code.c 44 | RUN gcc -o /src/mycode /src/code.c 45 | 46 | FROM centos:7 47 | COPY --from=build /src/mycode /usr/bin/mycode 48 | ``` 49 | 50 | The image is built as usual. 51 | 52 | ``` 53 | docker build -t myimage . 54 | ``` 55 | 56 | Even in this toy example, the image is over 100 MB smaller compared to the build image. 57 | 58 | ``` 59 | $ docker images|head -3 60 | REPOSITORY TAG IMAGE ID CREATED SIZE 61 | myimage latest c0fadb85b093 About a minute ago 197MB 62 | <none> <none> 4b2009ca9ca4 5 minutes ago 323MB 63 | ``` 64 | 65 | ## What goes in the image and what should stay out 66 | 67 | While Docker doesn't typically impose strict limits on image sizes, larger images are more prone to failure for a variety of reasons. Some Docker deployments may have limited disk space to store images. Pulling down large layers can trigger timeouts or other failures. Here are some best practices to follow. 68 | 69 | * Limit image sizes to a few GB. Avoid exceeding 10 GB. 70 | * Avoid "Kitchen sink" images that contain extraneous applications or tools 71 | * Limit layer sizes using the technique above 72 | * Avoid including data sets in the image unless they are relatively small and static. Data can be mapped into the image using volume mounts (-v option). 73 | * Limit images to the specific target applications and only add what is needed to support that application. 74 | -------------------------------------------------------------------------------- /01-hands-on.md: -------------------------------------------------------------------------------- 1 | # Intro to Docker 2 | 3 | ## Pulling and running an existing image 4 | 5 | Pull a public image such as Ubuntu or Centos using the docker pull command. If a tag is not specified, docker will default to "latest". 6 | 7 | ```bash 8 | $ docker pull ubuntu:14.04 9 | ``` 10 | 11 | Now run the image using the docker run command. Use the "-it" option to get an interactive terminal during the run.
12 | 13 | ```bash 14 | $ docker run -it ubuntu:14.04 15 | $ whoami 16 | $ lsb_release -a 17 | ``` 18 | 19 | ## Creating and building a Dockerfile 20 | 21 | While there are different ways to build Docker images, the recommended approach is to use a Dockerfile since it provides a way to build images so that others can understand how the image was constructed and make modifications to the recipe. 22 | 23 | A Dockerfile has many options. We will focus on a few basic ones (FROM, LABEL, ADD, and RUN). 24 | 25 | Start by making an empty directory. 26 | 27 | ``` 28 | mkdir mydockerimage 29 | ``` 30 | 31 | Create a simple shell script called `script` in your local directory using your favorite editor. 32 | 33 | ``` 34 | #!/bin/bash 35 | echo "Hello World! -- Me" 36 | ``` 37 | 38 | Now create a file called `Dockerfile` in the same directory, like the following. Use your own name and e-mail for the maintainer label. 39 | 40 | ``` 41 | FROM ubuntu:14.04 42 | LABEL maintainer="patsmith" 43 | 44 | ADD ./script /bin/script 45 | RUN mv /bin/script /bin/hello && chmod a+rx /bin/hello 46 | ``` 47 | 48 | Now build the image using the docker build command. Be sure to use the `-t` option to tag it. Tell docker build to use the current directory as the build context by specifying `.`. Alternatively you could place the Dockerfile and script in an alternate location and specify that directory in the docker build command. 49 | 50 | ```bash 51 | docker build -t hello:1.0 . 52 | ``` 53 | 54 | Try running the image. 55 | 56 | ```bash 57 | docker run -it hello:1.0 58 | hello 59 | ``` 60 | 61 | ## Pushing an image to dockerhub 62 | 63 | Docker provides a public hub that can be used to store and share images. Before pushing an image, you will need to create an account at Dockerhub. Go to [https://cloud.docker.com/](https://cloud.docker.com/) to create the account. Once the account is created, push your test image using the docker push command. In this example, we will assume the username is patsmith. If you haven't done a `docker login` you may need to do that first. 64 | 65 | ```bash 66 | docker tag hello:1.0 patsmith/hello:1.0 67 | docker login 68 | docker push patsmith/hello:1.0 69 | ``` 70 | 71 | The first push may take some time depending on your network connection and the size of the image. 72 | 73 | ## Hands-on Activity: MPI hello world 74 | 75 | Now that you've practiced loading a simple script, try creating an image that can run this short MPI hello world code: 76 | 77 | ```c 78 | // Hello World MPI app 79 | #include <mpi.h> 80 | #include <unistd.h> 81 | 82 | int main(int argc, char** argv) { 83 | int size, rank; 84 | char buffer[1024]; 85 | 86 | MPI_Init(&argc, &argv); 87 | 88 | MPI_Comm_size(MPI_COMM_WORLD, &size); 89 | MPI_Comm_rank(MPI_COMM_WORLD, &rank); 90 | 91 | gethostname(buffer, 1024); 92 | 93 | printf("hello from %d of %d on %s\n", rank, size, buffer); 94 | 95 | MPI_Barrier(MPI_COMM_WORLD); 96 | 97 | MPI_Finalize(); 98 | return 0; 99 | } 100 | ``` 101 | Hints: 102 | * You can start with the image "nersc/ubuntu-mpi:14.04". It already has MPI installed. 103 | * You compile with "mpicc helloworld.c -o /app/hello" 104 | 105 |
106 | Answer:

108 | 109 | Dockerfile: 110 | ```bash 111 | # MPI Dockerfile 112 | FROM nersc/ubuntu-mpi:14.04 113 | 114 | ADD helloworld.c /app/ 115 | 116 | RUN cd /app && mpicc helloworld.c -o /app/hello 117 | ``` 118 | 119 | docker build -t mydockerid/hellompi:latest . 120 | 121 | docker push mydockerid/hellompi:latest 122 | 123 | Log into the image and run the app: 124 | 125 | docker run -it mydockerid/hellompi:latest 126 | 127 | root@982d980864e5:/# mpirun -n 10 /app/hello 128 | hello from 3 of 10 on 982d980864e5 129 | 130 | hello from 4 of 10 on 982d980864e5 131 | 132 | hello from 7 of 10 on 982d980864e5 133 | 134 | hello from 9 of 10 on 982d980864e5 135 | 136 | hello from 2 of 10 on 982d980864e5 137 | 138 | hello from 5 of 10 on 982d980864e5 139 | 140 | hello from 8 of 10 on 982d980864e5 141 | 142 | hello from 0 of 10 on 982d980864e5 143 | 144 | hello from 6 of 10 on 982d980864e5 145 | 146 | hello from 1 of 10 on 982d980864e5 147 | 148 |

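Looking ahead: the same `hellompi` image can be run with Shifter on Cori in the second hands-on (see [03-hands-on.md](03-hands-on.md)). As a rough sketch, a Slurm batch script modeled on `examples/shifter/batch.sl` and the `submit.sl` example from that session might look like the following; the constraint, reservation, queue, and account shown are the tutorial's training settings, and `mydockerid` is a stand-in for your own Docker ID.

```bash
#!/bin/bash
#SBATCH -N 2 -C haswell -q regular
#SBATCH --reservation=ecp19containers
#SBATCH -A ntrain
#SBATCH --image mydockerid/hellompi:latest

# Cori Haswell nodes have 32 physical cores (64 hyper-threads) each,
# so two nodes can run up to 128 MPI tasks.
srun -N 2 -n 128 shifter /app/hello
```

Submit it with `sbatch`, just as with the `submit.sl` example in the second hands-on.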
149 | 150 | ## Surprise: You are using Podman 151 | 152 | [Podman](https://podman.io/) is a drop-in replacement for Docker. We have replaced docker with podman on the training 153 | systems and aliased `docker` to `podman`. Podman is configured to run with reduced privileges, which provides 154 | improved security since it doesn't require running a daemon or the extra privileges 155 | required by a typical Docker installation. 156 | 157 | ```bash 158 | (base) [tutorial@ip-172-31-3-250 ~]$ docker --version 159 | podman version 3.0.1 160 | ``` 161 | 162 | 163 | -------------------------------------------------------------------------------- /examples/shifter-in-a-box/config/udiRoot.conf: -------------------------------------------------------------------------------- 1 | # udiRoot.conf 2 | # 3 | # See https://github.com/NERSC/shifter/wiki/config-reference 4 | # 5 | # udiRoot.conf is read by shifter and most other related shifter utilities within 6 | # the udiRoot component. Unless udiRoot is built enabling particular options 7 | # udiRoot.conf must be owned by root, but readable by all users, or at least all 8 | # users you want accessing shifter. 9 | # 10 | # Configuration File Format 11 | # 12 | # The file configuration format is a basic key=value, however space seperated 13 | # strings can be used for multiple options. Multiple lines can be used if the 14 | # final character on the line is '\'. Items cannot be quoted to allow spaces 15 | # within the configuration option. 16 | # 17 | # Configuration File Options 18 | # 19 | #udiMount (required) 20 | # 21 | # Absolute path to where shifter should generate a mount point for its own use. 22 | # This path to this should be writable by root and should not be in use for other purposes. 23 | # 24 | # Recommended value: /var/udiMount 25 | udiMount=/var/udiMount 26 | 27 | #loopMount (required) 28 | # 29 | # Absolute path to where shifter should mount loop device filesystems. This path 30 | # should be writable by root and should not be in use for other purposes. 31 | # 32 | # Recommended value: /var/udiLoopMount 33 | loopMount=/var/udiLoopMount 34 | 35 | #imagePath (required) 36 | # 37 | # Absolute path to where shifter can find images. This path should be readable by 38 | # root. This path should be visible by all nodes in the system. It may be possible 39 | # to use some kind of rsyncing method to have this path be local on all systems, 40 | # but that may prove problematic if a user attempts to use an image while it is 41 | # being rsynced. Recommend using GPFS or lustre or similar. 42 | imagePath=/data/images 43 | 44 | # udiRootPath (required) 45 | # 46 | # Absolute path (can be a symlink) to where current version of udiRoot is 47 | # installed. This path is used to find all needed shifter-specific utilities 48 | # (shifter, shifterimg, setupRoot, unsetupRoot, mount, etc). 49 | # In general, this should match the installation ${prefix} used when building. 50 | # 51 | # Recommended value: /opt/shifter/default 52 | udiRootPath=/usr 53 | 54 | # sitePreMountHook 55 | # 56 | # Script to be run before bind-mounting the siteFs filesystems. This script needs 57 | # to be root owned and executable by root.
It should create any directories on 58 | # the path to the mount point, but not the mount point itself (e.g., mkdir -p 59 | # global but not mkdir -p global/u1 if your siteFs path is /global/u1 ) 60 | # 61 | # Note that the script is executed within your udiMount directory and so all your 62 | # paths within the script should be relative to that. 63 | # 64 | # Recommended value: /etc/opt/nersc/udiRoot/premount.sh 65 | #sitePreMountHook=/etc/shifter/premount.sh 66 | 67 | # sitePostMountHook 68 | # 69 | # Script to be run after bind-mounting the siteFs filesystems. This script need to 70 | # be root owned and executable by root. It should do any work required after 71 | # performing the mounts, e.g., generating a symlink. 72 | # 73 | # Note that the script is executed within your udiMount directory and so all your 74 | # paths within the script should be relative to that. 75 | # 76 | # Recommended value: /etc/opt/nersc/udiRoot/postmount.sh 77 | #sitePostMountHook=/etc/shifter/postmount.sh 78 | 79 | #optUdiImage 80 | # 81 | # Absolute path to the udiImage directory to be bind-mounted onto /opt/udiImage. 82 | # This is typically pre-built with shifter to include an sshd, but you could add 83 | # other things if you so desire. 84 | # 85 | # Recommended value: /opt/shifter/udiRoot/default/deps/udiImage 86 | optUdiImage=/usr/libexec/shifter/opt/udiImage 87 | # 88 | # Absolute path to the files you want copied into /etc for every container. This 89 | # path must be root owned (including the files within), and it must contain, at 90 | # minimum, nsswitch.conf, passwd, group. 91 | # 92 | # Note that any files you put in this path will override whatever the user 93 | # included in their image. 94 | # 95 | # Recommended value: /opt/shifter/default/etc_files 96 | etcPath= 97 | 98 | 99 | #allowLocalChroot (0 or 1) 100 | # 101 | # shifter can be used to construct a "container" out a of local path instead of a 102 | # loop device filesystem. This can be useful if you have an unpacked layer you 103 | # want to examine, or to enable shifter services within an existing path. Setting 104 | # to 1 will allow this path-specified shifting, 0 will not. 105 | # 106 | # This must be enabled if the "ccm" emulation mode is desired. (ccm emulation is 107 | # effectively done with shifter --image=local:/ within the SLURM integration. 108 | 109 | 110 | #autoLoadKernelModule (0 or 1) 111 | # 112 | # Flag to determine if kernel modules can be automatically loaded by shifter if 113 | # required. This is typically limited to loop, squashfs, ext4 (and its dependencies) 114 | # 115 | # Recommend 0 if you already load loop, squashfs, and ext4 as part of node bootup 116 | # process. 117 | # 118 | # Recommend 1 if you want to let shifter load them for you. 119 | 120 | 121 | #mountUdiRootWritable (required) 122 | # 123 | # Flag to remount the udiMount VFS read-only after setup. This is typically only 124 | # needed for debugging, and should usually be set to 1. 125 | # 126 | # Recommended value: 1 127 | mountUdiRootWritable=1 128 | 129 | #maxGroupCount (required) 130 | # 131 | # Maximum number of groups to allow. If the embedded sshd is being used, then this 132 | # should be set to 31. This is used when preparing the /etc/group file, which is 133 | # a filtered version of the group file you provide to shifter. The filtering is 134 | # done because the libc utilities for parsing an /etc/group file are typically 135 | # more limited than the LDAP counterparts. 
Since LDAP is not usable within 136 | # shifter, a filtered group file is used. 137 | # 138 | # Recommended value: 31 139 | maxGroupCount=31 140 | 141 | #modprobePath 142 | # 143 | # Absolute path to known-good modprobe 144 | modprobePath=/usr/sbin/modprobe 145 | 146 | #insmodPath (required) 147 | # 148 | # Absolute path to known-good insmod 149 | insmodPath=/usr/sbin/insmod 150 | 151 | #cpPath (required) 152 | # 153 | # Absolute path to known-good cp 154 | cpPath=/usr/bin/cp 155 | 156 | #mvPath (required) 157 | # 158 | # Absolute path to known-good mv 159 | mvPath=/usr/bin/mv 160 | 161 | #chmodPath 162 | # 163 | # Absolute path to known-good chmod 164 | chmodPath=/usr/bin/chmod 165 | 166 | #ddPath 167 | # 168 | # Absolute path to known-good dd 169 | ddPath=/usr/bin/dd 170 | 171 | #mkfsXfsPath 172 | # 173 | # Absolute path to known-good mkfs.xfs 174 | # mkfsXfsPath=/usr/sbin/mkfs.xfs 175 | 176 | #rootfsType 177 | # 178 | # The filesystem type to use for setting up the shifter VFS layer. This is 179 | # typically just tmpfs. On cray compute nodes (CLE 5.2), tmpfs will not work, 180 | # instead use ramfs. 181 | # 182 | # Recommended value: tmpfs 183 | rootfsType=tmpfs 184 | 185 | #gatewayTimeout (optional) 186 | # 187 | # Time in seconds to wait for the imagegw to respond before failing over to next 188 | # (or failing). 189 | 190 | #kmodBasePath 191 | # 192 | # Optional absolute path to where kernel modules are accessible -- up-to-but-not- 193 | # including the kernel version directory. On many systems this will be /lib/modules, 194 | # on Cray systems where kernel modules are built as part of the installation process, 195 | # point to where you installed them (possibly under /opt/shifter/udiRoot/default/kmod) 196 | # 197 | # This is not necessary if autoLoadKernelModule is 0, and is required if it is 1. 198 | 199 | 200 | #kmodCacheFile (deprecated) 201 | # 202 | #Do not use. 203 | 204 | 205 | #siteFs 206 | # 207 | # Semicolon seperated list of paths to be automatically bind-mounted into the 208 | # container. This is typically used to make network filesystems accessible 209 | # within the container, but could be used to allow certain other facilities, 210 | # like /var/run or /var/spool/alps to be accessible within the image (depending 211 | # on your needs). 212 | # 213 | # Do not attempt to bind things under /usr or other common critical paths within 214 | # containers. 215 | # 216 | # It is OK to perform this under /var or /opt or a novel path that your site 217 | # maintains (e.g., for NERSC, /global). 218 | siteFs=/home:/home 219 | 220 | 221 | #siteEnv 222 | # 223 | # Space seperated list of environment variables to automatically set (or add, or 224 | # replace) when a shifter container is setup. 225 | # 226 | # This can be useful if network home directories are mounted into the container 227 | # and you users want a way to prevent their localized dotfiles from running. (e.g., 228 | # do not execute if SHIFTER_RUNTIME is set). 229 | # 230 | siteEnv=SHIFTER_RUNTIME=1 231 | 232 | 233 | #siteEnvAppend 234 | # 235 | # Space seperated list of environment variables to automatically append (or add) 236 | # when a shifter container is setup. This only makes sense for colon seperated 237 | # environment variables, .e.g, PATH. 238 | # 239 | # This can be used if your site patches in a path that you want to appear in the 240 | # path. Recommend that all binaries are compatible with all containers, i.e., are 241 | # statically linked, to ensure they work. 
242 | # 243 | siteEnvAppend=PATH=/opt/udiImage/bin 244 | 245 | 246 | # siteEnvPrepend 247 | # 248 | # Space seperated list of environment variables to automatically prepend (or add) 249 | # when a shifter container is setup. This only makes sense for colon seperated 250 | # environment variables, e.g., PATH. 251 | # 252 | # This can be used if your site patches in a path that you want to appear in the 253 | # path. Recommend that all binaries are compatible with all containers, i.e., are 254 | # statically linked, to ensure they work. 255 | # 256 | #siteEnvPrepend=PATH=/opt/udiImage/bin 257 | 258 | 259 | #imageGateway 260 | # 261 | # Space seperated URLs for your imagegw. Used by shifterimg and SLURM batch 262 | # integration to communicate with the imagegw. 263 | # batchType (optional) 264 | # 265 | # Used by batch integration code to pick useful settings. May be deprecated in 266 | # the future as it is not necessary at this point. 267 | # 268 | imageGateway=http://localhost:5000 269 | 270 | 271 | #system (required) 272 | # 273 | # Name of your system, e.g., edison or cori. This name must match a configured 274 | # system in the imagegw. This is primarily used by shifterimg to self-identify 275 | # which system it is representing. 276 | # 277 | system=mycluster 278 | 279 | #defaultImageType (required) 280 | # 281 | # Default image type for lookups and user input parsing. This should almost 282 | # certainly be set to "docker" for most systems. 283 | # 284 | defaultImageType=docker 285 | 286 | 287 | #siteResources (optional) deprecated 288 | # 289 | # Absolute path to where site-specific resources will be bind-mounted inside the 290 | # container to enable features like native MPI or GPU support. 291 | # This configuration only affects the container. The specified path will be automatically 292 | # created inside the container. The specified path doesn't need to exist on the host. 293 | # 294 | #siteResources=/opt/shifter/site-resources 295 | etcPath=/etc/shifter/shifter_etc_files/ 296 | --------------------------------------------------------------------------------