├── .github ├── PERUSEME └── workflows │ └── main.yml ├── LICENSE ├── Makefile.am ├── NOTICE ├── README.rst ├── VERSION ├── autogen.sh ├── bin ├── Makefile.am ├── ch-checkns.c ├── ch-completion.bash ├── ch-convert ├── ch-fromhost ├── ch-image.py.in ├── ch-run-oci.py.in ├── ch-run.c ├── ch-test ├── ch_core.c ├── ch_core.h ├── ch_fuse.c ├── ch_fuse.h ├── ch_misc.c └── ch_misc.h ├── configure.ac ├── doc ├── Makefile.am ├── _loc.rst ├── best_practices.rst ├── bugs.rst ├── ch-checkns.rst ├── ch-completion.bash.rst ├── ch-convert.rst ├── ch-fromhost.rst ├── ch-image.rst ├── ch-run-oci.rst ├── ch-run.rst ├── ch-test.rst ├── charliecloud.rst ├── conf.py ├── dev.rst ├── faq.rst ├── favicon.ico ├── index.rst ├── install.rst ├── logo-sidebar.png ├── make-deps-overview ├── man │ └── README ├── publish ├── py_env.rst ├── rd100-winner.png ├── see_also.rst └── tutorial.rst ├── examples ├── .dockerignore ├── Dockerfile.almalinux_8ch ├── Dockerfile.centos_7ch ├── Dockerfile.debian_11ch ├── Dockerfile.libfabric ├── Dockerfile.mpich ├── Dockerfile.nvidia ├── Dockerfile.openmpi ├── Makefile.am ├── chtest │ ├── Build │ ├── Makefile │ ├── bind_priv.py │ ├── chroot-escape.c │ ├── dev_proc_sys.py │ ├── fs_perms.py │ ├── mknods.c │ ├── printns │ ├── setgroups.c │ ├── setuid.c │ └── signal_out.py ├── copy │ ├── Dockerfile │ ├── dirA │ │ └── fileAa │ ├── dirB │ │ ├── fileBa │ │ └── fileBb │ ├── dirCa │ │ └── dirCb │ │ │ ├── fileCba │ │ │ └── fileCbb │ ├── dirD │ │ └── fileDa │ ├── dirEa │ │ └── dirEb │ │ │ ├── fileEba │ │ │ └── fileEbb │ ├── dirF │ │ ├── dir19a2 │ │ │ ├── dir19b2 │ │ │ │ └── file19c1 │ │ │ ├── dir19b3 │ │ │ │ └── file19c1 │ │ │ ├── file19b2 │ │ │ └── file19b3 │ │ ├── dir19a3 │ │ │ └── file19b1 │ │ ├── file19a2 │ │ └── file19a3 │ ├── dirG │ │ ├── diry │ │ │ └── file_ │ │ ├── filey │ │ ├── s_dir1 │ │ ├── s_dir4 │ │ │ └── file_ │ │ ├── s_file1 │ │ └── s_file4 │ │ │ └── file_ │ ├── fileA │ ├── fileB │ └── test.bats ├── distroless │ ├── Dockerfile │ ├── hello.py │ └── test.bats ├── exhaustive │ ├── Dockerfile │ └── test.bats ├── hello │ ├── Dockerfile │ ├── README │ ├── hello.sh │ └── test.bats ├── lammps │ ├── Dockerfile │ ├── melt.patch │ ├── simple.patch │ └── test.bats ├── lustre │ ├── Dockerfile │ └── test.bats ├── mpibench │ ├── Dockerfile.mpich │ ├── Dockerfile.openmpi │ └── test.bats ├── mpihello │ ├── Dockerfile.mpich │ ├── Dockerfile.openmpi │ ├── Makefile │ ├── hello.c │ ├── slurm.sh │ └── test.bats ├── multistage │ ├── Dockerfile │ └── test.bats ├── obspy │ ├── Dockerfile │ ├── README │ ├── hello.py │ ├── obspy.png │ └── test.bats ├── paraview │ ├── Dockerfile │ ├── cone.2ranks.vtk │ ├── cone.nranks.vtk │ ├── cone.png │ ├── cone.py │ ├── cone.serial.vtk │ └── test.bats ├── seccomp │ ├── Dockerfile │ ├── mknods.c │ └── test.bats ├── spack │ ├── Dockerfile │ ├── libfuse.patch │ ├── packages.yaml │ └── test.bats └── spark │ ├── Dockerfile │ ├── slurm.sh │ └── test.bats ├── lib ├── Makefile.am ├── base.sh ├── build.py ├── build_cache.py ├── charliecloud.py ├── filesystem.py ├── force.py ├── image.py ├── misc.py ├── modify.py ├── pull.py ├── push.py └── registry.py ├── misc ├── Makefile.am ├── branches-tidy ├── grep ├── loc ├── m4 │ ├── README │ ├── ax_check_compile_flag.m4 │ ├── ax_compare_version.m4 │ ├── ax_pthread.m4 │ └── ax_with_prog.m4 └── version ├── packaging ├── Makefile.am ├── README ├── fedora │ ├── build │ ├── charliecloud.rpmlintrc │ ├── charliecloud.spec │ ├── el7-pkgdir.patch │ ├── printf.patch │ └── upstream.spec └── requirements.txt └── test ├── .dockerignore ├── Build.centos7xz 
├── Build.docker_pull ├── Build.missing ├── Dockerfile.argenv ├── Dockerfile.file-quirks ├── Dockerfile.metadata ├── Dockerfile.ocimanifest ├── Dockerfile.quick ├── Makefile.am ├── approved-trailing-whitespace ├── bucache ├── a-fail.df ├── a.df ├── argenv-special.df ├── argenv.df ├── argenv2.df ├── b.df ├── c.df ├── copy.df ├── difficult.df ├── force.df ├── from.df └── rsync.df ├── build ├── 10_sanity.bats ├── 40_pull.bats ├── 50_ch-image.bats ├── 50_dockerfile.bats ├── 50_localregistry.bats ├── 50_misc.bats ├── 50_rsync.bats ├── 55_cache.bats ├── 60_force.bats └── 99_cleanup.bats ├── common.bash ├── docs-sane.py.in ├── doctest-auto ├── doctest.py.in ├── fixtures ├── README └── empty-file ├── force-auto.py.in ├── make-auto.d ├── build.bats.in ├── build_custom.bats.in ├── builder_to_archive.bats.in └── unpack.bats.in ├── make-perms-test.py.in ├── old-storage ├── order-py.py.in ├── registry-config.yml ├── run ├── build-rpms.bats ├── ch-convert.bats ├── ch-fromhost.bats ├── ch-run_escalated.bats ├── ch-run_isolation.bats ├── ch-run_join.bats ├── ch-run_misc.bats └── ch-run_uidgid.bats ├── run_first.bats ├── sotest ├── files_inferrable.txt ├── libsotest.c └── sotest.c ├── unused ├── echo-euid.c └── su_wrap.py └── whiteout /.github/PERUSEME: -------------------------------------------------------------------------------- 1 | [This file is not called README because files named .github/README.* get 2 | picked up by GitHub and used as the main project README.] 3 | 4 | This directory defines our GitHub Actions test suite setup. 5 | 6 | The basic strategy is to start one “job” per builder; these run in parallel. 7 | Each job then cycles through several different configurations, which vary per 8 | builder. It is configured to “fail fast”, i.e., if one of the jobs fails, the 9 | others will be immediately cancelled. For example, we only run the quick test 10 | suite on one builder, but if it fails everything will stop and you still get 11 | notified quickly. 12 | 13 | The number of concurrent jobs is not clear to me, but I’ve seen 7 and the 14 | documentation [1] implies it’s at least 20 (though I assume there is some 15 | global limit for OSS projects too). Nominally, jobs are started from the left 16 | side of the list, so anything we think is likely to fail fast (e.g., the quick 17 | scope) should be leftward; in practice it seems to be random. 18 | 19 | We could add more matrix dimensions, but then we’d have to deal with ordering 20 | more carefully, and pass the Docker cache manually (or not use it for some 21 | things). 22 | 23 | [1]: https://docs.github.com/en/free-pro-team@latest/actions/reference/usage-limits-billing-and-administration 24 | 25 | Conventions: 26 | 27 | * We install everything to start, then uninstall as needed for more 28 | bare-bones tests. 29 | 30 | * For the “extra things” tests: 31 | 32 | * Docker is the fastest builder, so that’s where we put extra things. 33 | 34 | * We need to retain sudo for uninstalling stuff. 35 | 36 | * I could not figure out how to set a boolean variable for use in “if” 37 | conditions. (I *did* get an environment variable to work, but it used the 38 | strings “true” and “false” rather than our set/unset convention. This 39 | seemed error-prone.) Therefore the extra things tests all use the full 40 | expression. 41 | 42 | Miscellaneous notes and gotchas: 43 | 44 | * Runner specs (as of 2020-11-25), nominal: Azure Standard_DS2_v2 virtual 45 | machine: 2 vCPUs, 7 GiB RAM, 15 GiB SSD storage.
The OS image is 46 | bare-bones but there is a lot of software installed in third-party 47 | locations [1]. 48 | 49 | Looking at the actual VM provisioned, the disk specs are a little 50 | different: it’s got an 84 GiB root filesystem mounted, and another 9 GiB 51 | mounted on /mnt. With a little deleting, maybe we can make room for a 52 | full-scope test. 53 | 54 | It does seem to boot faster than Travis; overall performance is worse, but 55 | total test time is lower (Travis took 50–55 minutes to complete a passing 56 | build). 57 | 58 | [1]: https://github.com/actions/virtual-environments/blob/ubuntu20/20201116.1/images/linux/Ubuntu2004-README.md 59 | 60 | * The default shell (Bash) does not read any init files [1], so you cannot 61 | configure it with e.g. .bashrc. 62 | 63 | [1]: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions#using-a-specific-shell 64 | 65 | * GitHub doesn’t seem to notice our setup if .github is a symlink. :( 66 | 67 | * GitHub seems to want us to encapsulate some of the steps that are now just 68 | shell scripts into “actions”. I haven’t looked into this. Issue #914. 69 | 70 | * Force-push does start a new build. 71 | 72 | * Commands in “run” blocks aren’t logged by default; you need “set -x” if 73 | you want to see them. However, there seems to be a race condition, so the 74 | commands and their output aren’t always interleaved correctly. 75 | 76 | * “docker” does not require “sudo”. 77 | 78 | * There are several places where we configure, make, make install. These 79 | need to be kept in sync. Perhaps there is an opportunity for an “Action” 80 | here? But the configure output validation varies. 81 | 82 | * The .github directory doesn’t have a Makefile.am; the files are listed in 83 | the root Makefile.am. 84 | 85 | * Most variables are strings. It’s easy to get into a situation where you 86 | set a variable to “false”, but it’s the string “false”, so it’s true. 87 | 88 | * Viewing step output is glitchy: 89 | 90 | * While the job is in progress, sometimes the step headings are links and 91 | sometimes they aren’t. 92 | 93 | * If it does work, you can’t scroll back to the start. 94 | 95 | * The “in progress” throbber often seems to be on the wrong heading. 96 | 97 | * When it’s over, sometimes clicking on a heading opens it but the content 98 | is blank; in this case, clicking a different job and coming back seems 99 | to fix things. 100 | 101 | * Previously we listed $CH_TEST_TARDIR and $CH_TEST_IMGDIR between phases. I 102 | didn’t transfer that over. It must have been useful, so let’s pay 103 | attention to see if it needs to be re-added. 104 | -------------------------------------------------------------------------------- /Makefile.am: -------------------------------------------------------------------------------- 1 | SUBDIRS = lib bin doc examples misc packaging test 2 | 3 | # The CI stuff isn't really relevant for the tarballs, but they should 4 | # have complete source code. 5 | EXTRA_DIST = .github/PERUSEME .github/workflows/main.yml 6 | 7 | EXTRA_DIST += LICENSE README.rst VERSION autogen.sh 8 | 9 | # The paths embedded in the source code are suitable for running from the 10 | # source directory (i.e., without installing). When installing, those paths 11 | # are often wrong, so re-write them with the correct paths we got from 12 | # configure. Note: Some variables are in both Python and sh, so we use syntax 13 | # valid for both; others are just sh.
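# For illustration, a hypothetical source line such as
#
#     ch_lib="$(cd "$(dirname "$0")" && pwd)/../lib"
#
# matches the first sed expression below and is rewritten at install time to
#
#     ch_lib="@libdir@/charliecloud"
#
# with @libdir@ already expanded by configure. (Hypothetical example; the
# lines in the real scripts vary slightly.)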
14 | install-exec-hook: 15 | @echo '### re-writing embedded paths ###' 16 | for i in $(DESTDIR)@bindir@/ch-convert \ 17 | $(DESTDIR)@bindir@/ch-fromhost \ 18 | $(DESTDIR)@bindir@/ch-image \ 19 | $(DESTDIR)@bindir@/ch-run-oci \ 20 | $(DESTDIR)@bindir@/ch-test \ 21 | $(DESTDIR)@libdir@/charliecloud/base.sh \ 22 | $(DESTDIR)@libexecdir@/charliecloud/doctest; \ 23 | do \ 24 | sed -Ei -e 's|^(ch_lib ?= ?).+/lib"?$$|\1"@libdir@/charliecloud"|' \ 25 | -e 's|^(CHTEST_DIR=).+$$|\1@libexecdir@/charliecloud|' \ 26 | -e 's|^(CHTEST_EXAMPLES_DIR=).+$$|\1@docdir@/examples|' \ 27 | $$i; \ 28 | done 29 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Charliecloud is copyright © Triad National Security, LLC and others. 2 | 3 | This software was produced in part under U.S. Government contract 4 | 89233218CNA000001 for Los Alamos National Laboratory (LANL), which is operated 5 | by Triad National Security, LLC for the U.S. Department of Energy/National 6 | Nuclear Security Administration. 7 | 8 | The Government is granted for itself and others acting on its behalf a 9 | nonexclusive, paid-up, irrevocable worldwide license in this material to 10 | reproduce, prepare derivative works, distribute copies to the public, perform 11 | publicly and display publicly, and to permit others to do so. 12 | 13 | Neither the government nor Triad National Security, LLC makes any warranty, 14 | express or implied, or assumes any liability for use of this software. 15 | 16 | If software is modified to produce derivative works, such derivative works 17 | should be clearly marked, so as not to confuse it with the version available 18 | from LANL. 19 | 20 | LA-CC 14-096 21 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Charliecloud is a lightweight, fully unprivileged container implementation for 2 | HPC applications. The project is now hosted on GitLab: 3 | https://gitlab.com/charliecloud/main 4 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 0.39~pre 2 | -------------------------------------------------------------------------------- /autogen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | lark_version=1.1.9 5 | 6 | while [[ "$#" -gt 0 ]]; do 7 | case $1 in 8 | --clean) 9 | clean=yes 10 | ;; 11 | --no-lark) 12 | lark_no_install=yes 13 | ;; 14 | --rm-lark) 15 | lark_shovel=yes 16 | ;; 17 | *) 18 | help=yes 19 | ;; 20 | esac 21 | shift 22 | done 23 | 24 | if [[ $help ]]; then 25 | cat <&1 109 | echo 'hint: Install "wheel" and then re-run with "--rm-lark"?' 2>&1 110 | exit 1 111 | fi 112 | set +x 113 | echo 114 | echo 'Done. Now you can "./configure".' 115 | fi 116 | 117 | -------------------------------------------------------------------------------- /bin/Makefile.am: -------------------------------------------------------------------------------- 1 | # Bugs in this Makefile: 2 | # 3 | # 1. $(EXEEXT) not included for scripts. 
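#    (As we understand it: Automake appends $(EXEEXT) to the bin_PROGRAMS
#    names on platforms where executables carry a suffix, but the script
#    names are used as-is, so the two can disagree. Harmless on Linux,
#    where EXEEXT is empty.)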
4 | 5 | ## C programs 6 | 7 | bin_PROGRAMS = ch-checkns ch-run 8 | 9 | ch_checkns_SOURCES = ch-checkns.c ch_misc.h ch_misc.c 10 | 11 | ch_run_SOURCES = ch-run.c ch_core.h ch_core.c ch_misc.h ch_misc.c 12 | if HAVE_LIBSQUASHFUSE 13 | ch_run_SOURCES += ch_fuse.h ch_fuse.c 14 | endif 15 | 16 | # additional build flags for ch-run 17 | ch_run_CFLAGS = $(PTHREAD_CFLAGS) 18 | ch_run_LDADD = $(CH_RUN_LIBS) 19 | 20 | 21 | ## Shell scripts - distributed as-is 22 | 23 | dist_bin_SCRIPTS = ch-convert \ 24 | ch-fromhost \ 25 | ch-test 26 | 27 | 28 | ## Python scripts - need text processing 29 | 30 | bin_SCRIPTS = ch-run-oci # scripts to build 31 | EXTRA_SCRIPTS = ch-image # more scripts that *may* be built 32 | if ENABLE_CH_IMAGE 33 | bin_SCRIPTS += ch-image 34 | endif 35 | EXTRA_DIST = ch-image.py.in ch-run-oci.py.in 36 | CLEANFILES = $(bin_SCRIPTS) $(EXTRA_SCRIPTS) 37 | 38 | ch-image: ch-image.py.in 39 | ch-run-oci: ch-run-oci.py.in 40 | 41 | $(bin_SCRIPTS): %: %.py.in 42 | rm -f $@ 43 | sed -E 's|%PYTHON_SHEBANG%|@PYTHON_SHEBANG@|' < $< > $@ 44 | chmod +rx,-w $@ # respects umask 45 | -------------------------------------------------------------------------------- /bin/ch_core.h: -------------------------------------------------------------------------------- 1 | /* Copyright © Triad National Security, LLC, and others. 2 | 3 | This interface contains Charliecloud's core containerization features. */ 4 | 5 | #define _GNU_SOURCE 6 | #include <stdbool.h> 7 | 8 | 9 | /** Types **/ 10 | 11 | enum bind_dep { 12 | BD_REQUIRED, // both source and destination must exist 13 | BD_OPTIONAL, // if either source or destination missing, do nothing 14 | BD_MAKE_DST, // source must exist, try to create destination if it doesn't 15 | }; 16 | 17 | struct bind { 18 | char *src; 19 | char *dst; 20 | enum bind_dep dep; 21 | }; 22 | 23 | enum img_type { 24 | IMG_DIRECTORY, // normal directory, perhaps an external mount of some kind 25 | IMG_SQUASH, // SquashFS archive file (not yet mounted) 26 | IMG_NAME, // name of image in storage 27 | IMG_NONE, // image type is not set yet 28 | }; 29 | 30 | struct container { 31 | struct bind *binds; 32 | gid_t container_gid; // GID to use in container 33 | uid_t container_uid; // UID to use in container 34 | bool env_expand; // expand variables in --set-env 35 | char *host_home; // if --home, host path to user homedir, else NULL 36 | char *img_ref; // image description from command line 37 | char *newroot; // path to new root directory 38 | bool join; // is this a synchronized join? 39 | int join_ct; // number of peers in a synchronized join 40 | pid_t join_pid; // process in existing namespace to join 41 | char *join_tag; // identifier for synchronized join 42 | char *overlay_size; // size of overlaid tmpfs (NULL for no overlay) 43 | bool private_passwd; // don't bind custom /etc/{passwd,group} 44 | bool private_tmp; // don't bind host's /tmp 45 | enum img_type type; // directory, SquashFS, etc.
46 | bool writable; // re-mount image read-write 47 | }; 48 | 49 | 50 | /** Function prototypes **/ 51 | 52 | void containerize(struct container *c); 53 | enum img_type image_type(const char *ref, const char *images_dir); 54 | char *img_name2path(const char *name, const char *storage_dir); 55 | void run_user_command(char *argv[], const char *initial_dir); 56 | #ifdef HAVE_SECCOMP 57 | void seccomp_install(void); 58 | #endif 59 | -------------------------------------------------------------------------------- /bin/ch_fuse.h: -------------------------------------------------------------------------------- 1 | /* Copyright © Triad National Security, LLC, and others. */ 2 | 3 | #define _GNU_SOURCE 4 | 5 | /** Function prototypes **/ 6 | 7 | void sq_fork(struct container *c); 8 | -------------------------------------------------------------------------------- /doc/Makefile.am: -------------------------------------------------------------------------------- 1 | # This Makefile started with the default Makefile produced by the Sphinx 2 | # initialization process, which we then modified over time. During the 3 | # Automake-ification, I stripped out most of the boilerplate and left only 4 | # the targets that we use. 5 | 6 | # We turn off parallel build in doc: 7 | # 8 | # 1. Sphinx handles building the whole documentation internally already, as 9 | # a unit, so we shouldn't call sphinx-build more than once for different 10 | # output files at all, let alone in parallel. 11 | # 12 | # 2. Serial build is plenty fast. 13 | # 14 | # 3. There is a race condition in Sphinx < 1.6.6 that's triggered when two 15 | # instances (e.g., for html and man targets) try to "mkdir doctrees" 16 | # simultaneously. See issue #115. 17 | # 18 | # This special target was introduced in GNU Make 3.79, in April 2000. 19 | .NOTPARALLEL: 20 | 21 | EXTRA_DIST = \ 22 | _loc.rst \ 23 | best_practices.rst \ 24 | bugs.rst \ 25 | charliecloud.rst \ 26 | ch-checkns.rst \ 27 | ch-completion.bash.rst \ 28 | ch-convert.rst \ 29 | ch-fromhost.rst \ 30 | ch-image.rst \ 31 | ch-run.rst \ 32 | ch-run-oci.rst \ 33 | ch-test.rst \ 34 | conf.py \ 35 | dev.rst \ 36 | faq.rst \ 37 | favicon.ico \ 38 | index.rst \ 39 | install.rst \ 40 | logo-sidebar.png \ 41 | make-deps-overview \ 42 | man/README \ 43 | py_env.rst \ 44 | rd100-winner.png \ 45 | see_also.rst \ 46 | tutorial.rst 47 | 48 | if ENABLE_MAN 49 | man_MANS = \ 50 | man/charliecloud.7 \ 51 | man/ch-checkns.1 \ 52 | man/ch-completion.bash.7 \ 53 | man/ch-convert.1 \ 54 | man/ch-fromhost.1 \ 55 | man/ch-image.1 \ 56 | man/ch-run.1 \ 57 | man/ch-run-oci.1 \ 58 | man/ch-test.1 59 | endif 60 | 61 | if ENABLE_HTML 62 | nobase_html_DATA = \ 63 | html/searchindex.js \ 64 | html/_images/rd100-winner.png \ 65 | html/best_practices.html \ 66 | html/ch-checkns.html \ 67 | html/ch-completion.bash.html \ 68 | html/ch-convert.html \ 69 | html/ch-fromhost.html \ 70 | html/ch-image.html \ 71 | html/ch-run.html \ 72 | html/ch-run-oci.html \ 73 | html/ch-test.html \ 74 | html/command-usage.html \ 75 | html/dev.html \ 76 | html/faq.html \ 77 | html/index.html \ 78 | html/install.html \ 79 | html/search.html \ 80 | html/tutorial.html 81 | endif 82 | 83 | 84 | # NOTE: ./html might be a Git checkout to support "make web", so make sure not 85 | # to delete it. 86 | CLEANFILES = $(man_MANS) $(nobase_html_DATA) \ 87 | _deps.rst html/.buildinfo html/.nojekyll 88 | if ENABLE_HTML 89 | # Automake can't remove directories.
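# (CLEANFILES handles plain files only, hence the explicit rm -Rf of the
# generated directories in clean-local below.)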
90 | clean-local: 91 | rm -Rf doctrees html/_sources html/_static html/_images 92 | endif 93 | 94 | # Automake can't install and uninstall directories. _static contains around 95 | # one hundred files in several directories, and I'm pretty sure the contents 96 | # change depending on Sphinx version and other details, so we can't just list 97 | # the files. These targets deal with it as an opaque directory. The _sources 98 | # directory is another generated directory that contains references to the 99 | # input .rst files, which we need for searching to work, so we give it a 100 | # similar treatment. 101 | if ENABLE_HTML 102 | install-data-hook: 103 | cp -r html/_sources $(DESTDIR)$(htmldir)/html 104 | cp -r html/_static $(DESTDIR)$(htmldir)/html 105 | find $(DESTDIR)$(htmldir)/html/_sources -xtype f -exec chmod 644 {} \; 106 | find $(DESTDIR)$(htmldir)/html/_static -xtype d -exec chmod 755 {} \; 107 | find $(DESTDIR)$(htmldir)/html/_static -xtype f -exec chmod 644 {} \; 108 | 109 | uninstall-hook: 110 | test -d $(DESTDIR)$(htmldir)/html/_sources \ 111 | && rm -Rf $(DESTDIR)$(htmldir)/html/_sources \; 112 | test -d $(DESTDIR)$(htmldir)/html/_static \ 113 | && rm -Rf $(DESTDIR)$(htmldir)/html/_static \; 114 | test -d $(DESTDIR)$(htmldir)/html/_images \ 115 | && rm -Rf $(DESTDIR)$(htmldir)/html/_images \; 116 | endif 117 | 118 | # You can set these variables from the command line. 119 | SPHINXOPTS = -W 120 | SPHINXBUILD = @SPHINX@ 121 | PAPER = 122 | BUILDDIR = . 123 | 124 | # Internal variables. 125 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(SPHINXOPTS) . 126 | 127 | _deps.rst: ../config.log make-deps-overview 128 | cat $< | ./make-deps-overview > $@ 129 | 130 | # Since we're not doing anything in parallel anyway, just put the HTML and the 131 | # man pages in the same target, with conditionals. Gotchas: 132 | # 133 | # 1. If we build both, the HTML needs to go first, otherwise it doesn't get 134 | # curly quotes. ¯\_(ツ)_/¯ 135 | # 136 | # 2. This is not a "grouped target" but rather an "independent target" [1], 137 | # because the former came in GNU Make 4.3, which is quite new. However it 138 | # does seem to get run only once. 139 | # 140 | # [1]: https://www.gnu.org/software/make/manual/html_node/Multiple-Targets.html 141 | $(nobase_html_DATA) $(man_MANS): ../lib/version.txt ../README.rst _deps.rst $(EXTRA_DIST) 142 | if ENABLE_HTML 143 | # Create dummy file in case the redirect is disabled for EPEL. 144 | mkdir -p html 145 | touch html/command-usage.html 146 | # Build. 147 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 148 | # Avoid GitHub messing things up with Jekyll. 149 | touch html/.nojekyll 150 | # Some output files are copies with same timestamp as source; fix. Note 151 | # we need all the HTML output files, not just the one picked in $@. 152 | touch --no-create $(nobase_html_DATA) 153 | # remove unused files that Sphinx made 154 | rm -f $(BUILDDIR)/html/_deps.html \ 155 | $(BUILDDIR)/html/charliecloud.html \ 156 | $(BUILDDIR)/html/bugs.html \ 157 | $(BUILDDIR)/html/_loc.html \ 158 | $(BUILDDIR)/html/objects.inv \ 159 | $(BUILDDIR)/html/see_also.html 160 | endif 161 | if ENABLE_MAN 162 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 163 | endif 164 | -------------------------------------------------------------------------------- /doc/_loc.rst: -------------------------------------------------------------------------------- 1 | .. Do not edit this file — it’s auto-generated. 2 | 3 | We pride ourselves on keeping Charliecloud lightweight and simple.
The line 4 | counts as of version 0.38 are: 5 | 6 | .. list-table:: 7 | 8 | * - Program itself 9 | - 9087 10 | * - Test suite & examples 11 | - 12086 12 | * - Documentation 13 | - 6526 14 | * - Build system 15 | - 1298 16 | * - Packaging 17 | - 629 18 | * - Miscellaneous 19 | - 509 20 | * - Total 21 | - 30135 22 | 23 | These counts include code only, excluding blank lines and comments. They were 24 | counted using `cloc <https://github.com/AlDanial/cloc>`_ version 1.96. 25 | We typically quote the "Program itself" number when describing the size of 26 | Charliecloud. (Please do not quote the size in Priedhorsky and Randles 2017, 27 | as that number is very out of date.) 28 | 29 | -------------------------------------------------------------------------------- /doc/bugs.rst: -------------------------------------------------------------------------------- 1 | .. only:: man 2 | 3 | Reporting bugs 4 | ============== 5 | 6 | If Charliecloud was obtained from your Linux distribution, use your 7 | distribution’s bug reporting procedures. 8 | 9 | Otherwise, report bugs to: https://github.com/hpc/charliecloud/issues 10 | -------------------------------------------------------------------------------- /doc/ch-checkns.rst: -------------------------------------------------------------------------------- 1 | :code:`ch-checkns` 2 | ++++++++++++++++++ 3 | 4 | .. only:: not man 5 | 6 | Check :code:`ch-run` prerequisites, e.g., namespaces and :code:`pivot_root(2)`. 7 | 8 | 9 | Synopsis 10 | ======== 11 | 12 | :: 13 | 14 | $ ch-checkns 15 | 16 | Example 17 | ======= 18 | 19 | :: 20 | 21 | $ ch-checkns 22 | ok 23 | 24 | 25 | .. include:: ./bugs.rst 26 | .. include:: ./see_also.rst 27 | -------------------------------------------------------------------------------- /doc/ch-completion.bash.rst: -------------------------------------------------------------------------------- 1 | .. _ch-completion.bash: 2 | 3 | :code:`ch-completion.bash` 4 | ++++++++++++++++++++++++++ 5 | 6 | .. only:: not man 7 | 8 | Tab completion for the Charliecloud command line. 9 | 10 | 11 | Synopsis 12 | ======== 13 | 14 | :: 15 | 16 | $ source ch-completion.bash 17 | 18 | 19 | Description 20 | =========== 21 | 22 | :code:`ch-completion.bash` provides tab completion for the Charliecloud 23 | command line. Currently, tab completion is available for Bash users for the 24 | executables :code:`ch-image`, :code:`ch-run`, and :code:`ch-convert`. 25 | 26 | We do not currently install the file if Charliecloud is built from source (see 27 | `issue #1842 <https://github.com/hpc/charliecloud/issues/1842>`_). In this 28 | case, source it from the Charliecloud source code:: 29 | 30 | $ source $CHARLIECLOUD_SOURCE_PATH/bin/ch-completion.bash 31 | 32 | (If you installed with a distribution package, the procedure is probably 33 | nicer; see your distro’s docs.) 34 | 35 | Disable completion with the utility function :code:`ch-completion` added to 36 | your environment when the above is sourced:: 37 | 38 | $ ch-completion --disable 39 | 40 | 41 | Dependencies 42 | ============ 43 | 44 | Tab completion has these additional dependencies: 45 | 46 | * Bash ≥ 4.3.0 47 | 48 | * :code:`bash-completion` library (`GitHub 49 | <https://github.com/scop/bash-completion>`_, or it probably comes with your 50 | distribution, `e.g. 51 | `_) 52 | 53 | 54 | .. _ch-completion_func: 55 | 56 | :code:`ch-completion` 57 | ===================== 58 | 59 | Utility function for :code:`ch-completion.bash`.
60 | 61 | Synopsis 62 | -------- 63 | 64 | :: 65 | 66 | $ ch-completion [ OPTIONS ] 67 | 68 | 69 | Description 70 | ----------- 71 | 72 | :code:`ch-completion` is a function to manage Charliecloud’s tab completion. 73 | It is added to the environment when completion is sourced. The option(s) given 74 | specify what to do: 75 | 76 | :code:`--disable` 77 | Disable tab completion for all Charliecloud executables. 78 | 79 | :code:`--help` 80 | Print help message. 81 | 82 | :code:`--version` 83 | Print version of tab completion that’s currently enabled. 84 | 85 | :code:`--version-ok` 86 | Verify that tab completion version is consistent with that of 87 | :code:`ch-image`. 88 | 89 | 90 | Debugging 91 | ========= 92 | 93 | Tab completion can write debugging logs to :code:`/tmp/ch-completion.log`. 94 | Enable this by setting the environment variable :code:`CH_COMPLETION_DEBUG`. 95 | (This is primarily intended for developers.) 96 | 97 | 98 | .. LocalWords: func 99 | -------------------------------------------------------------------------------- /doc/charliecloud.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | charliecloud man page 4 | +++++++++++++++++++++ 5 | 6 | .. include:: ../README.rst 7 | .. include:: ./bugs.rst 8 | 9 | 10 | See also 11 | -------- 12 | 13 | ch-checkns(1), 14 | ch-completion.bash(7), 15 | ch-convert(1), 16 | ch-fromhost(1), 17 | ch-image(1), 18 | ch-run(1), 19 | ch-run-oci(1), 20 | ch-test(1), 21 | 22 | Full documentation at: https://hpc.github.io/charliecloud 23 | 24 | 25 | Note 26 | ---- 27 | 28 | These man pages are for Charliecloud version |release| (Git commit |version|). 29 | 30 | -------------------------------------------------------------------------------- /doc/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpc/charliecloud/23de6eee5f6c7df6d88ea9efe89e605404afea0e/doc/favicon.ico -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | Overview 2 | ******** 3 | 4 | .. image:: rd100-winner.png 5 | :align: right 6 | :alt: R&D 100 2018 winner logo 7 | :width: 128px 8 | :target: https://www.lanl.gov/discover/news-release-archive/2018/November/1119-rd-100-awards.php 9 | 10 | .. include:: ../README.rst 11 | 12 | .. note:: 13 | 14 | This documentation is for Charliecloud version |version| and was built 15 | |today|. 16 | 17 | .. toctree:: 18 | :numbered: 19 | :hidden: 20 | 21 | install 22 | tutorial 23 | ch-checkns 24 | ch-completion.bash 25 | ch-convert 26 | ch-fromhost 27 | ch-image 28 | ch-run 29 | ch-run-oci 30 | ch-test 31 | faq 32 | best_practices 33 | dev 34 | -------------------------------------------------------------------------------- /doc/logo-sidebar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpc/charliecloud/23de6eee5f6c7df6d88ea9efe89e605404afea0e/doc/logo-sidebar.png -------------------------------------------------------------------------------- /doc/make-deps-overview: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # This script is a pipe that translates configure output to ReST markup 4 | # suitable for inclusion in the documentation. 5 | # 6 | # Note: Don't pipe configure into it, because that can alter the build in the 7 | # middle of it. 
E.g., I've had part of "make install" in --prefix and part in 8 | # the default /usr/local. Use config.log instead. 9 | # 10 | # I have very mixed feelings about this script. On one hand, we've defined a 11 | # new markup language. On the other hand, I feel that writing down the 12 | # dependencies really does need to be DRY, checking the run-time dependencies 13 | # too has a lot of value, translating from prose docs to a configure script 14 | # would be nearly impossible, and ReST is way too ugly to use as-is in 15 | # terminal output. 16 | # 17 | # Steps: 18 | # 19 | # 1. Remove everything before "Building Charliecloud" (inclusive) to the 20 | # next log section (exclusive). 21 | # 2. Remove "will build and install" paragraph. 22 | # 3. Remove any "Warning:" paragraphs. 23 | # 4. Remove results of tests: " ..." to EOL, ": yes", ": no". 24 | # 5. Convert indentation to bullet lists. 25 | # 6. Convert "foo(1)" to ":code:`foo`". 26 | 27 | # shellcheck disable=SC2016 28 | awk '/^Building Charliecloud/,/^##/' | head -n-2 \ 29 | | awk -v RS='' '{gsub(/^ will build.*/, ""); print; print ""}' \ 30 | | awk -v RS='' '{gsub(/^ +Warning:.*/, ""); print; print ""}' \ 31 | | sed -r -e 's/ \.\.\..*$//' -e 's/ (yes|no)$//' \ 32 | -e 's/^ //' -e 's/(^( )+)/\1* /' -e 's/:$/:\n/' \ 33 | -e 's/([a-zA-Z0-9-]+)\(1\)/:code:`\1`/g' 34 | -------------------------------------------------------------------------------- /doc/man/README: -------------------------------------------------------------------------------- 1 | This directory contains the compiled man pages. You can read them with: 2 | 3 | $ man -l man/foo.1 4 | -------------------------------------------------------------------------------- /doc/publish: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script builds the documentation and then publishes it to the web. See 4 | # the internal documentation for usage and how to set it up. 5 | 6 | set -e 7 | doc_base=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd) 8 | 9 | fatal () { 10 | echo "¯\_(ツ)_/¯ $1" 1>&2 11 | exit 1 12 | } 13 | 14 | # Parse command line. 15 | if [[ $1 == --force ]]; then 16 | clean_only= 17 | else 18 | clean_only=yes 19 | fi 20 | 21 | # Are there any uncommitted changes? 22 | echo 'checking for uncommitted changes' 23 | dirty= 24 | if ! git diff-index --quiet --cached HEAD; then 25 | dirty='+dirty' 26 | fi 27 | if ! git diff-files --quiet; then 28 | dirty='+dirty' 29 | fi 30 | if [[ $clean_only && $dirty ]]; then 31 | fatal 'uncommitted changes present' 32 | fi 33 | 34 | cd "$doc_base" 35 | 36 | # Clean up and prep. 37 | echo 'preparing to build' 38 | make clean > /dev/null 39 | # Did "make clean" work? The only files left should be .git and an empty 40 | # directory _images. 41 | leftovers=$(find html -mindepth 1 -name .git -prune \ 42 | -o -not \( -name _images \ 43 | -o -name '.git*' \) -print) 44 | if [[ -n "$leftovers" ]]; then 45 | echo "$leftovers" 1>&2 46 | fatal 'mysterious files in doc/html after "make clean"' 47 | fi 48 | 49 | # Build. 50 | echo 'building docs' 51 | make 52 | 53 | cd html 54 | 55 | # Can we talk to GitHub? 56 | echo 'testing GitHub access' 57 | if ! git ls-remote > /dev/null; then 58 | fatal "can't talk to GitHub" 59 | fi 60 | 61 | # Publish it (note Unicode siren characters that don't appear in all editors). 62 | echo '🚨🚨🚨 publishing new docs 🚨🚨🚨' 63 | commit=$(cd .. 
&& git rev-parse --short HEAD)${dirty} 64 | set -x 65 | git add --all 66 | git commit -a -m "docs for commit $commit" 67 | git push origin gh-pages 68 | set +x 69 | 70 | # Done. 71 | echo 'Done.' 72 | -------------------------------------------------------------------------------- /doc/py_env.rst: -------------------------------------------------------------------------------- 1 | :code:`CH_LOG_FILE` 2 | If set, append log chatter to this file, rather than standard error. This is 3 | useful for debugging situations where standard error is consumed or lost. 4 | 5 | Also sets verbose mode if not already set (equivalent to :code:`--verbose`). 6 | 7 | :code:`CH_LOG_FESTOON` 8 | If set, prepend PID and timestamp to logged chatter. 9 | 10 | :code:`CH_XATTRS` 11 | If set, save xattrs in the build cache and restore them when rebuilding from 12 | the cache (equivalent to :code:`--xattrs`). 13 | -------------------------------------------------------------------------------- /doc/rd100-winner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpc/charliecloud/23de6eee5f6c7df6d88ea9efe89e605404afea0e/doc/rd100-winner.png -------------------------------------------------------------------------------- /doc/see_also.rst: -------------------------------------------------------------------------------- 1 | .. only:: man 2 | 3 | See also 4 | ======== 5 | 6 | charliecloud(7) 7 | 8 | Full documentation at: <https://hpc.github.io/charliecloud> -------------------------------------------------------------------------------- /examples/.dockerignore: -------------------------------------------------------------------------------- 1 | # Exclude everything by default 2 | * 3 | 4 | # Needed for Dockerfile.openmpi 5 | !dont-init-ucx-on-intel-cray.patch 6 | 7 | # Needed for Dockerfile.exhaustive 8 | !Dockerfile.* 9 | 10 | 11 | -------------------------------------------------------------------------------- /examples/Dockerfile.almalinux_8ch: -------------------------------------------------------------------------------- 1 | # ch-test-scope: standard 2 | FROM almalinux:8 3 | 4 | # This image has three purposes: (1) demonstrate we can build an AlmaLinux 8 5 | # image, (2) provide a build environment for Charliecloud EPEL 8 RPMs, and (3) 6 | # provide image packages necessary for Obspy and Paraview. 7 | # 8 | # Quirks: 9 | # 10 | # 1. Install the dnf ovl plugin to work around RPMDB corruption when 11 | # building images with Docker and the OverlayFS storage driver. 12 | # 13 | # 2. Enable PowerTools repo, because some packages in EPEL depend on it. 14 | # 15 | # 3. Install packages needed to build el8 rpms. 16 | # 17 | # 4. Issue #1103: Install libarchive to resolve cmake bug 18 | # 19 | # 5. AlmaLinux lost their GPG key, so manual intervention is required to 20 | # install current packages [1].
21 | # 22 | # [1]: https://almalinux.org/blog/2023-12-20-almalinux-8-key-update/ 23 | RUN rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux 24 | RUN dnf install -y --setopt=install_weak_deps=false \ 25 | epel-release \ 26 | 'dnf-command(config-manager)' 27 | RUN dnf config-manager --enable powertools 28 | RUN dnf install -y --setopt=install_weak_deps=false \ 29 | dnf-plugin-ovl \ 30 | autoconf \ 31 | automake \ 32 | gcc \ 33 | git \ 34 | libarchive \ 35 | libpng-devel \ 36 | make \ 37 | python3 \ 38 | python3-devel \ 39 | python3-lark-parser \ 40 | python3-requests \ 41 | python3-sphinx \ 42 | python3-sphinx_rtd_theme \ 43 | rpm-build \ 44 | rpmlint \ 45 | rsync \ 46 | squashfs-tools \ 47 | squashfuse \ 48 | wget \ 49 | which \ 50 | && dnf clean all 51 | 52 | # Need wheel to install bundled Lark, and the RPM version doesn’t work. 53 | RUN pip3 install wheel 54 | 55 | # AlmaLinux's linker doesn’t search these paths by default; add them because we 56 | # will install stuff later into /usr/local. 57 | RUN echo "/usr/local/lib" > /etc/ld.so.conf.d/usrlocal.conf \ 58 | && echo "/usr/local/lib64" >> /etc/ld.so.conf.d/usrlocal.conf \ 59 | && ldconfig 60 | 61 | # Install ImageMagick 62 | # The latest, 7.1.0, fails to install with a cryptic libtool error. ¯\_(ツ)_/¯ 63 | ARG MAGICK_VERSION=7.0.11-14 64 | RUN wget -nv -O ImageMagick-${MAGICK_VERSION}.tar.gz \ 65 | "https://github.com/ImageMagick/ImageMagick/archive/refs/tags/${MAGICK_VERSION}.tar.gz" \ 66 | && tar xf ImageMagick-${MAGICK_VERSION}.tar.gz \ 67 | && cd ImageMagick-${MAGICK_VERSION} \ 68 | && ./configure --prefix=/usr/local \ 69 | && make -j $(getconf _NPROCESSORS_ONLN) install \ 70 | && rm -Rf ../ImageMagick-${MAGICK_VERSION} 71 | 72 | # Add mount points for files and directories for paraview and obspy comparison 73 | # tests. 74 | RUN mkdir /diff \ 75 | && echo "example bind mount file" > /a.png \ 76 | && echo "example bind mount file" > /b.png 77 | -------------------------------------------------------------------------------- /examples/Dockerfile.centos_7ch: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM centos:7 3 | 4 | # This image has two purposes: (1) demonstrate we can build a CentOS 7 image 5 | # and (2) provide a build environment for Charliecloud EPEL 7 RPMs. 6 | 7 | # Install our dependencies, ensuring we fail out if any are missing. 8 | RUN yum install -y epel-release \ 9 | && yum install -y --setopt=skip_missing_names_on_install=0 \ 10 | autoconf \ 11 | automake \ 12 | bats \ 13 | fakeroot \ 14 | gcc \ 15 | git \ 16 | make \ 17 | python3-devel \ 18 | python3 \ 19 | python36-lark-parser \ 20 | python36-requests \ 21 | python36-sphinx \ 22 | python36-sphinx_rtd_theme \ 23 | rpm-build \ 24 | rpmlint \ 25 | rsync \ 26 | squashfs-tools \ 27 | squashfuse \ 28 | wget \ 29 | && yum clean all 30 | 31 | # We need to install epel rpm-macros after python3-devel to get the correct 32 | # python package version for our spec file macros. 33 | # https://lists.fedoraproject.org/archives/list/devel@lists.fedoraproject.org/thread/K4EH7V3OUFJFVL6A72IILJUA6JFX2HZW/ 34 | RUN yum install -y epel-rpm-macros 35 | 36 | # Need wheel to install bundled Lark, and the RPM version doesn’t work. 
37 | RUN pip3 install wheel 38 | -------------------------------------------------------------------------------- /examples/Dockerfile.debian_11ch: -------------------------------------------------------------------------------- 1 | # ch-test-scope: standard 2 | FROM debian:bullseye 3 | 4 | ARG DEBIAN_FRONTEND=noninteractive 5 | RUN apt-get update \ 6 | && apt-get install -y --no-install-recommends apt-utils \ 7 | && rm -rf /var/lib/apt/lists/* 8 | -------------------------------------------------------------------------------- /examples/Dockerfile.mpich: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | 3 | # See Dockerfile.libfabric for MPI goals and details. 4 | FROM libfabric 5 | 6 | WORKDIR /usr/local/src 7 | 8 | # Configure MPICH with OpenPMIx. Note we did attempt to configure MPICH 9 | # against both PMI2 and PMIx, as we do with OpenMPI, but the examples only 10 | # pass testing when pmix is specified. 11 | # 12 | # Note --with-pm=no disables the hydra and gforker process managers; this 13 | # allows us to launch parallel jobs with slurm using PMIx or PMI2. As a 14 | # consequence, the mpiexec executable is no longer compiled or installed; 15 | # thus, single-node guest launch using mpiexec inside the container is not 16 | # possible. 17 | # 18 | # Slingshot CXI requires MPICH version 4.1 or greater. 19 | ARG MPI_VERSION=4.1.1 20 | ARG MPI_URL=http://www.mpich.org/static/downloads/${MPI_VERSION} 21 | RUN wget -nv ${MPI_URL}/mpich-${MPI_VERSION}.tar.gz \ 22 | && tar xf mpich-${MPI_VERSION}.tar.gz \ 23 | && cd mpich-${MPI_VERSION} \ 24 | && CFLAGS=-O3 \ 25 | CXXFLAGS=-O3 \ 26 | ./configure --prefix=/usr/local \ 27 | --enable-fast=O3 \ 28 | --enable-g=none \ 29 | --enable-ofi-domain \ 30 | --enable-threads=multiple \ 31 | --with-ch4-shmmods=posix \ 32 | --with-device=ch4:ofi \ 33 | --with-libfabric=/usr/local \ 34 | --with-pm=no \ 35 | --with-pmix=/usr/local/lib 36 | RUN cd mpich-${MPI_VERSION} \ 37 | && make -j$(getconf _NPROCESSORS_ONLN) install \ 38 | && rm -Rf ../mpich-${MPI_VERSION}* \ 39 | && ldconfig 40 | -------------------------------------------------------------------------------- /examples/Dockerfile.nvidia: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | # ch-test-arch-exclude: aarch64 # only x86-64, ppc64le supported by nVidia 3 | 4 | # This Dockerfile demonstrates a multi-stage build. With a single-stage build 5 | # that brings along the nVidia build environment, the resulting unpacked image 6 | # is 2.9 GiB; with the multi-stage build, it’s 146 MiB. 7 | # 8 | # See: https://docs.docker.com/develop/develop-images/multistage-build 9 | 10 | 11 | ## Stage 1: Install the nVidia build environment and build a sample app. 12 | FROM ubuntu:20.04 13 | 14 | # OS packages needed 15 | ARG DEBIAN_FRONTEND=noninteractive 16 | RUN apt-get update \ 17 | && apt-get install -y --no-install-recommends \ 18 | ca-certificates \ 19 | gnupg \ 20 | make \ 21 | wget \ 22 | && rm -rf /var/lib/apt/lists/* 23 | 24 | # Install CUDA from nVidia.
25 | # See: https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64&target_distro=Ubuntu&target_version=2004&target_type=debnetwork 26 | WORKDIR /usr/local/src 27 | ARG nvidia_pub=3bf863cc.pub 28 | RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin \ 29 | && mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600 \ 30 | && wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/$nvidia_pub \ 31 | && apt-key add $nvidia_pub \ 32 | && echo "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/ /" >> /etc/apt/sources.list \ 33 | && apt-get update \ 34 | && apt-get install -y --no-install-recommends cuda-toolkit-11-2 \ 35 | && rm -rf /var/lib/apt/lists/* $nvidia_pub 36 | 37 | # Build the sample app we’ll use to test. 38 | WORKDIR /usr/local/cuda-11.2/samples/0_Simple/matrixMulCUBLAS 39 | RUN make 40 | 41 | 42 | ## Stage 2: Copy the built sample app into a clean Ubuntu image. 43 | FROM ubuntu:20.04 44 | 45 | COPY --from=0 /usr/local/cuda-11.2/samples/0_Simple/matrixMulCUBLAS / 46 | 47 | # These are the two nVidia shared libraries that the sample app needs. We could 48 | # be smarter about finding this path. However, one thing to avoid is copying in 49 | # all of /usr/local/cuda-11.2/targets/x86_64-linux/lib, because that directory 50 | # is quite large. 51 | COPY --from=0 /usr/local/cuda-11.2/targets/x86_64-linux/lib/libcublas.so.11.4.1.1043 /usr/local/lib 52 | COPY --from=0 /usr/local/cuda-11.2/targets/x86_64-linux/lib/libcublasLt.so.11.4.1.1043 /usr/local/lib 53 | RUN ldconfig 54 | -------------------------------------------------------------------------------- /examples/Dockerfile.openmpi: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM libfabric 3 | 4 | # See Dockerfile.libfabric for MPI goals and details. 5 | 6 | # OpenMPI. 7 | # 8 | # Build with PMIx, PMI2, and FLUX-PMI support. 9 | # 10 | # 1. --disable-pty-support to avoid “pipe function call failed when 11 | # setting up I/O forwarding subsystem”. 12 | # 13 | # 2. --enable-mca-no-build=plm-slurm to support launching processes using the 14 | # host’s srun (i.e., the container OpenMPI needs to talk to the host Slurm’s 15 | # PMIx) but prevent OpenMPI from invoking srun itself from within the 16 | # container, where srun is not installed (the error messages from this are 17 | # inscrutable). 18 | ARG MPI_URL=https://www.open-mpi.org/software/ompi/v4.1/downloads 19 | ARG MPI_VERSION=4.1.4 20 | RUN wget -nv ${MPI_URL}/openmpi-${MPI_VERSION}.tar.gz \ 21 | && tar xf openmpi-${MPI_VERSION}.tar.gz 22 | RUN cd openmpi-${MPI_VERSION} \ 23 | && CFLAGS=-O3 \ 24 | CXXFLAGS=-O3 \ 25 | FLUX_PMI_CFLAGS=-I/usr/local/include/flux/core,-L/usr/local/lib/flux \ 26 | FLUX_PMI_LIBS=-lpmi \ 27 | ./configure --prefix=/usr/local \ 28 | --sysconfdir=/mnt/0 \ 29 | --with-pmix=/usr/local \ 30 | --with-pmi=/usr/local \ 31 | --with-flux-pmi-library \ 32 | --with-libfabric=/usr/local \ 33 | --disable-pty-support \ 34 | --enable-mca-no-build=btl-openib,plm-slurm \ 35 | && make -j$(getconf _NPROCESSORS_ONLN) install \ 36 | && rm -Rf ../openmpi-${MPI_VERSION}* 37 | RUN ldconfig 38 | 39 | # OpenMPI expects this program to exist, even if it’s not used. Default is 40 | # “ssh : rsh”, but that’s not installed. 41 | RUN echo 'plm_rsh_agent = false' >> /mnt/0/openmpi-mca-params.conf 42 | 43 | # Silence spurious pmix error. https://github.com/open-mpi/ompi/issues/7516. 
44 | ENV PMIX_MCA_gds=hash 45 | -------------------------------------------------------------------------------- /examples/Makefile.am: -------------------------------------------------------------------------------- 1 | examplesdir = $(docdir)/examples 2 | 3 | execs = \ 4 | chtest/Build \ 5 | chtest/bind_priv.py \ 6 | chtest/dev_proc_sys.py \ 7 | chtest/fs_perms.py \ 8 | chtest/printns \ 9 | chtest/signal_out.py \ 10 | distroless/hello.py \ 11 | hello/hello.sh \ 12 | obspy/hello.py 13 | 14 | noexecs = \ 15 | Dockerfile.centos_7ch \ 16 | Dockerfile.almalinux_8ch \ 17 | Dockerfile.debian_11ch \ 18 | Dockerfile.libfabric \ 19 | Dockerfile.mpich \ 20 | Dockerfile.nvidia \ 21 | Dockerfile.openmpi \ 22 | chtest/Makefile \ 23 | chtest/chroot-escape.c \ 24 | chtest/mknods.c \ 25 | chtest/setgroups.c \ 26 | chtest/setuid.c \ 27 | copy/Dockerfile \ 28 | copy/dirA/fileAa \ 29 | copy/dirB/fileBa \ 30 | copy/dirB/fileBb \ 31 | copy/dirCa/dirCb/fileCba \ 32 | copy/dirCa/dirCb/fileCbb \ 33 | copy/dirD/fileDa \ 34 | copy/dirEa/dirEb/fileEba \ 35 | copy/dirEa/dirEb/fileEbb \ 36 | copy/dirF/dir19a3/file19b1 \ 37 | copy/dirF/file19a3 \ 38 | copy/dirF/file19a2 \ 39 | copy/dirF/dir19a2/file19b2 \ 40 | copy/dirF/dir19a2/dir19b2/file19c1 \ 41 | copy/dirF/dir19a2/dir19b3/file19c1 \ 42 | copy/dirF/dir19a2/file19b3 \ 43 | copy/dirG/diry/file_ \ 44 | copy/dirG/filey \ 45 | copy/dirG/s_dir1 \ 46 | copy/dirG/s_dir4/file_ \ 47 | copy/dirG/s_file1 \ 48 | copy/dirG/s_file4/file_ \ 49 | copy/fileA \ 50 | copy/fileB \ 51 | copy/test.bats \ 52 | distroless/Dockerfile \ 53 | exhaustive/Dockerfile \ 54 | hello/Dockerfile \ 55 | hello/README \ 56 | lammps/Dockerfile \ 57 | lammps/melt.patch \ 58 | lammps/simple.patch \ 59 | lustre/Dockerfile \ 60 | mpibench/Dockerfile.mpich \ 61 | mpibench/Dockerfile.openmpi \ 62 | mpihello/Dockerfile.mpich \ 63 | mpihello/Dockerfile.openmpi \ 64 | mpihello/Makefile \ 65 | mpihello/hello.c \ 66 | mpihello/slurm.sh \ 67 | multistage/Dockerfile \ 68 | obspy/Dockerfile \ 69 | obspy/README \ 70 | obspy/obspy.png \ 71 | paraview/Dockerfile \ 72 | paraview/cone.2ranks.vtk \ 73 | paraview/cone.nranks.vtk \ 74 | paraview/cone.png \ 75 | paraview/cone.py \ 76 | paraview/cone.serial.vtk \ 77 | seccomp/Dockerfile \ 78 | seccomp/mknods.c \ 79 | seccomp/test.bats \ 80 | spack/Dockerfile \ 81 | spark/Dockerfile \ 82 | spark/slurm.sh 83 | 84 | batsfiles = \ 85 | distroless/test.bats \ 86 | exhaustive/test.bats \ 87 | hello/test.bats \ 88 | lammps/test.bats \ 89 | lustre/test.bats \ 90 | mpibench/test.bats \ 91 | mpihello/test.bats \ 92 | multistage/test.bats \ 93 | obspy/test.bats \ 94 | paraview/test.bats \ 95 | spack/test.bats \ 96 | spark/test.bats 97 | 98 | nobase_examples_SCRIPTS = $(execs) 99 | 100 | nobase_examples_DATA = $(noexecs) 101 | if ENABLE_TEST 102 | nobase_examples_DATA += $(batsfiles) 103 | endif 104 | 105 | EXTRA_DIST = $(execs) $(noexecs) $(batsfiles) 106 | 107 | # Automake is completely unable to deal with symlinks; we cannot include them 108 | # in the source code or "make dist" won't work, and we can't include them in 109 | # the files to install or "make install" won't work. These targets take care 110 | # of everything manually. 111 | # 112 | # Note: -T prevents ln(1) from dereferencing and descending into symlinks to 113 | # directories. Without this, new symlinks are created within such directories, 114 | # instead of replacing the existing symlink as we wanted. See PR #722. 
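# Illustration, using the first link below: if copy/dirCa/symlink-to-dirCb
# already exists and points to the directory dirCb, then plain
#
#     ln -fs dirCb copy/dirCa/symlink-to-dirCb
#
# dereferences the existing link and creates a new symlink *inside* dirCb,
# whereas with -T the existing symlink itself is replaced, which is what
# these targets need.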
115 | 116 | all-local: 117 | ln -fTs dirCb copy/dirCa/symlink-to-dirCb 118 | ln -fTs fileDa copy/dirD/symlink-to-fileDa 119 | ln -fTs dirEb copy/dirEa/symlink-to-dirEb 120 | ln -fTs filey copy/dirG/s_dir2 121 | ln -fTs diry copy/dirG/s_dir3 122 | ln -fTs filey copy/dirG/s_file2 123 | ln -fTs diry copy/dirG/s_file3 124 | ln -fTs fileA copy/symlink-to-fileA 125 | ln -fTs fileB copy/symlink-to-fileB-A 126 | ln -fTs fileB copy/symlink-to-fileB-B 127 | 128 | clean-local: 129 | rm -f copy/dirCa/symlink-to-dirCb 130 | rm -f copy/dirD/symlink-to-fileDa 131 | rm -f copy/dirEa/symlink-to-dirEb 132 | rm -f copy/dirG/s_dir2 133 | rm -f copy/dirG/s_dir3 134 | rm -f copy/dirG/s_file2 135 | rm -f copy/dirG/s_file3 136 | rm -f copy/symlink-to-fileA 137 | rm -f copy/symlink-to-fileB-A 138 | rm -f copy/symlink-to-fileB-B 139 | 140 | install-data-hook: 141 | ln -fTs dirCb $(DESTDIR)$(examplesdir)/copy/dirCa/symlink-to-dirCb 142 | ln -fTs fileDa $(DESTDIR)$(examplesdir)/copy/dirD/symlink-to-fileDa 143 | ln -fTs dirEb $(DESTDIR)$(examplesdir)/copy/dirEa/symlink-to-dirEb 144 | ln -fTs filey $(DESTDIR)$(examplesdir)/copy/dirG/s_dir2 145 | ln -fTs diry $(DESTDIR)$(examplesdir)/copy/dirG/s_dir3 146 | ln -fTs filey $(DESTDIR)$(examplesdir)/copy/dirG/s_file2 147 | ln -fTs diry $(DESTDIR)$(examplesdir)/copy/dirG/s_file3 148 | ln -fTs fileA $(DESTDIR)$(examplesdir)/copy/symlink-to-fileA 149 | ln -fTs fileB $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-A 150 | ln -fTs fileB $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-B 151 | 152 | uninstall-local: 153 | rm -f $(DESTDIR)$(examplesdir)/copy/dirCa/symlink-to-dirCb 154 | rm -f $(DESTDIR)$(examplesdir)/copy/dirD/symlink-to-fileDa 155 | rm -f $(DESTDIR)$(examplesdir)/copy/dirEa/symlink-to-dirEb 156 | rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_dir2 157 | rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_dir3 158 | rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_file2 159 | rm -f $(DESTDIR)$(examplesdir)/copy/dirG/s_file3 160 | rm -f $(DESTDIR)$(examplesdir)/copy/symlink-to-fileA 161 | rm -f $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-A 162 | rm -f $(DESTDIR)$(examplesdir)/copy/symlink-to-fileB-B 163 | 164 | uninstall-hook: 165 | rmdir $$(find $(docdir) -type d | sort -r) 166 | 167 | -------------------------------------------------------------------------------- /examples/chtest/Build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Build an Alpine Linux image roughly following the chroot(2) instructions: 4 | # https://wiki.alpinelinux.org/wiki/Installing_Alpine_Linux_in_a_chroot 5 | # 6 | # We deliberately do not sudo. It’s a little rough around the edges, because 7 | # apk expects root, but it better follows the principle of least privilege. We 8 | # could tidy by using the fakeroot utility, but AFAICT that’s not particularly 9 | # common and we’d prefer not to introduce another dependency. For example, 10 | # it's a standard tool on Debian but only in EPEL for CentOS. 11 | # 12 | # FIXME: Despite the guidance in the Build script API docs, this produces a 13 | # tarball even though the process does not naturally produce one. This is 14 | # because we are also creating some rather bizarre tar edge cases. These 15 | # should be moved to a separate script. 
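# (The hidden-file, permission, and broken-symlink fixtures created further
# down are examples of those edge cases.)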
16 | # 17 | # ch-test-scope: quick 18 | 19 | set -ex 20 | 21 | srcdir=$1 22 | tarball_uncompressed=${2}.tar 23 | tarball=${tarball_uncompressed}.gz 24 | workdir=$3 25 | 26 | arch=$(uname -m) 27 | mirror=http://dl-cdn.alpinelinux.org/alpine/v3.9 28 | # Dynamically select apk-tools-static version. We would prefer to hard-code a 29 | # version (and upgrade on our schedule), but we can’t because Alpine does not 30 | # keep old package versions. If we try, the build breaks every few months (for 31 | # example, see issue #242). 32 | apk_tools=$( wget -qO - "${mirror}/main/${arch}" \ 33 | | grep -F apk-tools-static \ 34 | | sed -E 's/^.*(apk-tools-static-[0-9.r-]+\.apk).*$/\1/') 35 | img=${workdir}/img 36 | 37 | cd "$workdir" 38 | 39 | # “apk add” wants to install a bunch of files root:root. Thus, if we don’t map 40 | # ourselves to root:root, we get thousands of errors about “Failed to set 41 | # ownership”. 42 | # 43 | # For most Build scripts, we’d simply error out with missing prerequisites, 44 | # but this is a core image that much of the test suite depends on. 45 | ch_run="ch-run -u0 -g0 -w ${img}" 46 | 47 | ## Bootstrap base Alpine Linux. 48 | 49 | # Download statically linked apk. 50 | wget "${mirror}/main/${arch}/${apk_tools}" 51 | 52 | # Bootstrap directories. 53 | mkdir img 54 | mkdir img/{dev,etc,proc,sys,tmp} 55 | touch img/etc/{group,hosts,passwd,resolv.conf} 56 | 57 | # Bootstrap static apk. 58 | (cd img && tar xf "../${apk_tools}") 59 | mkdir img/etc/apk 60 | echo ${mirror}/main > img/etc/apk/repositories 61 | 62 | # Install the base system and a dynamically linked apk. 63 | # 64 | # This will give a few errors about chown failures. However, the install does 65 | # seem to work, so we ignore the failed exit code. 66 | $ch_run -- /sbin/apk.static \ 67 | --allow-untrusted --initdb --update-cache \ 68 | add alpine-base apk-tools \ 69 | || true 70 | 71 | # Now that we’ve bootstrapped, we don’t need apk.static any more. It wasn’t 72 | # installed using apk, so it’s not in the database and can just be rm’ed. 73 | rm img/sbin/apk.static.* 74 | 75 | # Install packages we need for our tests. 76 | $ch_run -- /sbin/apk add gcc make musl-dev python3 || true 77 | 78 | # Validate the install. 79 | $ch_run -- /sbin/apk audit --system 80 | $ch_run -- /sbin/apk stats 81 | 82 | # Fix permissions. 83 | # 84 | # Note that this removes setuid/setgid bits from a few files (and 85 | # directories). There is not a race condition, i.e., a window where setuid 86 | # executables could become the invoking users, which would be a security hole, 87 | # because the setuid/setgid binaries are not group- or world-readable until 88 | # after this chmod. 89 | chmod -R u+rw,ug-s img 90 | 91 | 92 | ## Install our test stuff. 93 | 94 | # Fixtures for --bind tests 95 | mkdir img/home/directory-in-home 96 | touch img/home/file-in-home 97 | 98 | # Test programs. 99 | cp -r "$srcdir" img/test 100 | $ch_run --cd /test -- sh -c 'make clean && make' 101 | 102 | # Fixtures for /dev cleaning. 103 | touch img/dev/deleteme 104 | mkdir -p img/mnt/dev 105 | touch img/mnt/dev/dontdeleteme 106 | 107 | # Fixture to make sure we raise hidden files in non-tarbombs. 108 | touch img/.hiddenfile1 img/..hiddenfile2 img/...hiddenfile3 109 | 110 | # Fixtures for bind-mounting 111 | ln -s ../bind4 img/mnt/bind4 112 | ln -s ./doesnotexist img/mnt/link-b0rken-rel 113 | ln -s /doesnotexist img/mnt/link-b0rken-abs 114 | ln -s /tmp img/mnt/link-bad-abs 115 | ln -s ../.. 
img/mnt/link-bad-rel 116 | 117 | # Fixture to test resolv.conf as symlink (issue #1015). 118 | mv img/etc/resolv.conf img/etc/resolv.conf.real 119 | ln -s /etc/resolv.conf.real img/etc/resolv.conf 120 | 121 | # Fixtures to validate permissions are retained on export (issue #1241). See 122 | # FAQ for why this isn’t 7777. 123 | touch img/maxperms_file 124 | chmod 0777 img/maxperms_file 125 | mkdir img/maxperms_dir 126 | chmod 1777 img/maxperms_dir 127 | 128 | # Get rid of “/root” directory, used for “HOME” test in “ch-run_misc.bats”. 129 | rmdir "$img"/root 130 | 131 | ## Tar it up. 132 | 133 | # Using pigz saves about 8 seconds. Normally we wouldn’t care about that, but 134 | # this script is part of the quick scope, which we’d like developers to use 135 | # frequently, so every second matters. 136 | if command -v pigz > /dev/null 2>&1; then 137 | gzip_cmd=pigz 138 | else 139 | gzip_cmd=gzip 140 | fi 141 | 142 | # Charliecloud supports images both with a single top level directory and 143 | # without (tarbomb). The Docker images in the test suite are all tarbombs 144 | # (because that’s what “docker export” gives us), so use a containing 145 | # directory for this one. 146 | tar cf "$tarball_uncompressed" -- img 147 | 148 | # Finalize the tarball. 149 | $gzip_cmd -f "$tarball_uncompressed" 150 | [[ -f $tarball ]] 151 | -------------------------------------------------------------------------------- /examples/chtest/Makefile: -------------------------------------------------------------------------------- 1 | BINS := chroot-escape mknods setgroups setuid 2 | ALL := $(BINS) 3 | CFLAGS := -std=c11 -Wall -Werror 4 | 5 | .PHONY: all 6 | all: $(ALL) 7 | 8 | .PHONY: clean 9 | clean: 10 | rm -f $(ALL) 11 | -------------------------------------------------------------------------------- /examples/chtest/bind_priv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # This script tries to bind to a privileged port on each of the IP addresses 4 | # specified on the command line. 5 | 6 | import errno 7 | import socket 8 | import sys 9 | 10 | PORT = 7 # echo 11 | 12 | results = dict() 13 | 14 | try: 15 | for ip in sys.argv[1:]: 16 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 17 | try: 18 | s.bind((ip, PORT)) 19 | except OSError as x: 20 | if (x.errno in (errno.EACCES, errno.EADDRNOTAVAIL)): 21 | results[ip] = x.errno 22 | else: 23 | raise 24 | else: 25 | results[ip] = 0 26 | except Exception as x: 27 | print('ERROR\texception: %s' % x) 28 | rc = 1 29 | else: 30 | if (len(results) < 1): 31 | print('ERROR\tnothing to test', end='') 32 | rc = 1 33 | elif (len(set(results.values())) != 1): 34 | print('ERROR\tmixed results: ', end='') 35 | rc = 1 36 | else: 37 | result = next(iter(results.values())) 38 | if (result != 0): 39 | print('SAFE\t%d (%s) ' % (result, errno.errorcode[result]), end='') 40 | rc = 0 41 | else: 42 | print('RISK\tsuccessful bind ', end='') 43 | rc = 1 44 | explanation = ' '.join('%s=%d' % (ip, e) 45 | for (ip, e) in sorted(results.items())) 46 | print(explanation) 47 | 48 | sys.exit(rc) 49 | -------------------------------------------------------------------------------- /examples/chtest/chroot-escape.c: -------------------------------------------------------------------------------- 1 | /* This program tries to escape a chroot using well-established methods, which 2 | are not an exploit but rather take advantage of chroot(2)'s well-defined 3 | behavior. 
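   In outline (as implemented in main() below): save a file descriptor for
   the current directory, chroot(2) into a scratch subdirectory (which leaves
   the descriptor pointing outside the new root), fchdir(2) back to it, call
   chdir("..") many times, then chroot(".").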
We use device and inode numbers to test whether the root 4 | directory is the same before and after the escape. 5 | 6 | References: 7 | https://filippo.io/escaping-a-chroot-jail-slash-1/ 8 | http://www.bpfh.net/simes/computing/chroot-break.html 9 | 10 | */ 11 | 12 | #define _DEFAULT_SOURCE 13 | #include 14 | #include 15 | #include 16 | #include 17 | #include 18 | #include 19 | #include 20 | #include 21 | 22 | 23 | void fatal(char * msg) 24 | { 25 | printf("ERROR\t%s: %s\n", msg, strerror(errno)); 26 | exit(EXIT_FAILURE); 27 | } 28 | 29 | int main() 30 | { 31 | struct stat before, after; 32 | int fd; 33 | int status = EXIT_FAILURE; 34 | char tmpdir_template[] = "/tmp/chtest.tmp.chroot.XXXXXX"; 35 | char * tmpdir_name; 36 | 37 | if (stat("/", &before)) fatal("stat before"); 38 | 39 | tmpdir_name = mkdtemp(tmpdir_template); 40 | if (tmpdir_name == NULL) 41 | fatal("mkdtemp"); 42 | 43 | if ((fd = open(".", O_RDONLY)) < 0) fatal("open"); 44 | 45 | if (chroot(tmpdir_name)) { 46 | if (errno == EPERM) { 47 | printf("SAFE\tchroot(2) failed with EPERM\n"); 48 | status = EXIT_SUCCESS; 49 | } else { 50 | fatal("chroot"); 51 | } 52 | } else { 53 | if (fchdir(fd)) fatal("fchdir"); 54 | if (close(fd)) fatal("close"); 55 | 56 | for (int i = 0; i < 1024; i++) 57 | if (chdir("..")) fatal("chdir"); 58 | 59 | /* If we got this far, we should be able to call chroot(2), so failure 60 | is an error. */ 61 | if (chroot(".")) fatal("chroot"); 62 | 63 | /* If root directory is the same before and after the attempted escape, 64 | then the escape failed, and we should be happy. */ 65 | if (stat("/", &after)) fatal("stat after"); 66 | if (before.st_dev == after.st_dev && before.st_ino == after.st_ino) { 67 | printf("SAFE\t"); 68 | status = EXIT_SUCCESS; 69 | } else { 70 | printf("RISK\t"); 71 | status = EXIT_FAILURE; 72 | } 73 | printf("dev/inode before %lu/%lu, after %lu/%lu\n", 74 | before.st_dev, before.st_ino, after.st_dev, after.st_ino); 75 | } 76 | 77 | if (rmdir(tmpdir_name)) fatal("rmdir"); 78 | return status; 79 | } 80 | -------------------------------------------------------------------------------- /examples/chtest/dev_proc_sys.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os.path 4 | import sys 5 | 6 | # Files in /dev and /sys seem to vary between Linux systems. Thus, try a few 7 | # candidates and use the first one that exists. What we want is a file with 8 | # permissions root:root -rw------- that’s in a directory readable and 9 | # executable by unprivileged users, so we know we’re testing permissions on 10 | # the file rather than any of its containing directories. 
This may help for 11 | # finding such a file in /sys: 12 | # 13 | # $ find /sys -type f -a -perm 600 -ls 14 | # 15 | sys_file = None 16 | for f in ("/sys/devices/cpu/rdpmc", 17 | "/sys/kernel/mm/page_idle/bitmap", 18 | "/sys/module/nf_conntrack_ipv4/parameters/hashsize", 19 | "/sys/kernel/slab/request_sock_TCP/red_zone"): 20 | if (os.path.exists(f)): 21 | sys_file = f 22 | break 23 | 24 | if (sys_file is None): 25 | print("ERROR\tno test candidates in /sys exist") 26 | sys.exit(1) 27 | 28 | dev_file = None 29 | for f in ("/dev/cpu_dma_latency", "/dev/mem"): 30 | if (os.path.exists(f)): 31 | dev_file = f 32 | break 33 | 34 | if (dev_file is None): 35 | print("ERROR\tno test candidates in /dev exist") 36 | sys.exit(1) 37 | 38 | problem_ct = 0 39 | for f in (dev_file, "/proc/kcore", sys_file): 40 | try: 41 | open(f, "rb").read(1) 42 | print("RISK\t%s: read allowed" % f) 43 | problem_ct += 1 44 | except PermissionError: 45 | print("SAFE\t%s: read not allowed" % f) 46 | except OSError as x: 47 | print("ERROR\t%s: exception: %s" % (f, x)) 48 | problem_ct += 1 49 | 50 | sys.exit(problem_ct != 0) 51 | -------------------------------------------------------------------------------- /examples/chtest/fs_perms.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # This script walks the directories specified in sys.argv[1:] prepared by 4 | # make-perms-test.sh and attempts to read, write, and traverse (cd) each of 5 | # the entries within. It compares the result to the expectation encoded in the 6 | # filename. 7 | # 8 | # A summary line is printed on stdout. Running chatter describing each 9 | # evaluation is printed on stderr. 10 | # 11 | # Note: This works more or less the same as an older version embodied by 12 | # `examples/sandbox.py --filesystem` but is implemented in pure Python without 13 | # shell commands. Thus, the whole script must be run as root if you want to 14 | # see what root can do. 15 | 16 | import os.path 17 | import random 18 | import re 19 | import sys 20 | 21 | EXPECTED_RE = re.compile(r'~(...)$') 22 | class Makes_No_Sense(TypeError): pass 23 | 24 | VERBOSE = False 25 | 26 | 27 | def main(): 28 | if (sys.argv[1] == '--verbose'): 29 | global VERBOSE 30 | VERBOSE = True 31 | sys.argv.pop(1) 32 | d = sys.argv[1] 33 | mismatch_ct = 0 34 | test_ct = 0 35 | for path in sorted(os.listdir(d)): 36 | test_ct += 1 37 | mismatch_ct += not test('%s/%s' % (d, path)) 38 | if (test_ct <= 0 or test_ct % 2887 != 0): 39 | error("unexpected number of tests: %d" % test_ct) 40 | if (mismatch_ct == 0): 41 | print('SAFE\t', end='') 42 | else: 43 | print('RISK\t', end='') 44 | print('%d mismatches in %d tests' % (mismatch_ct, test_ct)) 45 | sys.exit(mismatch_ct != 0) 46 | 47 | # Table of test function name fragments. 
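# Keys are (isdir, isfile, islink) tuples as computed in test() below; values
# are (one-character label for the report line, name fragment used to select
# the try_<op>_<fragment> handler).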
48 | testvec = { (False, False, False): ('X', 'bad'), 49 | (False, False, True ): ('l', 'broken_symlink'), 50 | (False, True, False): ('f', 'file'), 51 | (False, True, True ): ('f', 'file'), 52 | (True, False, False): ('d', 'dir'), 53 | (True, False, True ): ('d', 'dir') } 54 | 55 | def error(msg): 56 | print('ERROR\t%s' % msg) 57 | sys.exit(1) 58 | 59 | def expected(path): 60 | m = EXPECTED_RE.search(path) 61 | if (m is None): 62 | return '*' 63 | else: 64 | return m[1] 65 | 66 | def test(path): 67 | filetype = (os.path.isdir(path), 68 | os.path.isfile(path), 69 | os.path.islink(path)) 70 | report = '%s %-24s ' % (testvec[filetype][0], path) 71 | expect = expected(path) 72 | result = '' 73 | for op in 'r', 'w', 't': # read, write, traverse 74 | f = globals()['try_%s_%s' % (op, testvec[filetype][1])] 75 | try: 76 | f(path) 77 | except (PermissionError, Makes_No_Sense): 78 | result += '-' 79 | except Exception as x: 80 | error('exception on %s: %s' % (path, x)) 81 | else: 82 | result += op 83 | report += result 84 | if (expect != '*' and result != expect): 85 | print('%s mismatch' % report) 86 | return False 87 | else: 88 | if (VERBOSE): 89 | print('%s ok' % report) 90 | return True 91 | 92 | def try_r_bad(path): 93 | error('bad file type: %s' % path) 94 | try_t_bad = try_r_bad 95 | try_w_bad = try_r_bad 96 | 97 | def try_r_broken_symlink(path): 98 | raise Makes_No_Sense() 99 | try_t_broken_symlink = try_r_broken_symlink 100 | try_w_broken_symlink = try_r_broken_symlink 101 | 102 | def try_r_dir(path): 103 | os.listdir(path) 104 | 105 | def try_t_dir(path): 106 | try_r_file(path + '/file') 107 | 108 | def try_w_dir(path): 109 | fpath = '%s/a%d' % (path, random.getrandbits(64)) 110 | try_w_file(fpath) 111 | os.unlink(fpath) 112 | 113 | def try_r_file(path): 114 | with open(path, 'rb', buffering=0) as fp: 115 | fp.read(1) 116 | 117 | def try_t_file(path): 118 | raise Makes_No_Sense() 119 | 120 | def try_w_file(path): 121 | # The file should exist, but this will create it if it doesn’t. We don't 122 | # check for that error condition because we *only* want to touch the OS for 123 | # open(2) and write(2). 124 | with open(path, 'wb', buffering=0) as fp: 125 | fp.write(b'written by fs_test.py\n') 126 | 127 | if (__name__ == '__main__'): 128 | main() 129 | -------------------------------------------------------------------------------- /examples/chtest/mknods.c: -------------------------------------------------------------------------------- 1 | /* Try to make some device files, and print a message to stdout describing 2 | what happened. 
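   The riskiest outcome would be creating and then opening a node for
   /dev/mem (major 1, minor 1), which exposes physical memory; hence any
   device that is successfully created is also test-opened (see issue #381)
   before being unlinked.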
See: https://www.kernel.org/doc/Documentation/devices.txt */ 3 | 4 | #define _GNU_SOURCE 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | 14 | const unsigned char_devs[] = { 1, 3, /* /dev/null -- most innocuous */ 15 | 1, 1, /* /dev/mem -- most juicy */ 16 | 0 }; 17 | 18 | int main(int argc, char ** argv) 19 | { 20 | dev_t dev; 21 | char * dir; 22 | int i, j; 23 | unsigned maj, min; 24 | bool open_ok; 25 | char * path; 26 | 27 | for (i = 1; i < argc; i++) { 28 | dir = argv[i]; 29 | for (j = 0; char_devs[j] != 0; j += 2) { 30 | maj = char_devs[j]; 31 | min = char_devs[j + 1]; 32 | if (0 > asprintf(&path, "%s/c%d.%d", dir, maj, min)) { 33 | printf("ERROR\tasprintf() failed with errno=%d\n", errno); 34 | return 1; 35 | } 36 | fprintf(stderr, "trying to mknod %s: ", path); 37 | dev = makedev(maj, min); 38 | if (mknod(path, S_IFCHR | 0500, dev)) { 39 | // Could not create device; make sure it's an error we expected. 40 | switch (errno) { 41 | case EACCES: 42 | case EINVAL: // e.g. /sys/firmware/efi/efivars 43 | case ENOENT: // e.g. /proc 44 | case ENOTDIR: // for bind-mounted files e.g. /etc/passwd 45 | case EPERM: 46 | case EROFS: 47 | fprintf(stderr, "failed as expected with errno=%d\n", errno); 48 | break; 49 | default: 50 | fprintf(stderr, "failed with unexpected errno\n"); 51 | printf("ERROR\tmknod(2) failed on %s with errno=%d\n", 52 | path, errno); 53 | return 1; 54 | } 55 | } else { 56 | // Device created; safe if we can't open it (see issue #381). 57 | fprintf(stderr, "succeeded\n"); 58 | fprintf(stderr, "trying to open %s: ", path); 59 | if (open(path, O_RDONLY) != -1) { 60 | fprintf(stderr, "succeeded\n"); 61 | open_ok = true; 62 | } else { 63 | open_ok = false; 64 | switch (errno) { 65 | case EACCES: 66 | fprintf(stderr, "failed as expected with errno=%d\n", errno); 67 | break; 68 | default: 69 | fprintf(stderr, "failed with unexpected errno\n"); 70 | printf("ERROR\topen(2) failed on %s with errno=%d\n", 71 | path, errno); 72 | return 1; 73 | } 74 | } 75 | // Remove the device, whether or not we were able to open it. 76 | if (unlink(path)) { 77 | printf("ERROR\tunlink(2) failed on %s with errno=%d", 78 | path, errno); 79 | return 1; 80 | } 81 | if (open_ok) { 82 | printf("RISK\tmknod(2), open(2) succeeded on %s (now removed)\n", 83 | path); 84 | return 1; 85 | } 86 | } 87 | } 88 | } 89 | 90 | printf("SAFE\t%d devices in %d dirs failed\n", 91 | (i - 1) * (j / 2), i - 1); 92 | return 0; 93 | } 94 | -------------------------------------------------------------------------------- /examples/chtest/printns: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Print out my namespace IDs, to stdout or (if specified) the path in $2. 4 | # Then, if $1 is specified, wait that number of seconds before exiting. 
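#
# Illustrative session (hostname and inode numbers are hypothetical):
#
#   $ ./printns 5 /tmp/ns.txt &
#   $ cat /tmp/ns.txt
#   /proc/self/ns/mnt:node1:4026531840
#   /proc/self/ns/user:node1:4026531837
#   ...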
5 | 6 | import glob 7 | import os 8 | import socket 9 | import sys 10 | import time 11 | 12 | if (len(sys.argv) > 1): 13 | pause = float(sys.argv[1]) 14 | else: 15 | pause = 0 16 | 17 | if (len(sys.argv) > 2): 18 | out = open(sys.argv[2], "wt") 19 | else: 20 | out = sys.stdout 21 | 22 | hostname = socket.gethostname() 23 | 24 | for ns in glob.glob("/proc/self/ns/*"): 25 | stat = os.stat(ns) 26 | print("%s:%s:%d" % (ns, hostname, stat.st_ino), file=out, flush=True) 27 | out.close() # close the file ASAP to not collide with a later printns 28 | 29 | if (pause): 30 | time.sleep(pause) 31 | -------------------------------------------------------------------------------- /examples/chtest/setgroups.c: -------------------------------------------------------------------------------- 1 | /* Try to drop the last supplemental group, and print a message to stdout 2 | describing what happened. */ 3 | 4 | #define _DEFAULT_SOURCE 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | 11 | #define NGROUPS_MAX 128 12 | 13 | int main() 14 | { 15 | int group_ct; 16 | gid_t groups[NGROUPS_MAX]; 17 | 18 | group_ct = getgroups(NGROUPS_MAX, groups); 19 | if (group_ct == -1) { 20 | printf("ERROR\tgetgroups(2) failed with errno=%d\n", errno); 21 | return 1; 22 | } 23 | 24 | fprintf(stderr, "found %d groups; trying to drop last group %d\n", 25 | group_ct, groups[group_ct - 1]); 26 | 27 | if (setgroups(group_ct - 1, groups)) { 28 | if (errno == EPERM) { 29 | printf("SAFE\tsetgroups(2) failed with EPERM\n"); 30 | return 0; 31 | } else { 32 | printf("ERROR\tsetgroups(2) failed with errno=%d\n", errno); 33 | return 1; 34 | } 35 | } else { 36 | printf("RISK\tsetgroups(2) succeeded\n"); 37 | return 1; 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /examples/chtest/setuid.c: -------------------------------------------------------------------------------- 1 | /* Try to change effective UID. */ 2 | 3 | #define _GNU_SOURCE 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #define NOBODY 65534 10 | #define NOBODY2 65533 11 | 12 | int main(int argc, char ** argv) 13 | { 14 | // target UID is nobody, unless we're already nobody 15 | uid_t start = geteuid(); 16 | uid_t target = start != NOBODY ? NOBODY : NOBODY2; 17 | int result; 18 | 19 | fprintf(stderr, "current EUID=%u, attempting EUID=%u\n", start, target); 20 | 21 | result = seteuid(target); 22 | 23 | // setuid(2) fails with EINVAL in user namespaces and EPERM if not root. 24 | if (result == 0) { 25 | printf("RISK\tsetuid(2) succeeded for EUID=%u\n", target); 26 | return 1; 27 | } else if (errno == EINVAL) { 28 | printf("SAFE\tsetuid(2) failed as expected with EINVAL\n"); 29 | return 0; 30 | } 31 | 32 | printf("ERROR\tsetuid(2) failed unexpectedly with errno=%d\n", errno); 33 | return 1; 34 | } 35 | -------------------------------------------------------------------------------- /examples/chtest/signal_out.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Send a signal to a process outside the container. 4 | # 5 | # This is a little tricky. We want a process that: 6 | # 7 | # 1. is certain to exist, to avoid false negatives 8 | # 2. we shouldn’t be able to signal (specifically, we can’t create a process 9 | # to serve as the target) 10 | # 3. is outside the container 11 | # 4. won’t crash the host too badly if killed by the signal 12 | # 13 | # We want a signal that: 14 | # 15 | # 5. will be harmless if received 16 | # 6. 
is not blocked 17 | # 18 | # Accordingly, this test sends SIGCONT to the youngest getty process. The 19 | # thinking is that the virtual terminals are unlikely to be in use, so losing 20 | # one will be straightforward to clean up. 21 | 22 | import os 23 | import signal 24 | import subprocess 25 | import sys 26 | 27 | try: 28 | pdata = subprocess.check_output(["pgrep", "-nl", "getty"]) 29 | except subprocess.CalledProcessError: 30 | print("ERROR\tpgrep failed") 31 | sys.exit(1) 32 | 33 | pid = int(pdata.split()[0]) 34 | 35 | try: 36 | os.kill(pid, signal.SIGCONT) 37 | except PermissionError as x: 38 | print("SAFE\tfailed as expected: %s" % x) 39 | sys.exit(0) 40 | 41 | print("RISK\tsucceeded") 42 | sys.exit(1) 43 | -------------------------------------------------------------------------------- /examples/copy/dirA/fileAa: -------------------------------------------------------------------------------- 1 | dirA/fileAa 2 | -------------------------------------------------------------------------------- /examples/copy/dirB/fileBa: -------------------------------------------------------------------------------- 1 | dirB/fileBa 2 | -------------------------------------------------------------------------------- /examples/copy/dirB/fileBb: -------------------------------------------------------------------------------- 1 | dirB/fileBb 2 | -------------------------------------------------------------------------------- /examples/copy/dirCa/dirCb/fileCba: -------------------------------------------------------------------------------- 1 | dirCa/dirCb/fileCba 2 | -------------------------------------------------------------------------------- /examples/copy/dirCa/dirCb/fileCbb: -------------------------------------------------------------------------------- 1 | dirCa/dirCb/fileCbb 2 | -------------------------------------------------------------------------------- /examples/copy/dirD/fileDa: -------------------------------------------------------------------------------- 1 | dirD/fileDa 2 | -------------------------------------------------------------------------------- /examples/copy/dirEa/dirEb/fileEba: -------------------------------------------------------------------------------- 1 | dirEa/dirEb/fileEba 2 | -------------------------------------------------------------------------------- /examples/copy/dirEa/dirEb/fileEbb: -------------------------------------------------------------------------------- 1 | dirEa/dirEb/fileEbb 2 | -------------------------------------------------------------------------------- /examples/copy/dirF/dir19a2/dir19b2/file19c1: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirF/dir19a2/dir19b3/file19c1: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirF/dir19a2/file19b2: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirF/dir19a2/file19b3: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirF/dir19a3/file19b1: -------------------------------------------------------------------------------- 1 | new 2 | 
-------------------------------------------------------------------------------- /examples/copy/dirF/file19a2: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirF/file19a3: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirG/diry/file_: -------------------------------------------------------------------------------- 1 | diry/file_ 2 | -------------------------------------------------------------------------------- /examples/copy/dirG/filey: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirG/s_dir1: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirG/s_dir4/file_: -------------------------------------------------------------------------------- 1 | s_dir4/file_ 2 | -------------------------------------------------------------------------------- /examples/copy/dirG/s_file1: -------------------------------------------------------------------------------- 1 | new 2 | -------------------------------------------------------------------------------- /examples/copy/dirG/s_file4/file_: -------------------------------------------------------------------------------- 1 | s_file4/file_ 2 | -------------------------------------------------------------------------------- /examples/copy/fileA: -------------------------------------------------------------------------------- 1 | fileA 2 | -------------------------------------------------------------------------------- /examples/copy/fileB: -------------------------------------------------------------------------------- 1 | fileB 2 | -------------------------------------------------------------------------------- /examples/distroless/Dockerfile: -------------------------------------------------------------------------------- 1 | # Skip this test because of issues with gcr.io (see #896). 2 | # ch-test-scope: skip 3 | # ch-test-arch-exclude: ppc64le # base image unavailable 4 | # Distroless is a Google project providing slim images that contain runtime 5 | # dependencies only. https://github.com/GoogleContainerTools/distroless 6 | # The python3 image was chosen for ease of testing. 7 | FROM gcr.io/distroless/python3 8 | COPY hello.py / 9 | -------------------------------------------------------------------------------- /examples/distroless/hello.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | print("Hello, World!") 3 | -------------------------------------------------------------------------------- /examples/distroless/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | scope standard 6 | prerequisites_ok distroless 7 | } 8 | 9 | @test "${ch_tag}/hello" { 10 | run ch-run "$ch_img" -- /hello.py 11 | echo "$output" 12 | [[ $status -eq 0 ]] 13 | [[ $output = 'Hello, World!' 
]] 14 | } 15 | -------------------------------------------------------------------------------- /examples/exhaustive/Dockerfile: -------------------------------------------------------------------------------- 1 | # This Dockerfile aims to have at least one of everything, to exercise the 2 | # comprehensiveness of Dockerfile feature support. 3 | # 4 | # FIXME: That focus is a bit out of date. I think really what is here is the 5 | # ways we want to exercise ch-image in ways we care about the resulting image. 6 | # Exercises where we don’t care are in test/build/50_dockerfile.bats. But, I 7 | # don't want to do the refactoring right now. 8 | # 9 | # See: https://docs.docker.com/engine/reference/builder 10 | # 11 | # ch-test-scope: full 12 | # ch-test-builder-include: ch-image 13 | 14 | # Use a moderately complex image reference. 15 | FROM registry-1.docker.io:443/library/alpine:3.17 AS stage1 16 | 17 | RUN pwd 18 | WORKDIR /usr/local/src 19 | RUN pwd 20 | RUN ls --color=no -lh 21 | 22 | RUN apk add --no-cache bc 23 | RUN ["echo", "hello \n${chse_2} \${chse_2} ${NOTSET}"] 24 | # should print: 25 | # a -${chse_2}- b -value2- c -c- d -d- 26 | RUN echo 'a -${chse_2}-' "b -${chse_2}-" "c -${NOTSET:-c}-" "d -${chse_2:+d}-" 27 | RUN env 28 | 29 | # WORKDIR. See test/build/50_ch-image.bats where we validate this all worked OK. 30 | # FIXME: test with variable 31 | # 32 | # filesystem root 33 | WORKDIR / 34 | RUN mkdir workdir 35 | # absolute path, no mkdir 36 | WORKDIR /workdir 37 | RUN touch file 38 | # absolute path, mkdir 39 | RUN mkdir /workdir/abs2 40 | WORKDIR /workdir/abs2 41 | RUN touch file 42 | # relative path, no mkdir 43 | WORKDIR rel1 44 | RUN touch file1 45 | # relative path, 2nd level, no mkdir 46 | WORKDIR rel2 47 | RUN touch file 48 | # relative path, parent dir, no mkdir 49 | WORKDIR .. 50 | RUN touch file2 51 | # results 52 | RUN ls -R /workdir 53 | 54 | # TODO: 55 | # comment with trailing backslash (line continuation does not work in comments) 56 | 57 | -------------------------------------------------------------------------------- /examples/exhaustive/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | scope standard 6 | prerequisites_ok exhaustive 7 | } 8 | 9 | @test "${ch_tag}/WORKDIR" { 10 | output_expected=$(cat <<'EOF' 11 | /workdir: 12 | abs2 13 | file 14 | 15 | /workdir/abs2: 16 | file 17 | rel1 18 | 19 | /workdir/abs2/rel1: 20 | file1 21 | file2 22 | rel2 23 | 24 | /workdir/abs2/rel1/rel2: 25 | file 26 | EOF 27 | ) 28 | run ch-run "$ch_img" -- ls -R /workdir 29 | echo "$output" 30 | [[ $status -eq 0 ]] 31 | diff -u <(echo "$output_expected") <(echo "$output") 32 | } 33 | -------------------------------------------------------------------------------- /examples/hello/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: standard 2 | FROM almalinux:8 3 | 4 | RUN dnf install -y --setopt=install_weak_deps=false openssh-clients \ 5 | && dnf clean all 6 | 7 | COPY . hello 8 | -------------------------------------------------------------------------------- /examples/hello/README: -------------------------------------------------------------------------------- 1 | This example is a hello world Charliecloud container. It demonstrates running 2 | a command on the host from inside a container. 
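A minimal sketch of the usage the test suite exercises, assuming the image
has already been built and unpacked as ./hello:

  $ ch-run ./hello -- /hello/hello.sh
  hello world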
3 | -------------------------------------------------------------------------------- /examples/hello/hello.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | echo 'hello world' 6 | -------------------------------------------------------------------------------- /examples/hello/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | scope standard 6 | prerequisites_ok hello 7 | pmix_or_skip 8 | LC_ALL=C # no other locales installed in container 9 | } 10 | 11 | @test "${ch_tag}/hello" { 12 | run ch-run "$ch_img" -- /hello/hello.sh 13 | echo "$output" 14 | [[ $status -eq 0 ]] 15 | [[ $output = 'hello world' ]] 16 | } 17 | 18 | @test "${ch_tag}/distribution sanity" { 19 | # Try various simple things that should work in a basic Debian 20 | # distribution. (This does not test anything Charliecloud manipulates.) 21 | ch-run "$ch_img" -- /bin/bash -c true 22 | ch-run "$ch_img" -- /bin/true 23 | ch-run "$ch_img" -- find /etc -name 'a*' 24 | ch-run "$ch_img" -- sh -c 'echo foo | /bin/grep -E foo' 25 | ch-run "$ch_img" -- nice true 26 | } 27 | -------------------------------------------------------------------------------- /examples/lammps/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM openmpi 3 | WORKDIR /usr/local/src 4 | 5 | # Packages for building. 6 | RUN dnf install -y --setopt=install_weak_deps=false \ 7 | cmake \ 8 | patch \ 9 | python3-devel \ 10 | python3-pip \ 11 | python3-setuptools \ 12 | && dnf clean all 13 | 14 | # Building mpi4py from source to ensure it is built against our MPI build 15 | # Building numpy from source to work around issues seen on Aarch64 systems 16 | RUN pip3 install --no-binary :all: cython==0.29.24 mpi4py==3.1.1 numpy==1.19.5 17 | #RUN ln -s /usr/bin/python3 /usr/bin/python 18 | # Build LAMMPS. 19 | ARG LAMMPS_VERSION=29Sep2021 20 | RUN wget -nv https://github.com/lammps/lammps/archive/patch_${LAMMPS_VERSION}.tar.gz \ 21 | && tar xf patch_$LAMMPS_VERSION.tar.gz \ 22 | && mkdir lammps-${LAMMPS_VERSION}.build \ 23 | && cd lammps-${LAMMPS_VERSION}.build \ 24 | && cmake -DCMAKE_INSTALL_PREFIX=/usr/local \ 25 | -DCMAKE_BUILD_TYPE=Release \ 26 | -DBUILD_MPI=yes \ 27 | -DBUILD_LIB=on \ 28 | -DBUILD_SHARED_LIBS=on \ 29 | -DPKG_DIPOLE=yes \ 30 | -DPKG_KSPACE=yes \ 31 | -DPKG_POEMS=yes \ 32 | -DPKG_PYTHON=yes \ 33 | -DPKG_USER-REAXC=yes \ 34 | -DPKG_USER-MEAMC=yes \ 35 | -DLAMMPS_MACHINE=mpi \ 36 | ../lammps-patch_${LAMMPS_VERSION}/cmake \ 37 | && make -j $(getconf _NPROCESSORS_ONLN) install \ 38 | && ln -s /usr/local/src/lammps-patch_${LAMMPS_VERSION}/ /lammps \ 39 | && rm -f ../patch_$LAMMPS_VERSION.tar.gz 40 | RUN ldconfig 41 | 42 | # Patch in.melt to increase problem dimensions. 43 | COPY melt.patch /lammps/examples/melt 44 | RUN patch -p1 -d / < /lammps/examples/melt/melt.patch 45 | # Patch simple.py to uncomment mpi4py calls and disable file output. 46 | # Patch in.simple to increase problem dimensions. 
47 | COPY simple.patch /lammps/python/examples 48 | RUN patch -p1 -d / < /lammps/python/examples/simple.patch 49 | -------------------------------------------------------------------------------- /examples/lammps/melt.patch: -------------------------------------------------------------------------------- 1 | --- a/lammps/examples/melt/in.melt 2014-01-07 14:43:31.000000000 -0700 2 | +++ b/lammps/examples/melt/in.melt 2018-03-16 14:37:02.000000000 -0600 3 | @@ -6,3 +6,3 @@ 4 | lattice fcc 0.8442 5 | -region box block 0 10 0 10 0 10 6 | +region box block 0 120 0 120 0 120 7 | create_box 1 box 8 | @@ -32,2 +32,2 @@ 9 | thermo 50 10 | -run 250 11 | +run 3 12 | -------------------------------------------------------------------------------- /examples/lammps/simple.patch: -------------------------------------------------------------------------------- 1 | --- /lammps/python/examples/simple.py 2019-09-20 09:51:15.000000000 -0600 2 | +++ /lammps/python/examples/simple.py 2019-09-23 16:58:28.950720810 -0600 3 | @@ -1,4 +1,4 @@ 4 | -#!/usr/bin/env python -i 5 | +#!/usr/bin/python3 6 | # preceding line should have path for Python on your machine 7 | 8 | # simple.py 9 | @@ -28,12 +28,12 @@ 10 | me = 0 11 | 12 | # uncomment this if running in parallel via mpi4py 13 | -#from mpi4py import MPI 14 | -#me = MPI.COMM_WORLD.Get_rank() 15 | -#nprocs = MPI.COMM_WORLD.Get_size() 16 | +from mpi4py import MPI 17 | +me = MPI.COMM_WORLD.Get_rank() 18 | +nprocs = MPI.COMM_WORLD.Get_size() 19 | 20 | from lammps import lammps 21 | -lmp = lammps() 22 | +lmp = lammps("mpi") 23 | 24 | # run infile one line at a time 25 | 26 | @@ -85,7 +85,7 @@ 27 | # test of new gather/scatter and box extract/reset methods 28 | # can try this in parallel and with/without atom_modify sort enabled 29 | 30 | -lmp.command("write_dump all custom tmp.simple id type x y z fx fy fz"); 31 | +#lmp.command("write_dump all custom tmp.simple id type x y z fx fy fz"); 32 | 33 | x = lmp.gather_atoms("x",1,3) 34 | f = lmp.gather_atoms("f",1,3) 35 | @@ -123,10 +123,10 @@ 36 | boxlo,boxhi,xy,yz,xz,periodicity,box_change = lmp.extract_box() 37 | if me == 0: print("Box info",boxlo,boxhi,xy,yz,xz,periodicity,box_change) 38 | 39 | -lmp.reset_box([0,0,0],[10,10,8],0,0,0) 40 | +#lmp.reset_box([0,0,0],[10,10,8],0,0,0) 41 | 42 | -boxlo,boxhi,xy,yz,xz,periodicity,box_change = lmp.extract_box() 43 | -if me == 0: print("Box info",boxlo,boxhi,xy,yz,xz,periodicity,box_change) 44 | +#boxlo,boxhi,xy,yz,xz,periodicity,box_change = lmp.extract_box() 45 | +#if me == 0: print("Box info",boxlo,boxhi,xy,yz,xz,periodicity,box_change) 46 | 47 | # uncomment if running in parallel via mpi4py 48 | -#print("Proc %d out of %d procs has" % (me,nprocs), lmp) 49 | +print("Proc %d out of %d procs has" % (me,nprocs), lmp) 50 | --- /lammps/python/examples/in.simple 2019-10-02 16:09:55.198770328 -0600 51 | +++ /lammps/python/examples/in.simple 2019-10-02 16:10:21.263332834 -0600 52 | @@ -5,7 +5,7 @@ atom_style atomic 53 | atom_modify map array 54 | 55 | lattice fcc 0.8442 56 | -region box block 0 4 0 4 0 4 57 | +region box block 0 120 0 120 0 120 58 | create_box 1 box 59 | create_atoms 1 box 60 | mass 1 1.0 61 | -------------------------------------------------------------------------------- /examples/lammps/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | # LAMMPS does have a test suite, but we do not use it, because it seems too 5 | # fiddly to get it running properly. 
6 | # 7 | # 1. Running the command listed in LAMMPS’ Jenkins tests [2] fails with a 8 | # strange error: 9 | # 10 | # $ python run_tests.py tests/test_commands.py tests/test_examples.py 11 | # Loading tests from tests/test_commands.py... 12 | # Traceback (most recent call last): 13 | # File "run_tests.py", line 81, in 14 | # tests += load_tests(f) 15 | # File "run_tests.py", line 22, in load_tests 16 | # for testname in list(tc): 17 | # TypeError: 'Test' object is not iterable 18 | # 19 | # Looking in run_tests.py, this sure looks like a bug (it’s expecting a 20 | # list of Tests, I think, but getting a single Test). But it works in 21 | # Jenkins. Who knows. 22 | # 23 | # 2. The files test/test_*.py say that the tests can be run with 24 | # “nosetests”, which they can, after setting several environment 25 | # variables. But some of the tests fail for me. I didn’t diagnose. 26 | # 27 | # Instead, we simply run some of the example problems in a loop and see if 28 | # they exit with return code zero. We don’t check output. 29 | # 30 | # Note that a lot of the other examples crash. I haven’t diagnosed or figured 31 | # out if we care. 32 | # 33 | # We are open to patches if anyone knows how to fix this situation reliably. 34 | # 35 | # [1]: https://github.com/lammps/lammps-testing 36 | # [2]: https://ci.lammps.org/job/lammps/job/master/job/testing/lastSuccessfulBuild/console 37 | 38 | setup () { 39 | scope full 40 | prerequisites_ok "$ch_tag" 41 | multiprocess_ok 42 | pmix_or_skip 43 | [[ -n "$ch_cray" ]] && export FI_PROVIDER=$cray_prov 44 | } 45 | 46 | lammps_try () { 47 | # These examples cd because some (not all) of the LAMMPS tests expect to 48 | # find things based on $CWD. 49 | infiles=$(ch-run --cd "/lammps/examples/${1}" "$ch_img" -- \ 50 | bash -c "ls in.*") 51 | for i in $infiles; do 52 | printf '\n\n%s\n' "$i" 53 | # shellcheck disable=SC2086 54 | $ch_mpirun_core ch-run --join --cd /lammps/examples/$1 "$ch_img" -- \ 55 | lmp_mpi -log none -in "$i" 56 | done 57 | 58 | } 59 | 60 | @test "${ch_tag}/inject host cray mpi ($cray_prov)" { 61 | cray_ofi_or_skip "$ch_img" 62 | run ch-run "$ch_img" -- fi_info 63 | echo "$output" 64 | [[ $output == *"provider: $cray_prov"* ]] 65 | [[ $output == *"fabric: $cray_prov"* ]] 66 | [[ $status -eq 0 ]] 67 | } 68 | 69 | @test "${ch_tag}/using all cores" { 70 | # shellcheck disable=SC2086 71 | run $ch_mpirun_core ch-run --join "$ch_img" -- \ 72 | lmp_mpi -log none -in /lammps/examples/melt/in.melt 73 | echo "$output" 74 | [[ $status -eq 0 ]] 75 | ranks_found=$( echo "$output" \ 76 | | grep -F 'MPI tasks' \ 77 | | tail -1 \ 78 | | sed -r 's/^.+with ([0-9]+) MPI tasks.+$/\1/') 79 | echo "ranks expected: ${ch_cores_total}" 80 | echo "ranks found: ${ranks_found}" 81 | [[ $ranks_found -eq "$ch_cores_total" ]] 82 | } 83 | 84 | @test "${ch_tag}/crack" { lammps_try crack; } 85 | @test "${ch_tag}/dipole" { lammps_try dipole; } 86 | @test "${ch_tag}/flow" { lammps_try flow; } 87 | @test "${ch_tag}/friction" { lammps_try friction; } 88 | @test "${ch_tag}/melt" { lammps_try melt; } 89 | 90 | @test "${ch_tag}/mpi4py simple" { 91 | $ch_mpirun_core ch-run --join --cd /lammps/python/examples "$ch_img" -- \ 92 | ./simple.py in.simple 93 | } 94 | 95 | @test "${ch_tag}/revert image" { 96 | unpack_img_all_nodes "$ch_cray" 97 | } 98 | -------------------------------------------------------------------------------- /examples/lustre/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | # 
ch-test-arch-exclude: aarch64 # No lustre RPMS for aarch64 3 | 4 | FROM almalinux:8 5 | 6 | # Install lustre-client dependencies 7 | RUN dnf install -y --setopt=install_weak_deps=false \ 8 | e2fsprogs-libs \ 9 | wget \ 10 | perl \ 11 | && dnf clean all 12 | 13 | ARG LUSTRE_VERSION=2.12.6 14 | ARG LUSTRE_URL=https://downloads.whamcloud.com/public/lustre/lustre-${LUSTRE_VERSION}/el8/client/RPMS/x86_64/ 15 | 16 | # The lustre-client rpm depends on the kmod-lustre-client rpm, which is not 17 | # required for our tests and is frequently incompatible with the kernel 18 | # headers in the container, so we use the --nodeps flag to work around this. 19 | 20 | # NOTE: The --nodeps flag ignores all dependencies, not just kmod-lustre-client; 21 | # this could suppress a legitimate failure at build time and lead to odd 22 | # behavior at runtime. 23 | RUN wget ${LUSTRE_URL}/lustre-client-${LUSTRE_VERSION}-1.el8.x86_64.rpm \ 24 | && rpm -i --nodeps *.rpm \ 25 | && rm -f *.rpm -------------------------------------------------------------------------------- /examples/lustre/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | scope full 6 | prerequisites_ok lustre 7 | 8 | if [[ $CH_TEST_LUSTREDIR = skip ]]; then 9 | # Assume that in a Slurm allocation, even if one node, Lustre should 10 | # be available for testing. 11 | msg='no Lustre test directory to bind mount' 12 | if [[ $SLURM_JOB_ID ]]; then 13 | pedantic_fail "$msg" 14 | else 15 | skip "$msg" 16 | fi 17 | elif [[ ! -d $CH_TEST_LUSTREDIR ]]; then 18 | echo "'${CH_TEST_LUSTREDIR}' is not a directory" 1>&2 19 | exit 1 20 | fi 21 | } 22 | 23 | clean_dir () { 24 | rmdir "${1}/set_stripes" 25 | rmdir "${1}/test_create_dir" 26 | rm "${1}/test_write.txt" 27 | rmdir "$1" 28 | } 29 | 30 | tidy_run () { 31 | ch-run -b "$binds" "$ch_img" -- "$@" 32 | } 33 | 34 | binds=${CH_TEST_LUSTREDIR}:/mnt/0 35 | work_dir=/mnt/0/charliecloud_test 36 | 37 | @test "${ch_tag}/start clean" { 38 | clean_dir "${CH_TEST_LUSTREDIR}/charliecloud_test" || true 39 | mkdir "${CH_TEST_LUSTREDIR}/charliecloud_test" # fail if not cleaned up 40 | } 41 | 42 | @test "${ch_tag}/create directory" { 43 | tidy_run mkdir "${work_dir}/test_create_dir" 44 | } 45 | 46 | @test "${ch_tag}/create file" { 47 | tidy_run touch "${work_dir}/test_create_file" 48 | } 49 | 50 | @test "${ch_tag}/delete file" { 51 | tidy_run rm "${work_dir}/test_create_file" 52 | } 53 | 54 | @test "${ch_tag}/write file" { 55 | # sh wrapper to get echo output to the right place. Without it, the output 56 | # from echo goes outside the container. 57 | tidy_run sh -c "echo hello > ${work_dir}/test_write.txt" 58 | } 59 | 60 | @test "${ch_tag}/read file" { 61 | output_expected=$(cat <<'EOF' 62 | hello 63 | 0+1 records in 64 | 0+1 records out 65 | EOF 66 | ) 67 | # Using dd allows us to skip the write cache and hit the disk.
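    # iflag=nocache asks the kernel to drop any cached pages for the file
    # first, and status=noxfer suppresses dd's transfer-statistics line, so
    # the output reduces to the three lines expected above.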
68 | run tidy_run dd if="${work_dir}/test_write.txt" iflag=nocache status=noxfer 69 | diff -u <(echo "$output_expected") <(echo "$output") 70 | } 71 | 72 | @test "${ch_tag}/striping" { 73 | tidy_run mkdir "${work_dir}/set_stripes" 74 | stripe_ct_old=$(tidy_run lfs getstripe --stripe-count "${work_dir}/set_stripes/") 75 | echo "old stripe count: $stripe_ct_old" 76 | expected_new=$((stripe_ct_old * 2)) 77 | echo "expected new stripe count: $expected_new" 78 | tidy_run lfs setstripe -c "$expected_new" "${work_dir}/set_stripes" 79 | stripe_ct_new=$(tidy_run lfs getstripe --stripe-count "${work_dir}/set_stripes") 80 | echo "actual new stripe count: $stripe_ct_new" 81 | [[ $expected_new -eq $stripe_ct_new ]] 82 | } 83 | 84 | @test "${ch_tag}/clean up" { 85 | clean_dir "${CH_TEST_LUSTREDIR}/charliecloud_test" 86 | } 87 | -------------------------------------------------------------------------------- /examples/mpibench/Dockerfile.mpich: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM mpich 3 | 4 | RUN dnf install -y which \ 5 | && dnf clean all 6 | 7 | # Compile the Intel MPI benchmark 8 | WORKDIR /usr/local/src 9 | ARG IMB_VERSION=IMB-v2021.3 10 | RUN git clone --branch $IMB_VERSION --depth 1 \ 11 | https://github.com/intel/mpi-benchmarks \ 12 | && cd mpi-benchmarks/src_c \ 13 | && make CC=mpicc -j$(getconf _NPROCESSORS_ONLN) -f Makefile TARGET=MPI1 14 | -------------------------------------------------------------------------------- /examples/mpibench/Dockerfile.openmpi: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM openmpi 3 | 4 | RUN dnf install -y which \ 5 | && dnf clean all 6 | 7 | # Compile the Intel MPI benchmark 8 | WORKDIR /usr/local/src 9 | ARG IMB_VERSION=IMB-v2021.3 10 | RUN git clone --branch $IMB_VERSION --depth 1 \ 11 | https://github.com/intel/mpi-benchmarks \ 12 | && cd mpi-benchmarks/src_c \ 13 | && make CC=mpicc -j$(getconf _NPROCESSORS_ONLN) -f Makefile TARGET=MPI1 14 | -------------------------------------------------------------------------------- /examples/mpihello/Dockerfile.mpich: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM mpich 3 | 4 | COPY . /hello 5 | WORKDIR /hello 6 | RUN make clean && make 7 | -------------------------------------------------------------------------------- /examples/mpihello/Dockerfile.openmpi: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM openmpi 3 | 4 | # This example 5 | COPY . /hello 6 | WORKDIR /hello 7 | RUN make clean && make 8 | -------------------------------------------------------------------------------- /examples/mpihello/Makefile: -------------------------------------------------------------------------------- 1 | BINS := hello 2 | CFLAGS := -std=gnu11 -Wall 3 | 4 | .PHONY: all 5 | all: $(BINS) 6 | 7 | .PHONY: clean 8 | clean: 9 | rm -f $(BINS) 10 | 11 | $(BINS): Makefile 12 | 13 | %: %.c 14 | mpicc $(CFLAGS) $< -o $@ 15 | -------------------------------------------------------------------------------- /examples/mpihello/hello.c: -------------------------------------------------------------------------------- 1 | /* MPI test program. Reports user namespace and rank, then sends and receives 2 | some simple messages. 
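   Wire pattern (see main() below): rank 0 sends MSG_OUT to every other rank;
   rank i echoes back op(i, MSG_OUT), i.e. i * MSG_OUT, and each side checks
   that the value it receives is the one it expects.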
3 | 4 | Patterned after: 5 | http://en.wikipedia.org/wiki/Message_Passing_Interface#Example_program */ 6 | 7 | #define _GNU_SOURCE 8 | #include 9 | #include 10 | #include 11 | #include 12 | #include 13 | #include 14 | #include 15 | 16 | #include 17 | 18 | #define TAG 0 19 | #define MSG_OUT 8675309 20 | 21 | void fatal(char * fmt, ...); 22 | int op(int rank, int i); 23 | 24 | int rank, rank_ct; 25 | 26 | int main(int argc, char ** argv) 27 | { 28 | char hostname[HOST_NAME_MAX+1]; 29 | char mpi_version[MPI_MAX_LIBRARY_VERSION_STRING]; 30 | int mpi_version_len; 31 | int msg; 32 | MPI_Status mstat; 33 | struct stat st; 34 | 35 | stat("/proc/self/ns/user", &st); 36 | 37 | MPI_Init(&argc, &argv); 38 | MPI_Comm_size(MPI_COMM_WORLD, &rank_ct); 39 | MPI_Comm_rank(MPI_COMM_WORLD, &rank); 40 | 41 | if (rank == 0) { 42 | MPI_Get_library_version(mpi_version, &mpi_version_len); 43 | printf("%d: MPI version:\n%s\n", rank, mpi_version); 44 | } 45 | 46 | gethostname(hostname, HOST_NAME_MAX+1); 47 | printf("%d: init ok %s, %d ranks, userns %lu\n", 48 | rank, hostname, rank_ct, st.st_ino); 49 | fflush(stdout); 50 | 51 | if (rank == 0) { 52 | for (int i = 1; i < rank_ct; i++) { 53 | msg = MSG_OUT; 54 | MPI_Send(&msg, 1, MPI_INT, i, TAG, MPI_COMM_WORLD); 55 | msg = 0; 56 | MPI_Recv(&msg, 1, MPI_INT, i, TAG, MPI_COMM_WORLD, &mstat); 57 | if (msg != op(i, MSG_OUT)) 58 | fatal("0: expected %d back but got %d", op(i, MSG_OUT), msg); 59 | } 60 | } else { 61 | msg = 0; 62 | MPI_Recv(&msg, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD, &mstat); 63 | if (msg != MSG_OUT) 64 | fatal("%d: expected %d but got %d", rank, MSG_OUT, msg); 65 | msg = op(rank, msg); 66 | MPI_Send(&msg, 1, MPI_INT, 0, TAG, MPI_COMM_WORLD); 67 | } 68 | 69 | if (rank == 0) 70 | printf("%d: send/receive ok\n", rank); 71 | 72 | MPI_Finalize(); 73 | if (rank == 0) 74 | printf("%d: finalize ok\n", rank); 75 | return 0; 76 | } 77 | 78 | void fatal(char * fmt, ...) 79 | { 80 | va_list ap; 81 | 82 | fprintf(stderr, "rank %d:", rank); 83 | 84 | va_start(ap, fmt); 85 | vfprintf(stderr, fmt, ap); 86 | va_end(ap); 87 | 88 | fprintf(stderr, "\n"); 89 | exit(EXIT_FAILURE); 90 | } 91 | 92 | int op(int rank, int i) 93 | { 94 | return i * rank; 95 | } 96 | -------------------------------------------------------------------------------- /examples/mpihello/slurm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --time=0:10:00 3 | 4 | # Arguments: Path to tarball, path to image parent directory. 5 | 6 | set -e 7 | 8 | tar=$1 9 | imgdir=$2 10 | img=${2}/$(basename "${tar%.tar.gz}") 11 | 12 | if [[ -z $tar ]]; then 13 | echo 'no tarball specified' 1>&2 14 | exit 1 15 | fi 16 | printf 'tarball: %s\n' "$tar" 17 | 18 | if [[ -z $imgdir ]]; then 19 | echo 'no image directory specified' 1>&2 20 | exit 1 21 | fi 22 | printf 'image: %s\n' "$img" 23 | 24 | # Make Charliecloud available (varies by site). 25 | module purge 26 | module load friendly-testing 27 | module load charliecloud 28 | 29 | # Unpack image. 30 | srun ch-convert -o dir "$tar" "$imgdir" 31 | 32 | # MPI version in container. 33 | printf 'container: ' 34 | ch-run "$img" -- mpirun --version | grep -E '^mpirun' 35 | 36 | # Run the app. 
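# srun launches one ch-run instance per task; the MPI library inside the
# containers is expected to wire the ranks together via Slurm's PMI.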
37 | srun --cpus-per-task=1 ch-run "$img" -- /hello/hello 38 | -------------------------------------------------------------------------------- /examples/mpihello/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | scope full 6 | prerequisites_ok "$ch_tag" 7 | pmix_or_skip 8 | if [[ $srun_mpi != pmix* ]]; then 9 | skip 'pmix required' 10 | fi 11 | } 12 | 13 | count_ranks () { 14 | echo "$1" \ 15 | | grep -E '^0: init ok' \ 16 | | tail -1 \ 17 | | sed -r 's/^.+ ([0-9]+) ranks.+$/\1/' 18 | } 19 | 20 | @test "${ch_tag}/guest starts ranks" { 21 | openmpi_or_skip 22 | # shellcheck disable=SC2086 23 | run ch-run $ch_unslurm "$ch_img" -- mpirun $ch_mpirun_np /hello/hello 24 | echo "$output" 25 | [[ $status -eq 0 ]] 26 | rank_ct=$(count_ranks "$output") 27 | echo "found ${rank_ct} ranks, expected ${ch_cores_node}" 28 | [[ $rank_ct -eq "$ch_cores_node" ]] 29 | [[ $output = *'0: send/receive ok'* ]] 30 | [[ $output = *'0: finalize ok'* ]] 31 | } 32 | 33 | @test "${ch_tag}/inject cray mpi ($cray_prov)" { 34 | cray_ofi_or_skip "$ch_img" 35 | run ch-run "$ch_img" -- fi_info 36 | echo "$output" 37 | [[ $output == *"provider: $cray_prov"* ]] 38 | [[ $output == *"fabric: $cray_prov"* ]] 39 | [[ $status -eq 0 ]] 40 | } 41 | 42 | @test "${ch_tag}/validate $cray_prov injection" { 43 | [[ -n "$ch_cray" ]] || skip "host is not cray" 44 | [[ -n "$CH_TEST_OFI_PATH" ]] || skip "--fi-provider not set" 45 | run $ch_mpirun_node ch-run --join "$ch_img" -- sh -c \ 46 | "FI_PROVIDER=$cray_prov FI_LOG_LEVEL=info /hello/hello 2>&1" 47 | echo "$output" 48 | [[ $status -eq 0 ]] 49 | if [[ "$cray_prov" == gni ]]; then 50 | [[ "$output" == *' registering provider: gni'* ]] 51 | [[ "$output" == *'gni:'*'gnix_ep_nic_init()'*'Allocated new NIC for EP'* ]] 52 | fi 53 | if [[ "$cray_prov" == cxi ]]; then 54 | [[ "$output" == *'cxi:mr:ofi_'*'stats:'*'searches'*'deletes'*'hits'* ]] 55 | fi 56 | } 57 | 58 | @test "${ch_tag}/MPI version" { 59 | [[ -z $ch_cray ]] || skip 'serial launches unsupported on Cray' 60 | # shellcheck disable=SC2086 61 | run ch-run $ch_unslurm "$ch_img" -- /hello/hello 62 | echo "$output" 63 | [[ $status -eq 0 ]] 64 | if [[ $ch_mpi = openmpi ]]; then 65 | [[ $output = *'Open MPI'* ]] 66 | else 67 | [[ $ch_mpi = mpich ]] 68 | if [[ $ch_cray ]]; then 69 | [[ $output = *'CRAY MPICH'* ]] 70 | else 71 | [[ $output = *'MPICH Version:'* ]] 72 | fi 73 | fi 74 | } 75 | 76 | @test "${ch_tag}/empty stderr" { 77 | multiprocess_ok 78 | output=$($ch_mpirun_core ch-run --join "$ch_img" -- \ 79 | /hello/hello 2>&1 1>/dev/null) 80 | echo "$output" 81 | [[ -z "$output" ]] 82 | } 83 | 84 | @test "${ch_tag}/serial" { 85 | [[ -z $ch_cray ]] || skip 'serial launches unsupported on Cray' 86 | # This seems to start up the MPI infrastructure (daemons, etc.) within the 87 | # guest even though there's no mpirun. 
88 | # shellcheck disable=SC2086 89 | run ch-run $ch_unslurm "$ch_img" -- /hello/hello 90 | echo "$output" 91 | [[ $status -eq 0 ]] 92 | [[ $output = *' 1 ranks'* ]] 93 | [[ $output = *'0: send/receive ok'* ]] 94 | [[ $output = *'0: finalize ok'* ]] 95 | } 96 | 97 | @test "${ch_tag}/host starts ranks" { 98 | multiprocess_ok 99 | echo "starting ranks with: ${ch_mpirun_core}" 100 | 101 | guest_mpi=$(ch-run "$ch_img" -- mpirun --version | head -1) 102 | echo "guest MPI: ${guest_mpi}" 103 | 104 | # shellcheck disable=SC2086 105 | run $ch_mpirun_core ch-run --join "$ch_img" -- /hello/hello 2>&1 106 | echo "$output" 107 | [[ $status -eq 0 ]] 108 | rank_ct=$(count_ranks "$output") 109 | echo "found ${rank_ct} ranks, expected ${ch_cores_total}" 110 | [[ $rank_ct -eq "$ch_cores_total" ]] 111 | [[ $output = *'0: send/receive ok'* ]] 112 | [[ $output = *'0: finalize ok'* ]] 113 | } 114 | 115 | @test "${ch_tag}/Cray bind mounts" { 116 | [[ $ch_cray ]] || skip 'host is not a Cray' 117 | 118 | ch-run "$ch_img" -- mount | grep -F /dev/hugepages 119 | if [[ $cray_prov == 'gni' ]]; then 120 | ch-run "$ch_img" -- mount | grep -F /var/opt/cray/alps/spool 121 | else 122 | ch-run "$ch_img" -- mount | grep -F /var/spool/slurmd 123 | fi 124 | } 125 | 126 | @test "${ch_tag}/revert image" { 127 | unpack_img_all_nodes "$ch_cray" 128 | } 129 | -------------------------------------------------------------------------------- /examples/multistage/Dockerfile: -------------------------------------------------------------------------------- 1 | # This image tests multi-stage build using GNU Hello. In the first stage, we 2 | # install a build environment and build Hello. In the second stage, we start 3 | # fresh again with a base image and copy the Hello executables. Tests 4 | # demonstrate that Hello runs and none of the build environment is present. 5 | # 6 | # ch-test-scope: standard 7 | 8 | 9 | FROM almalinux:8 AS buildstage 10 | 11 | # Build environment 12 | RUN dnf install -y \ 13 | gcc \ 14 | make \ 15 | wget 16 | WORKDIR /usr/local/src 17 | 18 | # GNU Hello. Install using DESTDIR to make copying below easier. 19 | # 20 | # This downloads from a specific mirror [1] that smelled reliable because both 21 | # ftp.gnu.org itself and the mirror alias ftpmirror.gnu.org are unreliable. 22 | # Specifically, ftpmirror.gnu.org frequently ends up at tripadvisor.com, which 23 | # is frequently HTTP 500. 24 | # 25 | # [1]: https://www.gnu.org/prep/ftp.html 26 | ARG gnu_mirror=mirrors.kernel.org/gnu 27 | ARG version=2.12.1 28 | RUN wget -nv https://${gnu_mirror}/hello/hello-${version}.tar.gz 29 | RUN tar xf hello-${version}.tar.gz \ 30 | && cd hello-${version} \ 31 | && ./configure \ 32 | && make -j $(getconf _NPROCESSORS_ONLN) \ 33 | && make install DESTDIR=/hello 34 | RUN ls -ld /hello/usr/local/*/* 35 | 36 | 37 | FROM almalinux:8 38 | 39 | RUN dnf install -y man 40 | 41 | # COPY the hello install over, by both name and index, making sure not to 42 | # overwrite existing contents. Recall that COPY works differently from cp(1).
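# In particular, “COPY src/ dest/” copies the *contents* of src into dest
# (not src itself), and copying into a directory that already exists merges
# with its contents rather than replacing them; the three COPY instructions
# below rely on this.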
43 | COPY --from=0 /hello/usr/local/bin /usr/local/bin 44 | COPY --from=buildstage /hello/usr/local/share /usr/local/share 45 | COPY --from=buildstage /hello/usr/local/share/locale /usr/local/share/locale 46 | RUN ls -ld /usr/local/*/* 47 | -------------------------------------------------------------------------------- /examples/multistage/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | prerequisites_ok multistage 6 | } 7 | 8 | @test "${ch_tag}/hello" { 9 | run ch-run "$ch_img" -- hello -g 'Hello, Charliecloud!' 10 | echo "$output" 11 | [[ $status -eq 0 ]] 12 | [[ $output = 'Hello, Charliecloud!' ]] 13 | } 14 | 15 | @test "${ch_tag}/man hello" { 16 | ch-run "$ch_img" -- man hello > /dev/null 17 | } 18 | 19 | @test "${ch_tag}/files seem OK" { 20 | [[ $CH_TEST_PACK_FMT = squash-mount ]] && skip 'need directory image' 21 | # hello executable itself. 22 | test -x "${ch_img}/usr/local/bin/hello" 23 | # Present by default. 24 | test -d "${ch_img}/usr/local/share/applications" 25 | test -d "${ch_img}/usr/local/share/info" 26 | test -d "${ch_img}/usr/local/share/man" 27 | # Copied from first stage. 28 | test -d "${ch_img}/usr/local/share/locale" 29 | # Correct file count in directories. 30 | ls -lh "${ch_img}/usr/local/bin" 31 | [[ $(find "${ch_img}/usr/local/bin" -mindepth 1 -maxdepth 1 | wc -l) -eq 1 ]] 32 | ls -lh "${ch_img}/usr/local/share" 33 | [[ $(find "${ch_img}/usr/local/share" -mindepth 1 -maxdepth 1 | wc -l) -eq 4 ]] 34 | } 35 | 36 | @test "${ch_tag}/no first-stage stuff present" { 37 | # Can’t run GCC. 38 | run ch-run "$ch_img" -- gcc --version 39 | echo "$output" 40 | [[ $status -eq $CH_ERR_CMD ]] 41 | [[ $output = *'gcc: No such file or directory'* ]] 42 | 43 | # No GCC or Make. 44 | ls -lh "${ch_img}/usr/bin/gcc" || true 45 | [[ ! -f "${ch_img}/usr/bin/gcc" ]] 46 | ls -lh "${ch_img}/usr/bin/make" || true 47 | [[ ! -f "${ch_img}/usr/bin/make" ]] 48 | } 49 | -------------------------------------------------------------------------------- /examples/obspy/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | # ch-test-arch-exclude: aarch64 # no obspy Conda package 3 | # ch-test-arch-exclude: ppc64le # no obspy Conda package? 4 | FROM almalinux_8ch 5 | 6 | RUN dnf install -y --setopt=install_weak_deps=false \ 7 | zlib-devel \ 8 | && dnf clean all 9 | 10 | WORKDIR /usr/local/src 11 | 12 | # Install Miniconda. Notes/gotchas: 13 | # 14 | # 1. Install into /usr/local. Some of the instructions [e.g., 1] warn 15 | # against putting conda in $PATH; others don’t. However it seems to work 16 | # and then we don’t need to muck with the path. 17 | # 18 | # 2. Use latest version so we catch sooner if things explode. 19 | # 20 | # 3. ObsPy 1.4.0, the latest as of 2024-03-27, is incompatible with Python 21 | # 3.12 [2], which is recently the default in Miniconda (see PR #1885 and 22 | # issue #1886). 23 | # 24 | # [1]: https://docs.anaconda.com/anaconda/user-guide/faq/ 25 | # [2]: https://github.com/obspy/obspy/issues/3313#issuecomment-1818165937 26 | ARG MC_VERSION=py311_24.1.2-0 27 | ARG MC_FILE=Miniconda3-$MC_VERSION-Linux-x86_64.sh 28 | RUN wget -nv https://repo.anaconda.com/miniconda/$MC_FILE 29 | # Miniconda will fail if the HOME variable is not set. 
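# Installer flags: -b runs in batch mode (license accepted, no prompts), -f
# proceeds even if the install prefix already exists, and -p sets that prefix.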
30 | RUN HOME=/home bash $MC_FILE -bf -p /usr/local 31 | RUN rm -Rf $MC_FILE 32 | RUN which conda && conda --version 33 | # Disable automatic conda upgrades for predictable versioning. 34 | RUN conda config --set auto_update_conda False 35 | 36 | # Install obspy, also latest. This is a container, so don’t bother creating a 37 | # new environment for obspy. 38 | # See: https://github.com/obspy/obspy/wiki/Installation-via-Anaconda 39 | RUN conda config --add channels conda-forge 40 | RUN conda install --yes obspy=1.4.0 41 | RUN conda update obspy 42 | 43 | # Hello world program and input from docs. 44 | WORKDIR / 45 | RUN wget -nv http://examples.obspy.org/RJOB_061005_072159.ehz.new 46 | COPY hello.py . 47 | RUN chmod 755 ./hello.py 48 | RUN ldconfig 49 | -------------------------------------------------------------------------------- /examples/obspy/README: -------------------------------------------------------------------------------- 1 | We’d prefer to run the ObsPy test suite, but it seems quite finicky and we 2 | weren’t able to get it to work. Problems with the test suite include: 3 | 4 | 1. Requires network access even for the non-network modules. We filed an 5 | issue about this [1] that did result in likely-actionable exclusions, 6 | though we haven’t followed up. ObsPy also has a PR [2] unmerged as of 7 | 2021-08-04 that could replay the network traffic offline. 8 | 9 | 2. Expects to write within the install directory (e.g., 10 | site-packages/obspy/clients/filesystem/tests/data/tsindex_data), which is 11 | an antipattern even when not containerized. 12 | 13 | 3. LOTS of warnings, e.g. hundreds of deprecation gripes from NumPy as well 14 | as ObsPy itself. 15 | 16 | 4. Various errors, e.g. “AttributeError: 'bool' object has no attribute 17 | 'lower'” from within Matplotlib. (I was able to solve this one by 18 | choosing an older version of Matplotlib than the one depended on by the 19 | ObsPy Conda package, but we don't have time to maintain that.) 20 | 21 | 5. Can’t get it to pass. ;) 22 | 23 | See also issue #64. 24 | 25 | Bottom line, I would love for an ObsPy person to maintain this example with 26 | passing ObsPy tests, but we don't have time to do so. 27 | -------------------------------------------------------------------------------- /examples/obspy/hello.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # “Reading Seismograms” example from §3 of the ObsPy tutorial, with some of 4 | # the prints commented out and taking the plot file from the command line. 
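#
# test.bats runs it inside the image roughly like this (bind-mount source
# path is illustrative):
#
#   $ ch-run -b /tmp/obspy:/mnt IMAGE -- /hello.py /mnt/obspy.png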
5 | # 6 | # See: https://docs.obspy.org/tutorial/code_snippets/reading_seismograms.html 7 | 8 | import sys 9 | 10 | 11 | # §3.0 12 | 13 | from obspy import read 14 | 15 | st = read('RJOB_061005_072159.ehz.new') 16 | #print(st) 17 | #print(len(st)) 18 | tr = st[0] # assign first and only trace to new variable 19 | print(tr) 20 | 21 | # §3.1 22 | 23 | print(tr.stats) 24 | #print(tr.stats.station) 25 | #print(tr.stats.datatype) 26 | 27 | # §3.2 28 | 29 | #print(tr.data) 30 | print(tr.data[0:3]) 31 | print(len(tr)) 32 | 33 | # §3.3 34 | 35 | tr.plot(outfile=sys.argv[1]) 36 | -------------------------------------------------------------------------------- /examples/obspy/obspy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpc/charliecloud/23de6eee5f6c7df6d88ea9efe89e605404afea0e/examples/obspy/obspy.png -------------------------------------------------------------------------------- /examples/obspy/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | scope standard 6 | prerequisites_ok obspy 7 | indir=$CHTEST_EXAMPLES_DIR/obspy 8 | outdir=$BATS_TMPDIR/obspy 9 | } 10 | 11 | @test "${ch_tag}/hello" { 12 | # Remove prior test’s plot to avoid using it if something else breaks. 13 | mkdir -p "$outdir" 14 | rm -f "$outdir"/obspy.png 15 | ch-run -b "${outdir}:/mnt" "$ch_img" -- /hello.py /mnt/obspy.png 16 | } 17 | 18 | @test "${ch_tag}/hello PNG" { 19 | pict_ok 20 | pict_assert_equal "${indir}/obspy.png" \ 21 | "${outdir}/obspy.png" 1 22 | } 23 | -------------------------------------------------------------------------------- /examples/paraview/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: skip #1810 2 | FROM openmpi 3 | WORKDIR /usr/local/src 4 | 5 | # The mesa rpms introduce explicit dependencies python3.11-libs; ParaView will 6 | # error at configure time unless we provide the python3.11-devel package. 7 | RUN dnf install -y --setopt=install_weak_deps=false \ 8 | cmake \ 9 | expat-devel \ 10 | llvm \ 11 | llvm-devel \ 12 | mesa-libGL \ 13 | mesa-libGL-devel \ 14 | mesa-libOSMesa \ 15 | mesa-libOSMesa-devel \ 16 | python3-mako \ 17 | python3-pip \ 18 | python3.11-devel \ 19 | zlib-devel \ 20 | && dnf clean all 21 | 22 | RUN pip3 install --no-binary=mpi4py \ 23 | cython \ 24 | mpi4py 25 | 26 | WORKDIR /usr/local/src 27 | 28 | # ParaView. Use system libpng to work around issues linking with NEON specific 29 | # symbols on ARM. 
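# (Concretely, it is the -DVTK_USE_SYSTEM_PNG=ON flag in the cmake
# invocation below that selects the system libpng.)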
30 | ARG PARAVIEW_MAJORMINOR=5.11 31 | ARG PARAVIEW_VERSION=5.11.2 32 | RUN wget -nv -O ParaView-v${PARAVIEW_VERSION}.tar.xz "https://www.paraview.org/paraview-downloads/download.php?submit=Download&version=v${PARAVIEW_MAJORMINOR}&type=binary&os=Sources&downloadFile=ParaView-v${PARAVIEW_VERSION}.tar.xz" \ 33 | && tar xf ParaView-v${PARAVIEW_VERSION}.tar.xz \ 34 | && mkdir ParaView-v${PARAVIEW_VERSION}.build \ 35 | && cd ParaView-v${PARAVIEW_VERSION}.build \ 36 | && cmake -DCMAKE_INSTALL_PREFIX=/usr/local \ 37 | -DCMAKE_BUILD_TYPE=Release \ 38 | -DBUILD_TESTING=OFF \ 39 | -DBUILD_SHARED_LIBS=ON \ 40 | -DPARAVIEW_ENABLE_PYTHON=ON \ 41 | -DPARAVIEW_BUILD_QT_GUI=OFF \ 42 | -DVTK_USE_X=OFF \ 43 | -DOPENGL_INCLUDE_DIR=IGNORE \ 44 | -DOPENGL_gl_LIBRARY=IGNORE \ 45 | -DVTK_OPENGL_HAS_OSMESA=ON \ 46 | -DVTK_USE_OFFSCREEN=OFF \ 47 | -DPARAVIEW_USE_MPI=ON \ 48 | -DPYTHON_EXECUTABLE=/usr/bin/python3 \ 49 | -DVTK_USE_SYSTEM_PNG=ON \ 50 | ../ParaView-v${PARAVIEW_VERSION} \ 51 | && make -j $(getconf _NPROCESSORS_ONLN) install \ 52 | && rm -Rf ../ParaView-v${PARAVIEW_VERSION}* 53 | -------------------------------------------------------------------------------- /examples/paraview/cone.2ranks.vtk: -------------------------------------------------------------------------------- 1 | # vtk DataFile Version 5.1 2 | vtk output 3 | ASCII 4 | DATASET POLYDATA 5 | POINTS 12 float 6 | 0.5 0 0 -0.5 0.5 0 -0.5 0.25 0.433013 7 | -0.5 -0.25 0.433013 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 8 | -0.5 0.25 -0.433013 0.5 0 0 -0.5 -0.5 6.12323e-17 9 | -0.5 -0.25 -0.433013 -0.5 0.25 -0.433013 -0.5 0.5 -1.22465e-16 10 | 11 | POLYGONS 8 24 12 | OFFSETS vtktypeint64 13 | 0 6 9 12 15 18 21 24 14 | CONNECTIVITY vtktypeint64 15 | 6 5 4 3 2 1 0 1 2 16 | 0 2 3 0 3 4 7 8 9 17 | 7 9 10 7 10 11 18 | -------------------------------------------------------------------------------- /examples/paraview/cone.nranks.vtk: -------------------------------------------------------------------------------- 1 | # vtk DataFile Version 5.1 2 | vtk output 3 | ASCII 4 | DATASET POLYDATA 5 | POINTS 22 float 6 | 0.5 0 0 -0.5 0.5 0 -0.5 0.25 0.433013 7 | -0.5 -0.25 0.433013 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 8 | -0.5 0.25 -0.433013 0.5 0 0 -0.5 0.25 0.433013 9 | -0.5 -0.25 0.433013 0.5 0 0 -0.5 -0.25 0.433013 10 | -0.5 -0.5 6.12323e-17 0.5 0 0 -0.5 -0.5 6.12323e-17 11 | -0.5 -0.25 -0.433013 0.5 0 0 -0.5 -0.25 -0.433013 12 | -0.5 0.25 -0.433013 0.5 0 0 -0.5 0.25 -0.433013 13 | -0.5 0.5 -1.22465e-16 14 | POLYGONS 8 24 15 | OFFSETS vtktypeint64 16 | 0 6 9 12 15 18 21 24 17 | CONNECTIVITY vtktypeint64 18 | 6 5 4 3 2 1 0 1 2 19 | 7 8 9 10 11 12 13 14 15 20 | 16 17 18 19 20 21 21 | -------------------------------------------------------------------------------- /examples/paraview/cone.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpc/charliecloud/23de6eee5f6c7df6d88ea9efe89e605404afea0e/examples/paraview/cone.png -------------------------------------------------------------------------------- /examples/paraview/cone.py: -------------------------------------------------------------------------------- 1 | # Draw a cone and write it out to sys.argv[1] in a few different ways. All 2 | # output files should be bit-for-bit reproducible, i.e., no embedded 3 | # timestamps, hostnames, floating point error, etc. 
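#
# test.bats runs this under pvbatch, serially and via MPI; a two-rank
# invocation looks roughly like this (launcher and bind-mount paths are
# illustrative):
#
#   $ mpirun -np 2 ch-run --join -b in:/mnt/0 -b out:/mnt/1 IMAGE -- \
#         pvbatch /mnt/0/cone.py /mnt/1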
4 | 5 | from __future__ import print_function 6 | 7 | import os 8 | import platform 9 | import sys 10 | 11 | import mpi4py.MPI 12 | import paraview.simple as pv 13 | 14 | # Version information. 15 | print("ParaView %d.%d.%d on Python %s" 16 | % (pv.paraview.servermanager.vtkSMProxyManager.GetVersionMajor(), 17 | pv.paraview.servermanager.vtkSMProxyManager.GetVersionMinor(), 18 | pv.paraview.servermanager.vtkSMProxyManager.GetVersionPatch(), 19 | platform.python_version())) 20 | 21 | # Even if you start multiple pvbatch using MPI, this script is only 22 | # executed by rank 0. Check this assumption. 23 | assert mpi4py.MPI.COMM_WORLD.rank == 0 24 | 25 | # Output directory provided on command line. 26 | outdir = sys.argv[1] 27 | 28 | # Render a cone. 29 | pv.Cone() 30 | pv.Show() 31 | pv.Render() 32 | print("rendered") 33 | 34 | # PNG image (serial). 35 | filename = "%s/cone.png" % outdir 36 | pv.SaveScreenshot(filename) 37 | print(filename) 38 | 39 | # Legacy VTK file (ASCII, serial). 40 | filename = "%s/cone.vtk" % outdir 41 | pv.SaveData(filename, FileType="Ascii") 42 | print(filename) 43 | 44 | # XML VTK files (parallel). 45 | filename=("%s/cone.pvtp" % outdir) 46 | writer = pv.XMLPPolyDataWriter(FileName=filename) 47 | writer.UpdatePipeline() 48 | print(filename) 49 | 50 | # Done. 51 | print("done") 52 | -------------------------------------------------------------------------------- /examples/paraview/cone.serial.vtk: -------------------------------------------------------------------------------- 1 | # vtk DataFile Version 5.1 2 | vtk output 3 | ASCII 4 | DATASET POLYDATA 5 | POINTS 7 float 6 | 0.5 0 0 -0.5 0.5 0 -0.5 0.25 0.433013 7 | -0.5 -0.25 0.433013 -0.5 -0.5 6.12323e-17 -0.5 -0.25 -0.433013 8 | -0.5 0.25 -0.433013 9 | METADATA 10 | INFORMATION 2 11 | NAME L2_NORM_RANGE LOCATION vtkDataArray 12 | DATA 2 0.5 0.707107 13 | NAME L2_NORM_FINITE_RANGE LOCATION vtkDataArray 14 | DATA 2 0.5 0.707107 15 | 16 | POLYGONS 8 24 17 | OFFSETS vtktypeint64 18 | 0 6 9 12 15 18 21 24 19 | CONNECTIVITY vtktypeint64 20 | 6 5 4 3 2 1 0 1 2 21 | 0 2 3 0 3 4 0 4 5 22 | 0 5 6 0 6 1 23 | -------------------------------------------------------------------------------- /examples/paraview/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup () { 5 | scope full 6 | prerequisites_ok paraview 7 | pmix_or_skip 8 | indir=${CHTEST_EXAMPLES_DIR}/paraview 9 | outdir=$BATS_TMPDIR/paraview 10 | inbind=${indir}:/mnt/0 11 | outbind=${outdir}:/mnt/1 12 | if [[ $ch_multinode ]]; then 13 | # Bats only creates $BATS_TMPDIR on the first node. 14 | # shellcheck disable=SC2086 15 | $ch_mpirun_node mkdir -p "$outdir" 16 | else 17 | mkdir -p "$outdir" 18 | fi 19 | } 20 | 21 | # The first two tests demonstrate ParaView as an “executable” to process a 22 | # non-containerized input deck (cone.py) and produce non-containerized output. 23 | # 24 | # .png: In previous versions, PNG output is antialiased with a single rank 25 | # and not with multiple ranks depending on the execution environment. 26 | # This is no longer the case as of version 5.5.4 but may change with 27 | # a new version of Paraview. 28 | # 29 | # .vtk: The number of extra and/or duplicate points and indexing of these 30 | # points into polygons varied by rank count on my VM, but not on the 31 | # cluster. The resulting VTK file is dependent on whether an image was 32 | # rendered serially or using 2 or n processes. 
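#        A quick way to see those rank-count differences is to diff the
#        reference files, e.g.:
#
#            $ diff -u cone.2ranks.vtk cone.nranks.vtk
#
#        which shows the extra duplicated points in the n-rank output.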
33 | # 34 | # We do not check .pvtp (and its companion .vtp) output because it’s a 35 | # collection of XML files containing binary data and it seems too hairy to me. 36 | 37 | @test "${ch_tag}/inject cray mpi ($cray_prov)" { 38 | cray_ofi_or_skip "$ch_img" 39 | run ch-run "$ch_img" -- fi_info 40 | echo "$output" 41 | [[ $output == *"provider: $cray_prov"* ]] 42 | [[ $output == *"fabric: $cray_prov"* ]] 43 | [[ $status -eq 0 ]] 44 | } 45 | 46 | @test "${ch_tag}/cone serial" { 47 | [[ -z $ch_cray ]] || skip 'serial launches unsupported on Cray' 48 | # shellcheck disable=SC2086 49 | ch-run $ch_unslurm -b "$inbind" -b "$outbind" "$ch_img" -- \ 50 | pvbatch /mnt/0/cone.py /mnt/1 51 | mv "$outdir"/cone.png "$outdir"/cone.serial.png 52 | ls -l "$outdir"/cone* 53 | diff -u "${indir}/cone.serial.vtk" "${outdir}/cone.vtk" 54 | } 55 | 56 | @test "${ch_tag}/cone serial PNG" { 57 | [[ -z $ch_cray ]] || skip 'serial launches unsupported on Cray' 58 | pict_ok 59 | pict_assert_equal "${indir}/cone.png" "${outdir}/cone.serial.png" 1000 60 | } 61 | 62 | @test "${ch_tag}/cone ranks=2" { 63 | multiprocess_ok 64 | # shellcheck disable=SC2086 65 | $ch_mpirun_2 ch-run --join -b "$inbind" -b "$outbind" "$ch_img" -- \ 66 | pvbatch /mnt/0/cone.py /mnt/1 67 | mv "$outdir"/cone.png "$outdir"/cone.2ranks.png 68 | ls -l "$outdir"/cone* 69 | diff -u "${indir}/cone.2ranks.vtk" "${outdir}/cone.vtk" 70 | } 71 | 72 | @test "${ch_tag}/cone ranks=2 PNG" { 73 | multiprocess_ok 74 | pict_ok 75 | pict_assert_equal "${indir}/cone.png" "${outdir}/cone.2ranks.png" 1000 76 | } 77 | 78 | @test "${ch_tag}/cone ranks=N" { 79 | multiprocess_ok 80 | # shellcheck disable=SC2086 81 | $ch_mpirun_core ch-run --join -b "$inbind" -b "$outbind" "$ch_img" -- \ 82 | pvbatch /mnt/0/cone.py /mnt/1 83 | mv "$outdir"/cone.png "$outdir"/cone.nranks.png 84 | ls -l "$outdir"/cone* 85 | diff -u "${indir}/cone.nranks.vtk" "${outdir}/cone.vtk" 86 | } 87 | 88 | @test "${ch_tag}/cone ranks=N PNG" { 89 | multiprocess_ok 90 | pict_ok 91 | pict_assert_equal "${indir}/cone.png" "${outdir}/cone.nranks.png" 1000 92 | } 93 | 94 | @test "${ch_tag}/revert image" { 95 | unpack_img_all_nodes "$ch_cray" 96 | } 97 | -------------------------------------------------------------------------------- /examples/seccomp/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: standard 2 | # ch-test-builder-include: ch-image 3 | FROM alpine:3.17 4 | RUN apk add gcc musl-dev strace 5 | RSYNC / / 6 | RUN gcc -std=c11 -Wall -Werror -fmax-errors=1 -o mknods mknods.c 7 | RUN strace ./mknods 8 | RUN ls -lh /_* 9 | RUN test $(ls /_* | wc -l) == 2 10 | RUN test -p /_mknod_fifo 11 | RUN test -p /_mknodat_fifo 12 | -------------------------------------------------------------------------------- /examples/seccomp/mknods.c: -------------------------------------------------------------------------------- 1 | /* Use mknod(2) and mknodat(2) to create character and block devices (which 2 | should be blocked by the seccomp filters) and FIFOs (which should not.) 
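   The accompanying Dockerfile asserts what should happen: all six calls
   below appear to succeed (the seccomp filter fakes success rather than
   returning an error), but afterward only the two FIFOs actually exist,
   i.e., /_* matches exactly two files.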
*/ 3 | 4 | #define _GNU_SOURCE 5 | #include <errno.h> 6 | #include <fcntl.h> 7 | #include <stdio.h> 8 | #include <stdlib.h> 9 | #include <string.h> 10 | #include <sys/stat.h> 11 | #include <sys/sysmacros.h> 12 | 13 | #define DEVNULL makedev(1,3) // character device /dev/null 14 | #define DEVRAM0 makedev(1,0) // block device /dev/ram0 15 | #define Z_(x) if (x) (fprintf(stderr, "failed: %d: %s (%d)\n", \ 16 | __LINE__, strerror(errno), errno), \ 17 | exit(1)) 18 | 19 | int main(void) 20 | { 21 | Z_ (mknod("/_mknod_chr", S_IFCHR, DEVNULL)); 22 | Z_ (mknod("/_mknod_blk", S_IFBLK, DEVRAM0)); 23 | Z_ (mknod("/_mknod_fifo", S_IFIFO, 0)); 24 | 25 | Z_ (mknodat(AT_FDCWD, "./_mknodat_chr", S_IFCHR, DEVNULL)); 26 | Z_ (mknodat(AT_FDCWD, "./_mknodat_blk", S_IFBLK, DEVRAM0)); 27 | Z_ (mknodat(AT_FDCWD, "./_mknodat_fifo", S_IFIFO, 0)); 28 | } 29 | -------------------------------------------------------------------------------- /examples/seccomp/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "$CHTEST_DIR"/common.bash 3 | 4 | setup () { 5 | prerequisites_ok seccomp 6 | } 7 | 8 | @test "${ch_tag}/fifos only" { 9 | ch-run "$ch_img" -- sh -c 'ls -lh /_*' 10 | # shellcheck disable=SC2016 11 | ch-run "$ch_img" -- sh -c 'test $(ls /_* | wc -l) == 2' 12 | ch-run "$ch_img" -- test -p /_mknod_fifo 13 | ch-run "$ch_img" -- test -p /_mknodat_fifo 14 | } 15 | -------------------------------------------------------------------------------- /examples/spack/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | FROM almalinux:8 3 | 4 | # Note: Spack is a bit of an odd duck testing-wise. Because it’s a package 5 | # manager, the key tests we want are to install stuff (this includes the Spack 6 | # test suite), and those don’t make sense at run time. Thus, most of what we 7 | # care about is here in the Dockerfile, and test.bats just has a few 8 | # trivialities. 9 | # 10 | # bzip2, file, patch, unzip, and which are packages needed to install 11 | # Charliecloud with Spack. These are in Spack’s Docker example [2] but are not 12 | # documented as prerequisites [1]. texinfo is an undocumented dependency of 13 | # Spack’s m4, and that package is in PowerTools, which we enable using sed(1) 14 | # to avoid installing the config-manager DNF plugin. 15 | # 16 | # autoconf, git, openssl, pkg-config, python3, fuse3-libs, and fuse3-devel are 17 | # packages that are typically installed on systems. Thus we install them outside 18 | # of Spack and rely on them as externals to speed up the build process. 19 | # 20 | # [1]: https://spack.readthedocs.io/en/latest/getting_started.html 21 | # [2]: https://spack.readthedocs.io/en/latest/workflows.html#using-spack-to-create-docker-images 22 | RUN sed -Ei 's/enabled=0/enabled=1/' \ 23 | /etc/yum.repos.d/almalinux-powertools.repo 24 | RUN dnf install -y --setopt=install_weak_deps=false \ 25 | autoconf \ 26 | automake \ 27 | bzip2 \ 28 | gcc \ 29 | gcc-c++ \ 30 | git \ 31 | gnupg2-smime \ 32 | file \ 33 | fuse3-devel \ 34 | fuse3-libs \ 35 | make \ 36 | patch \ 37 | pkg-config \ 38 | python38 \ 39 | texinfo \ 40 | unzip \ 41 | which \ 42 | && dnf clean all 43 | 44 | # Certain Spack packages (e.g., tar) puke if they detect themselves being 45 | # configured as UID 0. This is the override. See issue #540 and [2]. 46 | ARG FORCE_UNSAFE_CONFIGURE=1 47 | 48 | # Install Spack. This follows the documented procedure to run it out of the 49 | # source directory.
There apparently is no “make install” type operation to 50 | # place it at a standard path (“spack clone” simply clones another working 51 | # directory to a new path). 52 | # 53 | # Depending on what’s commented below, we get either Spack’s “develop” branch 54 | # or the latest released version. Using develop catches problems earlier, but 55 | # that branch has a LOT more churn and some of the problems might not occur in 56 | # a released version. I expect the right choice will change over time. 57 | ARG SPACK_REPO=https://github.com/spack/spack 58 | #RUN git clone --depth 1 $SPACK_REPO # tip of develop; faster clone 59 | RUN git clone $SPACK_REPO && cd spack && git checkout releases/latest # slow 60 | RUN cd spack && git status && git rev-parse --short HEAD 61 | 62 | # Copy our Spack package file; by relying on external packages already installed 63 | # in the container, we expedite the Spack install process. We do this using 64 | # Spack’s config hierarchy, e.g., /etc/spack; however, this file could also be 65 | # placed in the user $HOME/.spack directory. 66 | COPY packages.yaml /etc/spack/ 67 | 68 | # Apply a patch that resolves issues with Charliecloud 0.35 finding the 69 | # Squashfuse ll.h header. Remove after https://github.com/spack/spack/pull/43374 70 | # is merged and included in the latest Spack release. 71 | COPY libfuse.patch / 72 | RUN patch -p 0 < libfuse.patch 73 | 74 | # Test some basic commands and install Charliecloud. 75 | # Kludge: here we specify an older python sphinx rtd_theme version because the 76 | # newer default version, 0.5.0, introduces a dependency on node-js, which doesn’t 77 | # appear to build with gcc 4.8 or gcc 8.3 78 | # (see: https://github.com/spack/spack/issues/19310). 79 | RUN source /spack/share/spack/setup-env.sh \ 80 | && spack --version \ 81 | && spack env create ch \ 82 | && spack env activate ch \ 83 | && spack compiler find \ 84 | && spack compiler list --scope=system \ 85 | && spack compiler list --scope=user \ 86 | && spack compilers \ 87 | && spack add charliecloud +docs +squashfuse ^py-sphinx-rtd-theme@0.4.3 \ 88 | && spack concretize --fresh --force \ 89 | && spack env depfile -o Makefile \ 90 | && make -j $(nproc) SPACK_COLOR=always \ 91 | && spack load charliecloud \ 92 | && ch-run --version \ 93 | && ldd $(which ch-run) 94 | 95 | # Clean up. 96 | RUN /spack/bin/spack clean --all 97 | -------------------------------------------------------------------------------- /examples/spack/libfuse.patch: -------------------------------------------------------------------------------- 1 | index 0e8f983545..b85ef9958a 100644 2 | --- spack/var/spack/repos/builtin/packages/charliecloud/package.py 3 | +++ spack/var/spack/repos/builtin/packages/charliecloud/package.py 4 | @@ -152,5 +152,7 @@ def configure_args(self): 5 | if "+squashfuse" in self.spec: 6 | squashfuse_prefix = "{0}".format(self.spec["squashfuse"].prefix) 7 | args.append("--with-libsquashfuse={0}".format(squashfuse_prefix)) 8 | + fuse_include = self.spec["fuse"].prefix.include.fuse3 9 | + args.append("CFLAGS=-I{0}".format(fuse_include)) 10 | -------------------------------------------------------------------------------- /examples/spack/packages.yaml: -------------------------------------------------------------------------------- 1 | packages: 2 | # The following packages are built externally to speed up the spack build 3 | # process; they can also be built by Spack without issue, i.e., you can remove 4 | # them from here if you like.
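# (Candidate entries like those below can often be generated with “spack
# external find”; the hand-written specs here are kept for predictability.)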
5 | autoconf: 6 | buildable: false 7 | externals: 8 | - spec: autoconf@2.69 9 | prefix: /usr 10 | automake: 11 | buildable: false 12 | externals: 13 | - spec: automake@1.16.1 14 | prefix: /usr 15 | git: 16 | buildable: false 17 | externals: 18 | - spec: git@2.39.3 19 | prefix: /usr 20 | perl: 21 | buildable: false 22 | externals: 23 | - spec: perl@5.26.3 24 | prefix: /usr 25 | pkgconf: 26 | buildable: false 27 | externals: 28 | - spec: pkgconf@1.4.2 29 | prefix: /usr 30 | python: 31 | buildable: false 32 | externals: 33 | - spec: python@3.8.17 34 | prefix: /usr 35 | openssl: 36 | buildable: false 37 | externals: 38 | - spec: openssl@1.1.1 39 | prefix: /usr 40 | 41 | # Unlike the above, the following packages require a sysadmin. Removing these 42 | # will likely cause issues. 43 | libfuse: 44 | buildable: false 45 | externals: 46 | - spec: libfuse@3.3.0 47 | prefix: /usr 48 | -------------------------------------------------------------------------------- /examples/spack/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | setup() { 5 | scope full 6 | prerequisites_ok spack 7 | export PATH=/spack/bin:$PATH 8 | } 9 | 10 | @test "${ch_tag}/version" { 11 | # Spack likes to write to $HOME/.spack; thus, we bind it. 12 | ch-run --home "$ch_img" -- spack --version 13 | } 14 | 15 | @test "${ch_tag}/compilers" { 16 | echo "spack compiler list" 17 | ch-run --home "$ch_img" -- spack compiler list 18 | echo "spack compiler list --scope=system" 19 | ch-run --home "$ch_img" -- spack compiler list --scope=system 20 | echo "spack compiler list --scope=user" 21 | ch-run --home "$ch_img" -- spack compiler list --scope=user 22 | echo "spack compilers" 23 | ch-run --home "$ch_img" -- spack compilers 24 | } 25 | 26 | @test "${ch_tag}/find" { 27 | run ch-run --home "$ch_img" -- spack find charliecloud 28 | echo "$output" 29 | [[ $status -eq 0 ]] 30 | [[ $output = *'charliecloud@'* ]] 31 | } 32 | -------------------------------------------------------------------------------- /examples/spark/Dockerfile: -------------------------------------------------------------------------------- 1 | # ch-test-scope: full 2 | # 3 | # Use Buster because Stretch JRE install fails with: 4 | # 5 | # tempnam() is so ludicrously insecure as to defy implementation. 6 | # tempnam: Cannot allocate memory 7 | # dpkg: error processing package openjdk-8-jre-headless:amd64 (--configure): 8 | # subprocess installed post-installation script returned error exit status 1 9 | 10 | FROM debian:buster 11 | 12 | ARG DEBIAN_FRONTEND=noninteractive 13 | # Install needed OS packages. 14 | RUN apt-get update \ 15 | && apt-get install -y --no-install-recommends \ 16 | default-jre-headless \ 17 | less \ 18 | procps \ 19 | python3 \ 20 | wget \ 21 | && rm -rf /var/lib/apt/lists/* 22 | 23 | # Download and install Spark. Notes: 24 | # 25 | # 1. We aren’t using SPARK_NO_DAEMONIZE to make sure can deal with daemonized 26 | # applications. 27 | # 28 | # 2. Spark is installed to /opt/spark, which is Spark’s new default location. 29 | ARG URLPATH=https://archive.apache.org/dist/spark/spark-3.2.0/ 30 | ARG DIR=spark-3.2.0-bin-hadoop3.2 31 | ARG TAR=$DIR.tgz 32 | RUN wget -nv $URLPATH/$TAR \ 33 | && tar xf $TAR \ 34 | && mv $DIR /opt/spark \ 35 | && rm $TAR 36 | 37 | # Very basic default configuration, to make it run and not do anything stupid. 
38 | RUN printf '\ 39 | SPARK_LOCAL_IP=127.0.0.1\n\ 40 | SPARK_LOCAL_DIRS=/tmp\n\ 41 | SPARK_LOG_DIR=/tmp\n\ 42 | SPARK_WORKER_DIR=/tmp\n\ 43 | ' > /opt/spark/conf/spark-env.sh 44 | 45 | # Move config to /mnt/0 so we can provide a different config if we want. 46 | RUN mv /opt/spark/conf /mnt/0 \ 47 | && ln -s /mnt/0 /opt/spark/conf 48 | -------------------------------------------------------------------------------- /examples/spark/slurm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #SBATCH --time=0:10:00 3 | 4 | # Run an example non-interactive Spark computation. Requires three arguments: 5 | # 6 | # 1. Image tarball 7 | # 2. Directory in which to unpack tarball 8 | # 3. High-speed network interface name 9 | # 10 | # Example: 11 | # 12 | # $ sbatch slurm.sh /scratch/spark.tar.gz /var/tmp ib0 13 | # 14 | # Spark configuration will be generated in ~/slurm-$SLURM_JOB_ID.spark; any 15 | # configuration already there will be clobbered. 16 | 17 | set -e 18 | 19 | if [[ -z $SLURM_JOB_ID ]]; then 20 | echo "not running under Slurm" 1>&2 21 | exit 1 22 | fi 23 | 24 | tar=$1 25 | img=$2 26 | img=${img}/spark 27 | dev=$3 28 | conf=${HOME}/slurm-${SLURM_JOB_ID}.spark 29 | 30 | # Make Charliecloud available (varies by site) 31 | module purge 32 | module load friendly-testing 33 | module load charliecloud 34 | 35 | # What IP address to use for master? 36 | if [[ -z $dev ]]; then 37 | echo "no high-speed network device specified" 38 | exit 1 39 | fi 40 | master_ip=$( ip -o -f inet addr show dev "$dev" \ 41 | | sed -r 's/^.+inet ([0-9.]+).+/\1/') 42 | master_url=spark://${master_ip}:7077 43 | if [[ -n $master_ip ]]; then 44 | echo "Spark master IP: ${master_ip}" 45 | else 46 | echo "no IP address for ${dev} found" 47 | exit 1 48 | fi 49 | 50 | # Unpack image 51 | srun ch-convert -o dir "$tar" "$img" 52 | 53 | # Make Spark configuration 54 | mkdir "$conf" 55 | chmod 700 "$conf" 56 | cat <<EOF > "${conf}/spark-env.sh" 57 | SPARK_LOCAL_DIRS=/tmp/spark 58 | SPARK_LOG_DIR=/tmp/spark/log 59 | SPARK_WORKER_DIR=/tmp/spark 60 | SPARK_LOCAL_IP=127.0.0.1 61 | SPARK_MASTER_HOST=${master_ip} 62 | JAVA_HOME=/usr/lib/jvm/default-java/ 63 | EOF 64 | mysecret=$(cat /dev/urandom | tr -dc '0-9a-f' | head -c 48) 65 | cat <<EOF > "${conf}/spark-defaults.conf" 66 | spark.authenticate true 67 | spark.authenticate.secret $mysecret 68 | EOF 69 | chmod 600 "${conf}/spark-defaults.conf" 70 | 71 | # Start the Spark master 72 | ch-run -b "$conf" "$img" -- /opt/spark/sbin/start-master.sh 73 | sleep 10 74 | tail -7 /tmp/spark/log/*master*.out 75 | grep -Fq 'New state: ALIVE' /tmp/spark/log/*master*.out 76 | 77 | # Start the Spark workers 78 | srun sh -c " ch-run -b '${conf}' '${img}' -- \ 79 | /opt/spark/sbin/start-worker.sh ${master_url} \ 80 | && sleep infinity" & 81 | sleep 10 82 | grep -F worker /tmp/spark/log/*master*.out 83 | tail -3 /tmp/spark/log/*worker*.out 84 | 85 | # Compute pi 86 | ch-run -b "$conf" "$img" -- \ 87 | /opt/spark/bin/spark-submit --master "$master_url" \ 88 | /opt/spark/examples/src/main/python/pi.py 1024 89 | # Let Slurm kill the workers and master 90 | -------------------------------------------------------------------------------- /examples/spark/test.bats: -------------------------------------------------------------------------------- 1 | CH_TEST_TAG=$ch_test_tag 2 | load "${CHTEST_DIR}/common.bash" 3 | 4 | # Note: If you get output like the following (piping through cat turns off 5 | # BATS terminal magic): 6 | # 7 | # $ ./bats ../examples/spark/test.bats | cat 8 | # 1..5 9 | #
ok 1 spark/configure 10 | # ok 2 spark/start 11 | # [...]/test/bats.src/libexec/bats-exec-test: line 329: /tmp/bats.92406.src: No such file or directory 12 | # [...]/test/bats.src/libexec/bats-exec-test: line 329: /tmp/bats.92406.src: No such file or directory 13 | # [...]/test/bats.src/libexec/bats-exec-test: line 329: /tmp/bats.92406.src: No such file or directory 14 | # 15 | # that means that mpirun is starting too many processes per node (you want 1). 16 | # One solution is to export OMPI_MCA_rmaps_base_mapping_policy= (i.e., set but 17 | # empty). 18 | 19 | setup () { 20 | scope standard 21 | prerequisites_ok spark 22 | pmix_or_skip 23 | [[ $CH_TEST_PACK_FMT = *-unpack ]] || skip 'issue #1161' 24 | umask 0077 25 | 26 | # Unset these Java variables so the container doesn’t use host paths. 27 | unset JAVA_BINDIR JAVA_HOME JAVA_ROOT 28 | 29 | spark_dir=${TMP_}/spark # runs before each test, so no mktemp 30 | spark_config=$spark_dir 31 | spark_log=/tmp/sparklog 32 | confbind=${spark_config}:/mnt/0 33 | if [[ $ch_multinode ]]; then 34 | # We use hostname to determine the interface to use for this test, 35 | # avoiding complicated logic determining which interface is the HSN. 36 | # In many environments this likely results in the tests running over 37 | # the slower management interface, which is fine for testing, but 38 | # should be avoided for large scale runs. 39 | master_host="$(hostname)" 40 | # Start Spark workers using pdsh. We would really prefer to do this 41 | # using srun, but that doesn’t work; see issue #230. 42 | command -v pdsh >/dev/null 2>&1 || pedantic_fail "pdsh not in path" 43 | pernode="pdsh -R ssh -w ${SLURM_NODELIST} -- PATH='${PATH}'" 44 | else 45 | master_host=localhost 46 | pernode= 47 | fi 48 | master_url=spark://${master_host}:7077 49 | master_log="${spark_log}/*master.Master*.out" # expand globs later 50 | } 51 | 52 | 53 | @test "${ch_tag}/configure" { 54 | # check for restrictive umask 55 | run umask -S 56 | echo "$output" 57 | [[ $status -eq 0 ]] 58 | [[ $output = 'u=rwx,g=,o=' ]] 59 | # create config 60 | $ch_mpirun_node mkdir -p "$spark_config" 61 | # We set JAVA_HOME in the spark environment file as this appears to be the 62 | # idiomatic method for ensuring spark finds the java install. 
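    # (JAVA_HOME below is where Debian’s default-jre-headless package, which
    # the image’s Dockerfile installs, puts its JVM.)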
63 | tee "${spark_config}/spark-env.sh" <<EOF 64 | SPARK_LOCAL_DIRS=/tmp/spark 65 | SPARK_LOG_DIR=$spark_log 66 | SPARK_WORKER_DIR=/tmp/spark 67 | SPARK_LOCAL_IP=127.0.0.1 68 | SPARK_MASTER_HOST=${master_host} 69 | JAVA_HOME=/usr/lib/jvm/default-java/ 70 | EOF 71 | my_secret=$(cat /dev/urandom | tr -dc '0-9a-f' | head -c 48) 72 | tee "${spark_config}/spark-defaults.conf" <<EOF 73 | spark.authenticate true 74 | spark.authenticate.secret ${my_secret} 75 | EOF 76 | if [[ $ch_multinode ]]; then 77 | sbcast -f "${spark_config}/spark-env.sh" "${spark_config}/spark-env.sh" 78 | sbcast -f "${spark_config}/spark-defaults.conf" "${spark_config}/spark-defaults.conf" 79 | fi 80 | } 81 | 82 | 83 | @test "${ch_tag}/start" { 84 | # remove old master logs so new one has predictable name 85 | rm -Rf --one-file-system "$spark_log" 86 | # start the master 87 | ch-run -b "$confbind" "$ch_img" -- /opt/spark/sbin/start-master.sh 88 | sleep 15 89 | # shellcheck disable=SC2086 90 | cat $master_log 91 | # shellcheck disable=SC2086 92 | grep -Fq 'New state: ALIVE' $master_log 93 | # start the workers 94 | # shellcheck disable=SC2086 95 | $pernode ch-run -b "$confbind" "$ch_img" -- \ 96 | /opt/spark/sbin/start-worker.sh "$master_url" 97 | sleep 15 98 | } 99 | 100 | 101 | @test "${ch_tag}/worker count" { 102 | # Note that in the log, each worker shows up as 127.0.0.1, which might 103 | # lead you to believe that all the workers started on the same (master) 104 | # node. However, I believe this string is self-reported by the workers and 105 | # is an artifact of SPARK_LOCAL_IP=127.0.0.1 above, which AFAICT just 106 | # tells the workers to put their web interfaces on localhost. They still 107 | # connect to the master and get work OK. 108 | [[ -z $ch_multinode ]] && SLURM_NNODES=1 109 | # shellcheck disable=SC2086 110 | worker_ct=$(grep -Fc 'Registering worker' $master_log || true) 111 | echo "node count: $SLURM_NNODES; worker count: ${worker_ct}" 112 | [[ $worker_ct -eq "$SLURM_NNODES" ]] 113 | } 114 | 115 | 116 | @test "${ch_tag}/pi" { 117 | run ch-run -b "$confbind" "$ch_img" -- \ 118 | /opt/spark/bin/spark-submit --master "$master_url" \ 119 | /opt/spark/examples/src/main/python/pi.py 64 120 | echo "$output" 121 | [[ $status -eq 0 ]] 122 | # This computation converges quite slowly, so we only ask for two correct 123 | # digits of pi. 124 | [[ $output = *'Pi is roughly 3.1'* ]] 125 | } 126 | 127 | 128 | @test "${ch_tag}/stop" { 129 | $pernode ch-run -b "$confbind" "$ch_img" -- /opt/spark/sbin/stop-worker.sh 130 | ch-run -b "$confbind" "$ch_img" -- /opt/spark/sbin/stop-master.sh 131 | sleep 2 132 | # Any Spark processes left? 133 | # (Use egrep instead of fgrep so we don’t match the grep process.) 134 | # shellcheck disable=SC2086 135 | $pernode ps aux | ( ! grep -E '[o]rg\.apache\.spark\.deploy' ) 136 | } 137 | 138 | 139 | @test "${ch_tag}/hang" { 140 | # If there are any test processes remaining, this test will hang.
141 | true 142 | } 143 | -------------------------------------------------------------------------------- /lib/Makefile.am: -------------------------------------------------------------------------------- 1 | # Define an alias for pkglibdir to override Automake helpfulness: 2 | # 3 | # error: 'pkglibdir' is not a legitimate directory for 'DATA' 4 | # 5 | # See: https://www.gnu.org/software/automake/manual/html_node/Uniform.html 6 | mylibdir = $(pkglibdir) 7 | 8 | dist_mylib_DATA = base.sh \ 9 | build.py \ 10 | build_cache.py \ 11 | charliecloud.py \ 12 | filesystem.py \ 13 | force.py \ 14 | image.py \ 15 | misc.py \ 16 | modify.py \ 17 | pull.py \ 18 | push.py \ 19 | registry.py 20 | mylib_DATA = contributors.bash \ 21 | version.py \ 22 | version.sh \ 23 | version.txt 24 | 25 | # Bundled Lark (currently version 1.1.9); Automake does not support wildcards 26 | # [1], so list the files. Note it's version-specific. Hopefully if a new 27 | # version of Lark adds a file and we omit it here by mistake, the tests will 28 | # catch it. To get this list: 29 | # 30 | # $ (cd lib && find lark lark-*.dist-info -xtype f) | LC_ALL=C sort | sed -E 's/$/ \\/' 31 | # 32 | # Then, copy-n-paste & remove the last backslash. PROOFREAD YOUR DIFF!!! 33 | 34 | LARK = \ 35 | lark-1.1.9.dist-info/INSTALLER \ 36 | lark-1.1.9.dist-info/LICENSE \ 37 | lark-1.1.9.dist-info/METADATA \ 38 | lark-1.1.9.dist-info/RECORD \ 39 | lark-1.1.9.dist-info/WHEEL \ 40 | lark-1.1.9.dist-info/entry_points.txt \ 41 | lark-1.1.9.dist-info/top_level.txt \ 42 | lark/__init__.py \ 43 | lark/ast_utils.py \ 44 | lark/common.py \ 45 | lark/exceptions.py \ 46 | lark/grammar.py \ 47 | lark/grammars/__init__.py \ 48 | lark/grammars/common.lark \ 49 | lark/grammars/lark.lark \ 50 | lark/grammars/python.lark \ 51 | lark/grammars/unicode.lark \ 52 | lark/indenter.py \ 53 | lark/lark.py \ 54 | lark/lexer.py \ 55 | lark/load_grammar.py \ 56 | lark/parse_tree_builder.py \ 57 | lark/parser_frontends.py \ 58 | lark/parsers/__init__.py \ 59 | lark/parsers/cyk.py \ 60 | lark/parsers/earley.py \ 61 | lark/parsers/earley_common.py \ 62 | lark/parsers/earley_forest.py \ 63 | lark/parsers/grammar_analysis.py \ 64 | lark/parsers/lalr_analysis.py \ 65 | lark/parsers/lalr_interactive_parser.py \ 66 | lark/parsers/lalr_parser.py \ 67 | lark/parsers/lalr_parser_state.py \ 68 | lark/parsers/xearley.py \ 69 | lark/py.typed \ 70 | lark/reconstruct.py \ 71 | lark/tools/__init__.py \ 72 | lark/tools/nearley.py \ 73 | lark/tools/serialize.py \ 74 | lark/tools/standalone.py \ 75 | lark/tree.py \ 76 | lark/tree_matcher.py \ 77 | lark/tree_templates.py \ 78 | lark/utils.py \ 79 | lark/visitors.py 80 | if ENABLE_LARK 81 | nobase_dist_mylib_DATA = $(LARK) 82 | endif 83 | 84 | CLEANFILES = $(mylib_DATA) 85 | 86 | contributors.bash: ../README.rst 87 | rm -f $@ 88 | printf '# shellcheck shell=bash\n' >> $@ 89 | printf 'declare -a ch_contributors\n' >> $@ 90 | sed -En 's/^\*.+<(.+@.+)>.*$$/ch_contributors+=('"'"'\1'"'"')/p' < $< >> $@ 91 | 92 | # Remove empty charliecloud directories after uninstallation. 
93 | uninstall-hook: 94 | rmdir $$(find $(pkglibdir) -type d | sort -r) 95 | 96 | version.txt: ../configure 97 | printf '@PACKAGE_VERSION@\n' > $@ 98 | 99 | version.py: ../configure 100 | printf "VERSION='@PACKAGE_VERSION@'\n" > $@ 101 | 102 | version.sh: ../configure 103 | printf "# shellcheck shell=sh disable=SC2034\n" > $@ 104 | printf "ch_version='@PACKAGE_VERSION@'\n" >> $@ 105 | -------------------------------------------------------------------------------- /misc/Makefile.am: -------------------------------------------------------------------------------- 1 | EXTRA_DIST = grep version 2 | -------------------------------------------------------------------------------- /misc/branches-tidy: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # FIXME: pretty colors? 4 | 5 | import argparse 6 | import collections 7 | import re 8 | import subprocess 9 | 10 | class Branch: 11 | __slots__ = ("local", "repo", "remote", "status") 12 | 13 | def __str__(self): 14 | s = self.local 15 | if (self.remote is not None): 16 | s += " → " 17 | if (self.repo == repo_max and self.remote == self.local): 18 | s += "•" 19 | else: 20 | s += "%s/%s" % (self.repo, self.remote) 21 | if (self.status is not None): 22 | s += " [%s]" % self.status 23 | return s 24 | 25 | def delete(name): 26 | subprocess.run(["git", "branch", "-qD", name], check=True) 27 | 28 | # globals 29 | remote_dangling = set() 30 | remote_matched = set() 31 | other = set() 32 | repos = collections.Counter() 33 | repo_max = None 34 | delete_ct = 0 35 | 36 | p = argparse.ArgumentParser( 37 | description = "List summary of Git branches.", 38 | epilog = "Dot (•) indicates branch is at most common remote with same name.") 39 | p.add_argument("-d", "--delete", 40 | action="store_true", 41 | help="delete dangling branches (remote branch missing)") 42 | p.add_argument("-r", "--delete-remote", 43 | metavar="REMOTE", 44 | action="append", default=list(), 45 | help="delete branches pointing to REMOTE (can be repeated)") 46 | args = p.parse_args() 47 | 48 | cp = subprocess.run(["git", "branch", "--format", 49 | "%(refname:short) %(upstream:short) %(upstream:track)"], 50 | stdout=subprocess.PIPE, encoding="UTF-8", check=True) 51 | for m in re.finditer(r"^(\S+)\s((\S+)/(\S+))?\s(\[(.+)\])?$", 52 | cp.stdout, re.MULTILINE): 53 | b = Branch() 54 | b.local = m[1] 55 | b.repo = m[3] 56 | b.remote = m[4] 57 | b.status = m[6] 58 | if (b.remote is None): 59 | other.add(b) 60 | else: 61 | repos[b.repo] += 1 62 | if (b.status == "gone"): 63 | remote_dangling.add(b) 64 | else: 65 | remote_matched.add(b) 66 | 67 | assert( len(cp.stdout.splitlines()) 68 | == len(other) + len(remote_matched) + len(remote_dangling)) 69 | 70 | (repo_max, repo_max_ct) = repos.most_common(1)[0] 71 | print("found %d repos; most common: %s (%d)" 72 | % (len(repos), repo_max, repo_max_ct)) 73 | 74 | print("remote dangling (%d):" % len(remote_dangling)) 75 | for b in remote_dangling: 76 | print(" %s" % b, end="") 77 | if (args.delete): 78 | delete(b.local) 79 | delete_ct += 1 80 | print(" ☠️", end="") 81 | print() 82 | 83 | print("remote (%d):" % len(remote_matched)) 84 | for b in remote_matched: 85 | print(" %s" % b, end="") 86 | if (b.repo in args.delete_remote): 87 | delete(b.local) 88 | delete_ct += 1 89 | print(" ☠️", end="") 90 | print() 91 | 92 | print("other (%d):" % len(other)) 93 | for b in other: 94 | print(" %s" % b) 95 | 96 | if (delete_ct > 0): 97 | print("deleted %d branches" % delete_ct) 98 | 
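# Example usage (remote name “myfork” is hypothetical):
#
#   $ misc/branches-tidy             # summarize branches
#   $ misc/branches-tidy --delete    # also delete branches whose upstream is gone
#   $ misc/branches-tidy -r myfork   # also delete branches tracking “myfork”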
-------------------------------------------------------------------------------- /misc/grep: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # The purpose of this script is to make it more convenient to search the 4 | # project source code. It’s a wrapper for grep(1) to exclude directories that 5 | # yield a ton of false positives, in particular the documentation’s JavaScript 6 | # index that is one line thousands of characters long. It's tedious to derive 7 | # the exclusions manually each time. 8 | # 9 | # grep(1) has an --exclude-dir option, but I can’t get it to work with rooted 10 | # directories, e.g., doc-src/_build vs _build anywhere in the tree, so we use 11 | # find(1) instead. You may hear complaints about how find is too slow for 12 | # this, but the project is small enough we don’t care. 13 | 14 | set -e 15 | 16 | cd "$(dirname "$0")"/.. 17 | 18 | find . \( -path ./.git \ 19 | -o -path ./autom4te.cache \ 20 | -o -path ./doc/doctrees \ 21 | -o -path ./bin/ch-checkns \ 22 | -o -path ./bin/ch-image \ 23 | -o -path ./bin/ch-run \ 24 | -o -path ./bin/ch-run-oci \ 25 | -o -path ./doc/html \ 26 | -o -path ./doc/man \ 27 | -o -path ./test/approved-trailing-whitespace \ 28 | -o -path './lib/lark*' \ 29 | -o -path '*.patch' \ 30 | -o -name '*.pyc' \ 31 | -o -name '*.vtk' \ 32 | -o -name ._.DS_Store \ 33 | -o -name .DS_Store \ 34 | -o -name configure \ 35 | -o -name 'config.*' \ 36 | -o -name Makefile \ 37 | -o -name Makefile.in \ 38 | \) -prune \ 39 | -o -type f \ 40 | -exec grep --color=auto -HIn "$@" {} \; 41 | -------------------------------------------------------------------------------- /misc/m4/README: -------------------------------------------------------------------------------- 1 | This directory contains additional M4 macros for the build system. 2 | 3 | Currently, these are all from Autoconf Archive. While many distributions have 4 | an Autoconf Archive package, which autogen.sh can use if present, it’s a 5 | little uncommon to have installed, and we keep running into boxen where we 6 | want to run autogen.sh, but Autoconf Archive is not installed and we can’t 7 | install it promptly. 8 | 9 | There is a licensing exception for these macros that lets us redistribute 10 | them: “Every single one of those macros can be re-used without imposing any 11 | restrictions whatsoever on the licensing of the generated configure script. In 12 | particular, it is possible to use all those macros in configure scripts that 13 | are meant for non-free software.” [1] 14 | 15 | To add a macro: 16 | 17 | 1. Browse the Autoconf Archive documentation [1] and select the macro you want 18 | to use. 19 | 20 | 2. Download the macro file from the "m4" directory of the GitHub source code 21 | mirror [2] and put it in this directory. Use a release tag rather than a 22 | random Git commit. You can "wget" the URL you get with the "raw" button. 23 | 24 | (You could also use the master Git repo on Savannah [3], but GitHub is a 25 | lot easier to use.) 26 | 27 | 3. Record the macro and its last updated version in the list below. 
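For example, step 2 for one of the macros listed below might look like this
(URL follows GitHub’s raw-file convention; adjust the tag as appropriate):

  $ wget https://raw.githubusercontent.com/autoconf-archive/autoconf-archive/v2021.02.19/m4/ax_pthread.m4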
28 | 29 | Macros in use: 30 | 31 | v2021.02.19 AX_CHECK_COMPILE_FLAG 32 | v2021.02.19 AX_COMPARE_VERSION 33 | v2021.02.19 AX_PTHREAD 34 | v2021.02.19 AX_WITH_PROG 35 | 36 | [1]: https://www.gnu.org/software/autoconf-archive/ 37 | [2]: https://github.com/autoconf-archive/autoconf-archive 38 | [3]: http://savannah.gnu.org/projects/autoconf-archive/ 39 | -------------------------------------------------------------------------------- /misc/m4/ax_check_compile_flag.m4: -------------------------------------------------------------------------------- 1 | # =========================================================================== 2 | # https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html 3 | # =========================================================================== 4 | # 5 | # SYNOPSIS 6 | # 7 | # AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) 8 | # 9 | # DESCRIPTION 10 | # 11 | # Check whether the given FLAG works with the current language's compiler 12 | # or gives an error. (Warnings, however, are ignored) 13 | # 14 | # ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on 15 | # success/failure. 16 | # 17 | # If EXTRA-FLAGS is defined, it is added to the current language's default 18 | # flags (e.g. CFLAGS) when the check is done. The check is thus made with 19 | # the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to 20 | # force the compiler to issue an error when a bad flag is given. 21 | # 22 | # INPUT gives an alternative input source to AC_COMPILE_IFELSE. 23 | # 24 | # NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this 25 | # macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. 26 | # 27 | # LICENSE 28 | # 29 | # Copyright (c) 2008 Guido U. Draheim 30 | # Copyright (c) 2011 Maarten Bosmans 31 | # 32 | # Copying and distribution of this file, with or without modification, are 33 | # permitted in any medium without royalty provided the copyright notice 34 | # and this notice are preserved. This file is offered as-is, without any 35 | # warranty. 36 | 37 | #serial 6 38 | 39 | AC_DEFUN([AX_CHECK_COMPILE_FLAG], 40 | [AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF 41 | AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl 42 | AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ 43 | ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS 44 | _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" 45 | AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], 46 | [AS_VAR_SET(CACHEVAR,[yes])], 47 | [AS_VAR_SET(CACHEVAR,[no])]) 48 | _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) 49 | AS_VAR_IF(CACHEVAR,yes, 50 | [m4_default([$2], :)], 51 | [m4_default([$3], :)]) 52 | AS_VAR_POPDEF([CACHEVAR])dnl 53 | ])dnl AX_CHECK_COMPILE_FLAGS 54 | -------------------------------------------------------------------------------- /misc/m4/ax_with_prog.m4: -------------------------------------------------------------------------------- 1 | # =========================================================================== 2 | # https://www.gnu.org/software/autoconf-archive/ax_with_prog.html 3 | # =========================================================================== 4 | # 5 | # SYNOPSIS 6 | # 7 | # AX_WITH_PROG([VARIABLE],[program],[VALUE-IF-NOT-FOUND],[PATH]) 8 | # 9 | # DESCRIPTION 10 | # 11 | # Locates an installed program binary, placing the result in the precious 12 | # variable VARIABLE. 
Accepts a present VARIABLE, then --with-program, and 13 | # failing that searches for program in the given path (which defaults to 14 | # the system path). If program is found, VARIABLE is set to the full path 15 | # of the binary; if it is not found VARIABLE is set to VALUE-IF-NOT-FOUND 16 | # if provided, unchanged otherwise. 17 | # 18 | # A typical example could be the following one: 19 | # 20 | # AX_WITH_PROG(PERL,perl) 21 | # 22 | # NOTE: This macro is based upon the original AX_WITH_PYTHON macro from 23 | # Dustin J. Mitchell . 24 | # 25 | # LICENSE 26 | # 27 | # Copyright (c) 2008 Francesco Salvestrini 28 | # Copyright (c) 2008 Dustin J. Mitchell 29 | # 30 | # Copying and distribution of this file, with or without modification, are 31 | # permitted in any medium without royalty provided the copyright notice 32 | # and this notice are preserved. This file is offered as-is, without any 33 | # warranty. 34 | 35 | #serial 17 36 | 37 | AC_DEFUN([AX_WITH_PROG],[ 38 | AC_PREREQ([2.61]) 39 | 40 | pushdef([VARIABLE],$1) 41 | pushdef([EXECUTABLE],$2) 42 | pushdef([VALUE_IF_NOT_FOUND],$3) 43 | pushdef([PATH_PROG],$4) 44 | 45 | AC_ARG_VAR(VARIABLE,Absolute path to EXECUTABLE executable) 46 | 47 | AS_IF(test -z "$VARIABLE",[ 48 | AC_MSG_CHECKING(whether EXECUTABLE executable path has been provided) 49 | AC_ARG_WITH(EXECUTABLE,AS_HELP_STRING([--with-EXECUTABLE=[[[PATH]]]],absolute path to EXECUTABLE executable), [ 50 | AS_IF([test "$withval" != yes && test "$withval" != no],[ 51 | VARIABLE="$withval" 52 | AC_MSG_RESULT($VARIABLE) 53 | ],[ 54 | VARIABLE="" 55 | AC_MSG_RESULT([no]) 56 | AS_IF([test "$withval" != no], [ 57 | AC_PATH_PROG([]VARIABLE[],[]EXECUTABLE[],[]VALUE_IF_NOT_FOUND[],[]PATH_PROG[]) 58 | ]) 59 | ]) 60 | ],[ 61 | AC_MSG_RESULT([no]) 62 | AC_PATH_PROG([]VARIABLE[],[]EXECUTABLE[],[]VALUE_IF_NOT_FOUND[],[]PATH_PROG[]) 63 | ]) 64 | ]) 65 | 66 | popdef([PATH_PROG]) 67 | popdef([VALUE_IF_NOT_FOUND]) 68 | popdef([EXECUTABLE]) 69 | popdef([VARIABLE]) 70 | ]) 71 | -------------------------------------------------------------------------------- /misc/version: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Compute and print out the full version number. See FAQ for details. 4 | # 5 | # This script should usually be run once, by Autotools, and the result 6 | # propagated using Autotools. This propagates the Git information into 7 | # tarballs, and otherwise, you can get a mismatch between different parts of 8 | # the software. 9 | 10 | set -e 11 | 12 | ch_base=$(cd "$(dirname "$0")" && pwd)/.. 13 | version_file=${ch_base}/VERSION 14 | version_simple=$(cat "$version_file") 15 | case $version_simple in 16 | *~*) 17 | prerelease=yes 18 | ;; 19 | *) 20 | prerelease= 21 | ;; 22 | esac 23 | 24 | if [ ! 
-e "${ch_base}/.git" ] || [ -z "$prerelease" ]; then 25 | # no Git or release version; use simple version 26 | printf "%s\n" "$version_simple" 27 | else 28 | # add Git stuff 29 | git_branch=$( git rev-parse --abbrev-ref HEAD \ 30 | | sed 's/[^A-Za-z0-9]//g' \ 31 | | sed 's/$/./g' \ 32 | | sed 's/master.//g') 33 | git_hash=$(git rev-parse --short HEAD) 34 | dirty=$(git diff-index --quiet HEAD || echo .dirty) 35 | printf '%s+%s%s%s\n' "$version_simple" \ 36 | "$git_branch" \ 37 | "$git_hash" \ 38 | "$dirty" 39 | fi 40 | -------------------------------------------------------------------------------- /packaging/Makefile.am: -------------------------------------------------------------------------------- 1 | EXTRA_DIST = \ 2 | README \ 3 | requirements.txt \ 4 | fedora/build \ 5 | fedora/charliecloud.rpmlintrc \ 6 | fedora/charliecloud.spec \ 7 | fedora/el7-pkgdir.patch \ 8 | fedora/upstream.spec 9 | -------------------------------------------------------------------------------- /packaging/README: -------------------------------------------------------------------------------- 1 | openSUSE 2 | -------- 3 | 4 | openSUSE packages are maintained and built using the openSUSE Build 5 | Service (OBS) at https://build.opensuse.org 6 | 7 | To use OBS to create your own charliecloud packages, you’ll need to 8 | create a user account and then copy the packaging from the devel 9 | package at: 10 | 11 | https://build.opensuse.org/package/show/network:cluster/charliecloud 12 | 13 | This can be done from the web interface clicking on “Branch package”. 14 | 15 | Then you will be able to fetch locally all the packaging files from 16 | your copy (or branch), make your changes and send them to OBS that will 17 | build packages and create package repositories for your branch. 18 | 19 | The beginner’s guide on how to use OBS with the osc command-line tool 20 | can be found at https://openbuildservice.org/help/manuals/obs-user-guide/ 21 | -------------------------------------------------------------------------------- /packaging/fedora/charliecloud.rpmlintrc: -------------------------------------------------------------------------------- 1 | # This file is used to supress false positive errors and warnings generated by 2 | # rpmlint when used with our charliecloud packages. 3 | 4 | ### charliecloud.spec ### 5 | 6 | # The chroot tests are very fragile and have been removed upstream. See: 7 | # https://github.com/rpm-software-management/rpmlint/commit/83f915a54d23f7a912ed42b84ccb4e373bec8ad9 8 | addFilter(r'missing-call-to-chdir-with-chroot') 9 | 10 | # The RPM build script will generate invalid source URLs for non-release 11 | # versions, e.g., '0.9.8~pre+epelpackage.41fe9fd'. 12 | addFilter(r'invalid-url') 13 | 14 | # charliecloud 15 | 16 | # We don't have architecture specific libraries, thus we can put files in /lib. 17 | # The rpm macro, _lib, expands to lib64, which is not what we want. Rather than 18 | # patch our install to an incorrect library path we ignore the lint error. 19 | addFilter(r'hardcoded-library-path') 20 | 21 | # Charliecloud uses pivot_root(2), not chroot(2), for containerization. The 22 | # calls to chroot(2) are part of the pivot_root(2) dance and not relevant to 23 | # Charliecloud's security posture. 24 | addFilter(r'missing-call-to-chdir-with-chroot') 25 | 26 | # The charliecloud example, chtest, has python scripts. 27 | addFilter(r'doc-file-dependency') 28 | 29 | # charliecloud-debuginfo 30 | 31 | # The only files under /usr/lib are those placed there by rpmbuild. 
32 | addFilter(r'only-non-binary-in-usr-lib') 33 | 34 | # Ignore a false positive warning concerning pycache files and byte code. 35 | # https://bugzilla.redhat.com/show_bug.cgi?id=1286382 36 | addFilter(r'python-bytecode-without-source') 37 | 38 | # We don't specify a version because the offending package was not out long and 39 | # we intend to remove Obsolete lines in the near future. 40 | addFilter(r'unversioned-explicit-obsoletes') 41 | 42 | ### charliecloud-test ### 43 | 44 | # Charliecloud is a container runtime. These shared objects are never used in 45 | # the host environment; rather, they are compiled by the test suite (both 46 | # running and examination of which serve as end-user documentation) and injected 47 | # into the container (guest) via utility script 'ch-fromhost'. The ldconfig 48 | # links are generated inside the container runtime environment. For more 49 | # information, see the test file: test/run/ch-fromhost.bats (line 108). 50 | addFilter(r'no-ldconfig-symlink') 51 | addFilter(r'library-without-ldconfig-postin') 52 | addFilter(r'library-without-ldconfig-postun') 53 | 54 | # The test suite has a few C files, e.g. userns.c, pivot_root.c, 55 | # chroot-escape.c, sotest.c, setgroups.c, mknods.c, setuid.c, etc., that 56 | # document -- line-by-line in some cases -- various components of the open source 57 | # runtime. These C files serve to show end users how containers work; some of 58 | # them are used explicitly during test suite runtime. 59 | addFilter(r'devel-file-in-non-devel-package') 60 | 61 | # The symlink to /usr/bin is created and does exist. 62 | addFilter(r'dangling-relative-symlink') 63 | 64 | # Funky files used as test fixtures: 65 | addFilter(r'dangling-symlink') # to /tmp 66 | addFilter(r'hidden-file-or-dir') # .dockerignore 67 | addFilter(r'zero-length') # for file copy test 68 | 69 | -------------------------------------------------------------------------------- /packaging/fedora/el7-pkgdir.patch: -------------------------------------------------------------------------------- 1 | diff -ru charliecloud/bin/ch-test charliecloud-lib/bin/ch-test 2 | --- charliecloud/bin/ch-test 2020-04-07 12:19:37.054609706 -0600 3 | +++ charliecloud-lib/bin/ch-test 2020-04-15 16:36:55.128831767 -0600 4 | @@ -662,7 +662,7 @@ 5 | CHTEST_INSTALLED=yes 6 | CHTEST_GITWD= 7 | CHTEST_DIR=${ch_base}/libexec/charliecloud/test 8 | - CHTEST_EXAMPLES_DIR=${ch_base}/share/doc/charliecloud/examples 9 | + CHTEST_EXAMPLES_DIR=${ch_base}/share/doc/charliecloud-${ch_version}/examples 10 | else 11 | # build dir 12 | CHTEST_INSTALLED= 13 | -------------------------------------------------------------------------------- /packaging/fedora/printf.patch: -------------------------------------------------------------------------------- 1 | diff -ur charliecloud/bin/ch_misc.c charliecloud-patch/bin/ch_misc.c 2 | --- charliecloud/bin/ch_misc.c 2022-01-24 13:12:23.980046774 -0500 3 | +++ charliecloud-patch/bin/ch_misc.c 2022-01-24 13:25:34.854133321 -0500 4 | @@ -252,7 +252,7 @@ 5 | if (path == NULL) { 6 | T_ (where = strdup(line)); 7 | } else { 8 | - T_ (1 <= asprintf(&where, "%s:%lu", path, lineno)); 9 | + T_ (1 <= asprintf(&where, "%s:%zu", path, lineno)); 10 | } 11 | 12 | // Split line into variable name and value. 
13 |
--------------------------------------------------------------------------------
/packaging/requirements.txt:
--------------------------------------------------------------------------------
1 | # This file contains the list of required python packages for the project in
2 | # requirements format. NOTE: It is provided on a “best effort” basis and is
3 | # not supported or maintained by the Charliecloud team. The source of truth
4 | # for all version dependencies is configure.ac. However, patches to keep it up
5 | # to date are always welcome.
6 |
7 | # Users can install all our python dependencies simply by running:
8 | #   > pip install -r requirements.txt
9 |
10 | lark-parser
11 | wheel
12 | requests>=2.6.0
13 |
--------------------------------------------------------------------------------
/test/.dockerignore:
--------------------------------------------------------------------------------
1 | # Nothing yet; used for testing ch-image warnings.
2 |
--------------------------------------------------------------------------------
/test/Build.centos7xz:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Download an xz-compressed CentOS 7 tarball. These are the base images for
4 | # the official CentOS Docker images.
5 | #
6 | # https://github.com/CentOS/sig-cloud-instance-images
7 | #
8 | # This GitHub repository is arranged with CentOS version and architecture in
9 | # different branches. We download the latest for a given architecture.
10 | #
11 | # To check what version is in a tarball (on any architecture):
12 | #
13 | #   $ tar xf centos-7-${arch}-docker.tar.xz --to-stdout ./etc/centos-release
14 | #
15 | # ch-test-scope: standard
16 | # ch-test-builder-exclude: none
17 |
18 | set -ex
19 |
20 | #srcdir=$1 # unused
21 | tarball=${2}.tar.xz
22 | #workdir=$3 # unused
23 |
24 | wget -nv -O "$tarball" "https://github.com/CentOS/sig-cloud-instance-images/blob/CentOS-7-$(uname -m)/docker/centos-7-$(uname -m)-docker.tar.xz?raw=true"
25 |
--------------------------------------------------------------------------------
/test/Build.docker_pull:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # ch-test-scope: quick
3 | # ch-test-builder-include: docker
4 | # ch-test-need-sudo
5 | #
6 | # Pull a Docker image directly from Docker Hub and pack it into an image tarball.
7 |
8 | set -e
9 |
10 | #srcdir=$1 # unused
11 | tarball_gz=${2}.tar.gz
12 | workdir=$3
13 |
14 | tag=docker_pull
15 | addr=alpine:3.17
16 | img=$tag:latest
17 |
18 | cd "$workdir"
19 | sudo docker pull "$addr"
20 | sudo docker tag "$addr" "$tag"
21 |
22 | # FIXME: do we need a ch_version_docker equivalent?
23 | sudo docker tag "$tag" "$img"
24 |
25 |
26 | hash_=$(sudo docker images -q "$img" | sort -u)
27 | if [[ -z $hash_ ]]; then
28 |     echo "no such image '$img'"
29 |     exit 1
30 | fi
31 |
32 | ch-convert -i docker "$tag" "$tarball_gz"
33 |
--------------------------------------------------------------------------------
/test/Build.missing:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # ch-test-scope: quick
3 |
4 | # This image’s prerequisites can never be satisfied.
5 | exit 65
6 |
--------------------------------------------------------------------------------
/test/Dockerfile.argenv:
--------------------------------------------------------------------------------
1 | # Test how ARG and ENV variables flow around. This does not address syntax
2 | # quirks; for that see test “Dockerfile: syntax quirks” in
3 | # build/50_dockerfile.bats. Results are checked in both test “Dockerfile: ARG
4 | # and ENV values” in build/50_dockerfile.bats and multiple tests in
5 | # run/ch-run_misc.bats. The latter is why this is a separate Dockerfile
6 | # instead of embedded in a .bats file.
7 |
8 | # ch-test-scope: standard
9 | FROM alpine:3.17
10 |
11 | ARG chse_arg1_df
12 | ARG chse_arg2_df=arg2
13 | ARG chse_arg3_df="arg3 ${chse_arg2_df}"
14 | ENV chse_env1_df env1
15 | ENV chse_env2_df="env2 ${chse_env1_df}"
16 | RUN env | sort
17 |
--------------------------------------------------------------------------------
/test/Dockerfile.file-quirks:
--------------------------------------------------------------------------------
1 | # This Dockerfile is used to test that pull deals with quirky files, e.g.
2 | # replacement by different types (issues #819 and #825). Scope is “skip”
3 | # because we pull the image to test it; see test/build/50_pull.bats.
4 | #
5 | # To build and push:
6 | #
7 | #   $ VERSION=$(date +%Y-%m-%d)  # or other date as appropriate
8 | #   $ sudo docker login  # if needed
9 | #   $ sudo docker build -t file-quirks -f Dockerfile.file-quirks .
10 | #   $ sudo docker tag file-quirks:latest charliecloud/file-quirks:$VERSION
11 | #   $ sudo docker images | fgrep file-quirks
12 | #   $ sudo docker push charliecloud/file-quirks:$VERSION
13 | #
14 | # ch-test-scope: skip
15 |
16 | FROM alpine:3.17
17 | WORKDIR /test
18 |
19 |
20 | ## Replace symlink with symlink.
21 |
22 | # Set up a symlink & targets.
23 | RUN echo target1 > ss_target1 \
24 |  && echo target2 > ss_target2 \
25 |  && ln -s ss_target1 ss_link
26 | # Link and target should both contain “target1”.
27 | RUN ls -l \
28 |  && for i in ss_*; do printf '%s : ' $i; cat $i; done
29 | # Overwrite it with a new symlink.
30 | RUN rm ss_link \
31 |  && ln -s ss_target2 ss_link
32 | # Now link should still be a symlink but contain “target2”.
33 | RUN ls -l \
34 |  && for i in ss_*; do printf '%s : ' $i; cat $i; done
35 |
36 |
37 | ## Replace symlink with regular file (issue #819).
38 |
39 | # Set up a symlink.
40 | RUN echo target > sf_target \
41 |  && ln -s sf_target sf_link
42 | # Link and target should both contain “target”.
43 | RUN ls -l \
44 |  && for i in sf_*; do printf '%s : ' $i; cat $i; done
45 | # Overwrite it with a regular file.
46 | RUN rm sf_link \
47 |  && echo regular > sf_link
48 | # Now link should be a regular file and contain “regular”.
49 | RUN ls -l \
50 |  && for i in sf_*; do printf '%s : ' $i; cat $i; done
51 |
52 |
53 | ## Replace regular file with symlink.
54 |
55 | # Set up two regular files.
56 | RUN echo regular > fs_link \
57 |  && echo target > fs_target
58 | # Link should be a regular file and contain “regular”.
59 | RUN ls -l \
60 |  && for i in fs_*; do printf '%s : ' $i; cat $i; done
61 | # Overwrite it with a symlink.
62 | RUN rm fs_link \
63 |  && ln -s fs_target fs_link
64 | # Now link should be a symlink; both should contain “target”.
65 | RUN ls -l \
66 |  && for i in fs_*; do printf '%s : ' $i; cat $i; done
67 |
68 |
69 | ## Replace symlink with directory.
70 |
71 | # Set up a symlink.
72 | RUN echo target > sd_target \
73 |  && ln -s sd_target sd_link
74 | # Link and target should both contain “target”.
75 | RUN ls -l \
76 |  && for i in sd_*; do printf '%s : ' $i; cat $i; done
77 | # Overwrite it with a directory.
78 | RUN rm sd_link \
79 |  && mkdir sd_link
80 | # Now link should be a directory.
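# (Aside: as I read issues #819 and #825, these type-change cases are exactly
# what trips up image pullers. A later layer can legitimately carry sd_link as
# a directory entry over a name an earlier layer created as a symlink, so the
# extractor must replace the old link rather than follow it.)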
81 | RUN ls -l
82 |
83 |
84 | ## Replace directory with symlink.
85 |
86 | # I think this is what’s in image ppc64le.neo4j/2.3.5, as reported in issue
87 | # #825, but it doesn’t cause the same infinite recursion.
88 |
89 | # Set up a directory and a target.
90 | RUN mkdir ds_link \
91 |  && echo target > ds_target
92 | # It should be a directory.
93 | RUN ls -l
94 | # Overwrite it with a symlink.
95 | RUN rmdir ds_link \
96 |  && ln -s ds_target ds_link
97 | # Now link should be a symlink; both should contain “target”.
98 | RUN ls -l \
99 |  && for i in ds_*; do printf '%s : ' $i; cat $i; done
100 |
101 |
102 | ## Replace regular file with directory.
103 |
104 | # Set up a file.
105 | RUN echo regular > fd_member
106 | # It should be a file.
107 | RUN ls -l \
108 |  && for i in fd_*; do printf '%s : ' $i; cat $i; done
109 | # Overwrite it with a directory.
110 | RUN rm fd_member \
111 |  && mkdir fd_member
112 | # Now it should be a directory.
113 | RUN ls -l
114 |
115 |
116 | ## Replace directory with regular file.
117 |
118 | # Set up a directory.
119 | RUN mkdir df_member
120 | # It should be a directory.
121 | RUN ls -l
122 | # Overwrite it with a file.
123 | RUN rmdir df_member \
124 |  && echo regular > df_member
125 | # Now it should be a file.
126 | RUN ls -l \
127 |  && for i in df_*; do printf '%s : ' $i; cat $i; done
128 |
129 |
130 | ## Symlink with cycle (https://bugs.python.org/file37774).
131 |
132 | # Set up a symlink pointing to itself.
133 | RUN ln -s link_self link_self
134 | # List.
135 | RUN ls -l
136 |
137 |
138 | ## Broken symlinks (https://bugs.python.org/file37774).
139 |
140 | # Set up a symlink pointing to (1) a nonexistent file and (2) a directory that
141 | # only exists in the image.
142 | RUN ln -s doesnotexist link_b0rken \
143 |  && ln -s /test link_imageonly
144 | # List.
145 | RUN ls -l
146 |
--------------------------------------------------------------------------------
/test/Dockerfile.metadata:
--------------------------------------------------------------------------------
1 | # This Dockerfile is used to test metadata pulling (issue #651). It includes
2 | # all the instructions that seemed like they ought to create metadata, even if
3 | # unsupported by ch-image.
4 | #
5 | # Scope is “skip” because we pull the image to test it; see
6 | # test/build/50_pull.bats.
7 | #
8 | # To build and push:
9 | #
10 | #   $ VERSION=$(date +%Y-%m-%d)  # or other date as appropriate
11 | #   $ sudo docker login  # if needed
12 | #   $ sudo docker build -t charliecloud/metadata:$VERSION \
13 | #                       -f Dockerfile.metadata .
14 | #   $ sudo docker images | fgrep metadata
15 | #   $ sudo docker push charliecloud/metadata:$VERSION
16 | #
17 | # ch-test-scope: skip
18 |
19 | FROM alpine:3.17
20 |
21 | CMD ["bar", "baz"]
22 | ENTRYPOINT ["/bin/echo", "foo"]
23 | ENV ch_foo=foo-ev ch_bar=bar-ev
24 | EXPOSE 867 5309/udp
25 | HEALTHCHECK --interval=60s --timeout=5s CMD ["/bin/true"]
26 | LABEL ch-foo=foo-label ch-bar=bar-label
27 | MAINTAINER charlie@example.com
28 | ONBUILD RUN echo hello
29 | RUN echo hello
30 | RUN ["/bin/echo", "world"]
31 | SHELL ["/bin/ash", "-c"]
32 | STOPSIGNAL SIGWINCH
33 | USER charlie:chargrp
34 | WORKDIR /mnt
35 | VOLUME /mnt/foo /mnt/bar /mnt/foo
36 |
--------------------------------------------------------------------------------
/test/Dockerfile.ocimanifest:
--------------------------------------------------------------------------------
1 | # This Dockerfile is used to test an image with an OCI manifest (issue #1184).
2 | #
3 | # WARNING: The manifest is produced by the build tool and is rather opaque.
4 | # Specifically, re-building the image might silently produce a different
5 | # manifest that also works, negating the value of this test. Building this
6 | # image with Podman 3.0.1 did trigger the above issue; Podman 3.4.0 very
7 | # likely has the same behavior. Bottom line, be very cautious about
8 | # re-building this image. One approach would be to comment out the content
9 | # types added by #1184 and see if the updated image still triggers the bug.
10 | #
11 | # Scope is “skip” because we pull it to test; see test/build/50_pull.bats.
12 | # ch-test-scope: skip
13 | #
14 | # To build and push:
15 | #
16 | #   $ VERSION=$(date +%Y-%m-%d)
17 | #   $ podman build -t charliecloud/ocimanifest:$VERSION \
18 | #                  -f Dockerfile.ocimanifest .
19 | #   $ podman images | fgrep ocimanifest
20 | #   $ podman login
21 | #   $ podman push charliecloud/ocimanifest:$VERSION
22 | #
23 |
24 | FROM alpine:3.17
25 | RUN echo hello
26 |
--------------------------------------------------------------------------------
/test/Dockerfile.quick:
--------------------------------------------------------------------------------
1 | # Minimal test image to exercise a Dockerfile build in quick scope.
2 | # ch-test-scope: quick
3 |
4 | FROM alpine:3.17
5 | RUN apk add bc
6 |
--------------------------------------------------------------------------------
/test/Makefile.am:
--------------------------------------------------------------------------------
1 | testdir = $(pkglibexecdir)
2 |
3 | # These test files require no special handling.
4 | testfiles = \
5 |     .dockerignore \
6 |     Dockerfile.argenv \
7 |     Dockerfile.quick \
8 |     approved-trailing-whitespace \
9 |     bucache/a.df \
10 |     bucache/a-fail.df \
11 |     bucache/argenv.df \
12 |     bucache/argenv-special.df \
13 |     bucache/argenv2.df \
14 |     bucache/b.df \
15 |     bucache/c.df \
16 |     bucache/copy.df \
17 |     bucache/difficult.df \
18 |     bucache/force.df \
19 |     bucache/from.df \
20 |     bucache/rsync.df \
21 |     build/10_sanity.bats \
22 |     build/40_pull.bats \
23 |     build/50_ch-image.bats \
24 |     build/50_dockerfile.bats \
25 |     build/50_localregistry.bats \
26 |     build/50_misc.bats \
27 |     build/99_cleanup.bats \
28 |     common.bash \
29 |     fixtures/empty-file \
30 |     fixtures/README \
31 |     make-auto.d/build.bats.in \
32 |     make-auto.d/build_custom.bats.in \
33 |     make-auto.d/builder_to_archive.bats.in \
34 |     make-auto.d/unpack.bats.in \
35 |     registry-config.yml \
36 |     run/build-rpms.bats \
37 |     run/ch-fromhost.bats \
38 |     run/ch-run_escalated.bats \
39 |     run/ch-run_isolation.bats \
40 |     run/ch-run_join.bats \
41 |     run/ch-run_misc.bats \
42 |     run/ch-run_uidgid.bats \
43 |     run_first.bats \
44 |     sotest/files_inferrable.txt \
45 |     sotest/libsotest.c \
46 |     sotest/sotest.c
47 |
48 | # Test files that should be executable.
49 | testfiles_exec = \
50 |     Build.centos7xz \
51 |     Build.docker_pull \
52 |     Build.missing \
53 |     docs-sane \
54 |     doctest \
55 |     doctest-auto \
56 |     force-auto \
57 |     make-perms-test \
58 |     old-storage \
59 |     order-py
60 |
61 | # Program and shared library used for testing shared library injection. It's
62 | # built according to the rules below. In principle, we could use libtool for
63 | # that, but I'm disinclined to add that in since it's one test program and
64 | # does not require any libtool portability.
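# (For orientation: the list below mixes the in-place build artifacts, e.g.
# sotest/sotest, with the bin/ and lib/ copies created by the cp rules at the
# bottom of this file.)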
65 | sobuilts = \
66 |     sotest/bin/sotest \
67 |     sotest/lib/libsotest.so.1.0 \
68 |     sotest/lib/libfabric/libsotest-fi.so \
69 |     sotest/libsotest.so \
70 |     sotest/libsotest.so.1 \
71 |     sotest/libsotest.so.1.0 \
72 |     sotest/sotest
73 |
74 | CLEANFILES = $(sobuilts) \
75 |              docs-sane \
76 |              doctest build/30_doctest-auto.bats \
77 |              force-auto force-auto.bats \
78 |              make-perms-test order-py
79 |
80 | if ENABLE_TEST
81 | nobase_test_DATA = $(testfiles)
82 | nobase_test_SCRIPTS = $(testfiles_exec)
83 | nobase_nodist_test_SCRIPTS = $(sobuilts)
84 | if ENABLE_CH_IMAGE  # this means we have Python
85 | nobase_test_DATA += force-auto.bats
86 | force-auto.bats: force-auto
87 | 	./$< > $@
88 | nobase_test_DATA += build/30_doctest-auto.bats
89 | build/30_doctest-auto.bats: doctest-auto
90 | 	./$< > $@
91 | endif
92 | # See comment about symlinks in examples/Makefile.am.
93 | all-local:
94 | 	ln -fTs /tmp fixtures/symlink-to-tmp
95 | clean-local:
96 | 	rm -f fixtures/symlink-to-tmp
97 | install-data-hook:
98 | 	$(MKDIR_P) $(DESTDIR)$(testdir)/fixtures
99 | 	ln -fTs /tmp $(DESTDIR)$(testdir)/fixtures/symlink-to-tmp
100 | uninstall-hook:
101 | 	rm -f $(DESTDIR)$(testdir)/fixtures/symlink-to-tmp
102 | 	rmdir $(DESTDIR)$(testdir)/fixtures || true
103 | 	rmdir $$(find $(pkglibexecdir) -type d | sort -r)
104 | endif
105 | EXTRA_DIST = $(testfiles) \
106 |              $(testfiles_exec) \
107 |              docs-sane.py.in \
108 |              doctest.py.in \
109 |              force-auto.py.in \
110 |              make-perms-test.py.in \
111 |              order-py.py.in
112 | EXTRA_SCRIPTS = $(sobuilts)
113 |
114 | ## Python scripts - need text processing
115 | docs-sane doctest force-auto make-perms-test order-py: %: %.py.in
116 | 	rm -f $@
117 | 	sed -E 's|%PYTHON_SHEBANG%|@PYTHON_SHEBANG@|' < $< > $@
118 | 	chmod +rx,-w $@  # respects umask
119 |
120 | sotest/sotest: sotest/sotest.c sotest/libsotest.so.1.0 sotest/libsotest.so sotest/libsotest.so.1
121 | 	$(CC) -o $@ $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -L./sotest -lsotest $^
122 |
123 | sotest/libsotest.so.1.0: sotest/libsotest.c
124 | 	$(CC) -o $@ $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -shared -fPIC -Wl,-soname,libsotest.so.1 -lc $^
125 |
126 | sotest/libsotest.so: sotest/libsotest.so.1.0
127 | 	ln -fTs ./libsotest.so.1.0 $@
128 |
129 | sotest/libsotest.so.1: sotest/libsotest.so.1.0
130 | 	ln -fTs ./libsotest.so.1.0 $@
131 |
132 | sotest/bin/sotest: sotest/sotest
133 | 	mkdir -p sotest/bin
134 | 	cp -a $^ $@
135 |
136 | sotest/lib/libsotest.so.1.0: sotest/libsotest.so.1.0
137 | 	mkdir -p sotest/lib
138 | 	cp -a $^ $@
139 |
140 | sotest/lib/libfabric/libsotest-fi.so: sotest/libsotest.so.1.0
141 | 	mkdir -p sotest/lib/libfabric
142 | 	cp -a $^ $@
143 |
--------------------------------------------------------------------------------
/test/approved-trailing-whitespace:
--------------------------------------------------------------------------------
1 | ./misc/loc:60: filter rm_comments_in_strings " //
2 | ./test/build/50_dockerfile.bats:60:RUN true
3 | ./test/build/50_dockerfile.bats:63:
4 | ./test/build/50_dockerfile.bats:65:
5 | ./test/build/50_dockerfile.bats:66:
6 | ./test/build/50_dockerfile.bats:85:RUN echo test3\
7 | ./test/build/50_dockerfile.bats:88:RUN echo test3\
8 | ./test/build/50_dockerfile.bats:89:b\
9 | ./test/build/50_dockerfile.bats:93:RUN echo test4 \
10 | ./test/build/50_dockerfile.bats:96:RUN echo test4 \
11 | ./test/build/50_dockerfile.bats:97:b \
12 | ./test/build/50_dockerfile.bats:131: 4. RUN.S true
13 | ./test/build/50_dockerfile.bats:434:#ENV chse_1a value 1a
14 | ./test/build/50_dockerfile.bats:437:#ENV chse_1c=value\ 1c\
15 |
--------------------------------------------------------------------------------
/test/bucache/a-fail.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 | RUN echo foo
3 | RUN false
4 | RUN echo bar
5 |
--------------------------------------------------------------------------------
/test/bucache/a.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 | RUN echo foo
3 | RUN echo bar
4 |
--------------------------------------------------------------------------------
/test/bucache/argenv-special.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 | ARG argA=vargA
3 | ARG SSH_AUTH_SOCK=sockA
4 | RUN echo $argA $SSH_AUTH_SOCK
5 |
--------------------------------------------------------------------------------
/test/bucache/argenv.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 | ARG argA=vargA
3 | ARG argB=vargB$argA
4 | ENV envA=venvA envB=venvB$argA
5 | RUN echo 1 $argA $argB $envA $envB
6 |
--------------------------------------------------------------------------------
/test/bucache/argenv2.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 | ARG argA=vargA
3 | ARG argB=vargB$argA
4 | ENV envA=venvA envB=venvB$argA
5 | RUN echo 2 $argA $argB $envA $envB
6 |
--------------------------------------------------------------------------------
/test/bucache/b.df:
--------------------------------------------------------------------------------
1 | FROM a
2 | RUN echo baz
3 |
--------------------------------------------------------------------------------
/test/bucache/c.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 | RUN echo foo
3 | RUN echo qux
4 |
--------------------------------------------------------------------------------
/test/bucache/copy.df:
--------------------------------------------------------------------------------
1 | # Context directory must be fixtures prepared in 55_cache.bats:COPY.
2 | FROM alpine:3.17
3 | COPY * /
4 |
--------------------------------------------------------------------------------
/test/bucache/difficult.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 |
3 | WORKDIR /test
4 |
5 | # Directory and file with full permissions.
6 | RUN mkdir dir_all && chmod 4777 dir_all
7 | RUN touch dir_all/file_all && chmod 4777 dir_all/file_all
8 |
9 | # Directory and file with minimal permissions.
10 | RUN mkdir dir_min && chmod 700 dir_min
11 | RUN touch dir_min/file_min && chmod 400 dir_min/file_min
12 |
13 | # FIFO
14 | RUN mkfifo fifo_
15 |
16 | # Empty directories
17 | RUN mkdir dir_empty
18 | RUN mkdir -p dir_empty_empty/dir_empty
19 |
20 | # Hard link
21 | RUN touch hard_target
22 | RUN ln hard_target hard_src
23 |
24 | # Symlink
25 | RUN touch soft_target
26 | RUN ln -s soft_target soft_src
27 |
28 | # Git repository
29 | RUN apk add git
30 | RUN git init gitrepo
31 |
32 | # Well-known last instruction so we can check if it’s cached.
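# (That is, on a second build with the build cache enabled, this instruction
# should be reported as a hit rather than re-executed.)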
33 | RUN echo last
34 |
--------------------------------------------------------------------------------
/test/bucache/force.df:
--------------------------------------------------------------------------------
1 | # Use an almalinux:8 image because it can install some RPMs without --force.
2 |
3 | FROM almalinux:8
4 | WORKDIR /
5 | RUN dnf install -y ed  # doesn’t need --force
6 | WORKDIR /usr
7 |
--------------------------------------------------------------------------------
/test/bucache/from.df:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17
2 |
--------------------------------------------------------------------------------
/test/bucache/rsync.df:
--------------------------------------------------------------------------------
1 | # Context directory must be fixtures prepared in 55_cache.bats:RSYNC.
2 | FROM alpine:3.17
3 | RSYNC /* /
4 |
--------------------------------------------------------------------------------
/test/build/50_localregistry.bats:
--------------------------------------------------------------------------------
1 | load ../common
2 | tag='ch-image push'
3 |
4 | # Note: These tests use a local registry listening on localhost:5000 but do
5 | # not start it. Therefore, they do not depend on whether the pushed images are
6 | # already present.
7 |
8 |
9 | setup () {
10 |     scope standard
11 |     [[ $CH_TEST_BUILDER = ch-image ]] || skip 'ch-image only'
12 |     localregistry_init
13 | }
14 |
15 | @test "${tag}: without destination reference" {
16 |     # FIXME: This test copies an image manually so we can use it to push.
17 |     # Remove when we have real aliasing support for images.
18 |     ch-image build -t localhost:5000/alpine:3.17 - <<'EOF'
19 | FROM alpine:3.17
20 | EOF
21 |
22 |     run ch-image -v --tls-no-verify push localhost:5000/alpine:3.17
23 |     echo "$output"
24 |     [[ $status -eq 0 ]]
25 |     [[ $output = *'pushing image: localhost:5000/alpine:3.17'* ]]
26 |     [[ $output = *"image path: ${CH_IMAGE_STORAGE}/img/localhost+5000%alpine+3.17"* ]]
27 |
28 |     ch-image delete localhost:5000/alpine:3.17
29 | }
30 |
31 | @test "${tag}: without metadata history" {
32 |     ch-image build -t tmpimg - <<'EOF'
33 | FROM alpine:3.17
34 | EOF
35 |
36 |     ch-convert tmpimg "$BATS_TMPDIR/tmpimg"
37 |     rm -rf "$BATS_TMPDIR/tmpimg/ch"
38 |
39 |     ch-image delete tmpimg
40 |     ch-image import "$BATS_TMPDIR/tmpimg" tmpimg
41 |
42 |     run ch-image -v --tls-no-verify push tmpimg localhost:5000/tmpimg
43 |     echo "$output"
44 |     [[ $status -eq 0 ]]
45 |     [[ $output = *'pushing image: tmpimg'* ]]
46 |     [[ $output = *'destination: localhost:5000/tmpimg'* ]]
47 |     [[ $output = *"image path: ${CH_IMAGE_STORAGE}/img/tmpimg"* ]]
48 | }
49 |
50 | @test "${tag}: with destination reference" {
51 |     run ch-image -v --tls-no-verify push alpine:3.17 localhost:5000/alpine:3.17
52 |     echo "$output"
53 |     [[ $status -eq 0 ]]
54 |     [[ $output = *'pushing image: alpine:3.17'* ]]
55 |     [[ $output = *'destination: localhost:5000/alpine:3.17'* ]]
56 |     [[ $output = *"image path: ${CH_IMAGE_STORAGE}/img/alpine+3.17"* ]]
57 |     # FIXME: Can’t re-use layer from previous test because it’s a copy.
58 |     #re='layer 1/1: [0-9a-f]{7}: already present'
59 |     #[[ $output =~ $re ]]
60 | }
61 |
62 | @test "${tag}: with --image" {
63 |     # NOTE: This also tests round-tripping and a more complex destination ref.
64 |
65 |     img="$BATS_TMPDIR"/pushtest-up
66 |     img2="$BATS_TMPDIR"/pushtest-down
67 |     mkdir -p "$img" "$img"/{bin,dev,usr}
68 |
69 |     # Set up setuid/setgid files and directories.
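    # (Mode 4640 is setuid plus rw-r-----; 2640 is setgid plus rw-r-----. With
    # no execute bit, stat(1) renders the set-ID bit as “S”, which is what the
    # assertions below expect; the push is later expected to strip these bits.)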
70 |     touch "$img"/{setuid_file,setgid_file}
71 |     chmod 4640 "$img"/setuid_file
72 |     chmod 2640 "$img"/setgid_file
73 |     mkdir -p "$img"/{setuid_dir,setgid_dir}
74 |     chmod 4750 "$img"/setuid_dir
75 |     chmod 2750 "$img"/setgid_dir
76 |     ls -l "$img"
77 |     [[ $(stat -c '%A' "$img"/setuid_file) = -rwSr----- ]]
78 |     [[ $(stat -c '%A' "$img"/setgid_file) = -rw-r-S--- ]]
79 |     [[ $(stat -c '%A' "$img"/setuid_dir) = drwsr-x--- ]]
80 |     [[ $(stat -c '%A' "$img"/setgid_dir) = drwxr-s--- ]]
81 |
82 |     # Create fake history.
83 |     mkdir -p "$img"/ch
84 |     cat <<'EOF' > "$img"/ch/metadata.json
85 | {
86 |     "history": [ {"created_by": "ch-test" } ]
87 | }
88 | EOF
89 |
90 |     # Push the image
91 |     run ch-image -v --tls-no-verify push --image "$img" \
92 |                  localhost:5000/foo/bar:weirdal
93 |     echo "$output"
94 |     [[ $status -eq 0 ]]
95 |     [[ $output = *'pushing image: localhost:5000/foo/bar:weirdal'* ]]
96 |     [[ $output = *"image path: ${img}"* ]]
97 |     [[ $output = *'stripping unsafe setgid bit: ./setgid_dir'* ]]
98 |     [[ $output = *'stripping unsafe setgid bit: ./setgid_file'* ]]
99 |     [[ $output = *'stripping unsafe setuid bit: ./setuid_dir'* ]]
100 |     [[ $output = *'stripping unsafe setuid bit: ./setuid_file'* ]]
101 |
102 |     # Pull it back
103 |     ch-image -v --tls-no-verify pull localhost:5000/foo/bar:weirdal
104 |     ch-convert localhost:5000/foo/bar:weirdal "$img2"
105 |     ls -l "$img2"
106 |     [[ $(stat -c '%A' "$img2"/setuid_file) = -rw-r----- ]]
107 |     [[ $(stat -c '%A' "$img2"/setgid_file) = -rw-r----- ]]
108 |     [[ $(stat -c '%A' "$img2"/setuid_dir) = drwxr-x--- ]]
109 |     [[ $(stat -c '%A' "$img2"/setgid_dir) = drwxr-x--- ]]
110 | }
111 |
112 | @test "${tag}: consistent layer hash" {
113 |     run ch-image push --tls-no-verify alpine:3.17 localhost:5000/alpine:3.17
114 |     echo "$output"
115 |     [[ $status -eq 0 ]]
116 |     push1=$(echo "$output" | grep -E 'layer 1/1: .+: checking')
117 |
118 |     run ch-image push --tls-no-verify alpine:3.17 localhost:5000/alpine:3.17
119 |     echo "$output"
120 |     [[ $status -eq 0 ]]
121 |     push2=$(echo "$output" | grep -E 'layer 1/1: .+: checking')
122 |
123 |     diff -u <(echo "$push1") <(echo "$push2")
124 | }
125 |
126 | @test "${tag}: environment variables round-trip" {
127 |     # Delete “tmpimg” from previous test to avoid issues
128 |     ch-image delete tmpimg
129 |
130 |     cat <<'EOF' | ch-image build -t tmpimg -
131 | FROM alpine:3.17
132 | ENV weird="al yankovic"
133 | EOF
134 |
135 |     ch-image push --tls-no-verify tmpimg localhost:5000/tmpimg
136 |     ch-image pull --tls-no-verify localhost:5000/tmpimg
137 |     ch-convert localhost:5000/tmpimg "$BATS_TMPDIR"/tmpimg
138 |
139 |     run ch-run "$BATS_TMPDIR"/tmpimg --unset-env='*' --set-env -- env
140 |     echo "$output"
141 |     [[ $status -eq 0 ]]
142 |     [[ $output = *'weird=al yankovic'* ]]
143 | }
144 |
--------------------------------------------------------------------------------
/test/build/50_misc.bats:
--------------------------------------------------------------------------------
1 | load ../common
2 |
3 | @test 'sotest executable works' {
4 |     scope quick
5 |     [[ $ch_libc = glibc ]] || skip 'glibc only'
6 |     export LD_LIBRARY_PATH=./sotest
7 |     ldd sotest/sotest
8 |     sotest/sotest
9 | }
10 |
--------------------------------------------------------------------------------
/test/build/60_force.bats:
--------------------------------------------------------------------------------
1 | load ../common
2 | tag='ch-image --force'
3 |
4 | setup () {
5 |     [[ $CH_TEST_BUILDER = ch-image ]] || skip 'ch-image only'
6 |     export CH_IMAGE_CACHE=disabled
7 | }
8 |
9 | @test "${tag}: no matching distro" {
10 |     scope standard
11 |
12 |     # with --force
13 |     run ch-image -v build --force=fakeroot -t tmpimg -f - . <<'EOF'
14 | FROM hello-world:latest
15 | EOF
16 |     echo "$output"
17 |     [[ $status -eq 1 ]]
18 |     [[ $output = *'--force=fakeroot not available (no suitable config found)'* ]]
19 | }
20 |
21 | @test "${tag}: misc errors" {
22 |     scope standard
23 |
24 |     run ch-image build --force=fakeroot --force-cmd=foo,bar .
25 |     echo "$output"
26 |     [[ $status -eq 1 ]]
27 |     [[ $output = *'are incompatible'* ]]
28 | }
29 |
30 | @test "${tag}: multiple RUN" {
31 |     scope standard
32 |
33 |     # 1. List form of RUN.
34 |     # 2. apt-get not at beginning.
35 |     run ch-image -v build --force -t tmpimg -f - . <<'EOF'
36 | FROM debian:buster
37 | RUN true
38 | RUN true && apt-get update
39 | RUN ["apt-get", "install", "-y", "hello"]
40 | EOF
41 |     echo "$output"
42 |     [[ $status -eq 0 ]]
43 |     [[ $(echo "$output" | grep -Fc 'RUN: new command:') -eq 2 ]]
44 |     [[ $output = *'--force=seccomp: modified 2 RUN instructions'* ]]
45 |     [[ $output = *'grown in 4 instructions: tmpimg'* ]]
46 | }
47 |
48 | @test "${tag}: dpkg(8)" {
49 |     # Typically folks will use apt-get(8), but bare dpkg(8) also happens.
50 |     scope standard
51 |     [[ $(uname -m) = x86_64 ]] || skip 'amd64 only'
52 |
53 |     # NOTE: This produces a broken system because we ignore openssh-client’s
54 |     # dependencies, but it’s good enough to test --force.
55 |     ch-image -v build --force -t tmpimg -f - . <<'EOF'
56 | FROM debian:buster
57 | RUN apt-get update && apt install -y wget
58 | RUN wget -nv https://snapshot.debian.org/archive/debian/20230213T151507Z/pool/main/o/openssh/openssh-client_8.4p1-5%2Bdeb11u1_amd64.deb
59 | RUN dpkg --install --force-depends *.deb
60 | EOF
61 | }
62 |
63 | @test "${tag}: rpm(8)" {
64 |     # Typically folks will use yum(8) or dnf(8), but bare rpm(8) also happens.
65 |     scope standard
66 |     [[ $(uname -m) = x86_64 ]] || skip 'amd64 only'
67 |
68 |     ch-image -v build --force -t tmpimg -f - . <<'EOF'
69 | FROM almalinux:8
70 | RUN curl -sSOL https://vault.almalinux.org/8.6/BaseOS/x86_64/os/Packages/openssh-8.0p1-13.el8.x86_64.rpm
71 | RUN rpm --install *.rpm
72 | EOF
73 | }
74 |
75 | @test "${tag}: list form" {
76 |     scope standard
77 |
78 |     ch-image -v build --force -t tmpimg -f - . <<'EOF'
79 | FROM debian:buster
80 | RUN ["apt-get", "update"]
81 | RUN ["apt-get", "install", "-y", "openssh-client"]
82 | EOF
83 | }
84 |
--------------------------------------------------------------------------------
/test/build/99_cleanup.bats:
--------------------------------------------------------------------------------
1 | load ../common
2 |
3 | @test 'nothing unexpected in tarball directory' {
4 |     scope quick
5 |     run find "$ch_tardir" -mindepth 1 -maxdepth 1 \
6 |             -not \( -name 'WEIRD_AL_YANKOVIC' \
7 |                     -o -name '*.sqfs' \
8 |                     -o -name '*.tar.gz' \
9 |                     -o -name '*.tar.xz' \
10 |                     -o -name '*.pq_missing' \)
11 |     echo "$output"
12 |     [[ $output = '' ]]
13 | }
14 |
--------------------------------------------------------------------------------
/test/doctest-auto:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Print (on stdout) BATS tests to run doctests on each file in lib/.
4 |
5 | set -e -o pipefail
6 |
7 | # Ensure reproducible output (#1849)
8 | export LC_ALL=C
9 |
10 | cat < 0]
53 | tests = [i for i in tests_nonempty if re.search(object_re, i.name_short)]
54 | print("will run %d/%d tests" % (len(tests), len(tests_nonempty)))
55 |
56 |
57 | # Run tests.
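# (Stdlib note: DocTestRunner.run() returns a TestResults named tuple with
# .failed and .attempted counts, and the out= callback receives the runner's
# report text; buffering it in out_save() below means the report is printed
# only when a test fails.)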
58 |
59 | out = ""
60 | def out_save(text):
61 |     global out
62 |     out += text
63 | runner = doctest.DocTestRunner(optionflags=(  doctest.DONT_ACCEPT_TRUE_FOR_1
64 |                                             | doctest.ELLIPSIS))
65 | for test in tests:
66 |     print("%s ... " % test.name_short, end="")
67 |     out = ""
68 |     results = runner.run(test, out=out_save)
69 |     assert (results.attempted == len(test.examples))
70 |     if (results.failed == 0):
71 |         print("ok (%d examples)" % results.attempted)
72 |     else:
73 |         print("%d/%d failed" % (results.failed, results.attempted))
74 |         print(out)
75 |         print("big L, stopping tests")
76 |         sys.exit(1)
77 |
78 |
79 | # Summarize.
80 |
81 | print("all tests passed")
82 |
--------------------------------------------------------------------------------
/test/fixtures/README:
--------------------------------------------------------------------------------
1 | You can see what tests use the fixtures with "misc/grep 'fixtures/'".
2 |
--------------------------------------------------------------------------------
/test/fixtures/empty-file:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hpc/charliecloud/23de6eee5f6c7df6d88ea9efe89e605404afea0e/test/fixtures/empty-file
--------------------------------------------------------------------------------
/test/make-auto.d/build.bats.in:
--------------------------------------------------------------------------------
1 | source common.bash # for ShellCheck; removed by ch-test
2 |
3 | @test 'build %(tag)s' {
4 |     scope %(scope)s
5 |     # shellcheck disable=SC2086
6 |     build_ -t %(tag)s --file="%(path)s" "%(dirname)s"
7 |     #sudo docker tag %(tag)s "%(tag)s:$ch_version_docker"
8 |     builder_ok %(tag)s
9 | }
10 |
--------------------------------------------------------------------------------
/test/make-auto.d/build_custom.bats.in:
--------------------------------------------------------------------------------
1 | source common.bash # for ShellCheck; removed by ch-test
2 |
3 | @test 'custom build %(tag)s' {
4 |     scope %(scope)s
5 |     out="${ch_tardir}/%(tag)s"
6 |     pq="${ch_tardir}/%(tag)s.pq_missing"
7 |     workdir="${ch_tardir}/%(tag)s.tmp"
8 |     rm -f "$pq"
9 |     mkdir "$workdir"
10 |     cd "%(dirname)s"
11 |     run ./%(basename)s "$PWD" "$out" "$workdir"
12 |     echo "$output"
13 |     if [[ $status -eq 0 ]]; then
14 |         if [[ -f ${out}.tar.gz || -f ${out}.tar.xz ]]; then  # tarball
15 |             # Validate exactly one tarball came out.
16 |             tarballs=( "$out".tar.* )
17 |             [[ ${#tarballs[@]} -eq 1 ]]
18 |             tarball=${tarballs[0]}
19 |             # Convert to SquashFS if needed.
20 |             if [[ $CH_TEST_PACK_FMT = squash* ]]; then
21 |                 # With the centos7xz image, we run into permission errors if we
22 |                 # try to use the tar “--xattrs-include” option. Using strace(1),
23 |                 # we determined that with the xattrs option specified, tar first
24 |                 # calls mknodat(2) to create a file with permissions 000, then
25 |                 # openat(2) on the same file, which fails with EACCES. Without
26 |                 # the xattrs option, the file is created by a call to openat(2)
27 |                 # with the O_CREAT flag (rather than mknodat(2)), so the
28 |                 # permission error is avoided. (See
29 |                 # https://savannah.gnu.org/support/index.php?110903).
30 |                 if [[ $tarball = *centos7xz* ]]; then
31 |                     xattrs_arg=--no-xattrs
32 |                 else
33 |                     xattrs_arg=
34 |                 fi
35 |                 ch-convert $xattrs_arg "$tarball" "${tarball/tar.?z/sqfs}"
36 |                 rm "$tarball"
37 |             fi
38 |         elif [[ -d $out ]]; then  # directory
39 |             case $CH_TEST_PACK_FMT in
40 |                 squash-*)
41 |                     ext=sqsh
42 |                     ;;
43 |                 tar-unpack)
44 |                     ext=tar.gz
45 |                     ;;
46 |                 *)
47 |                     false  # unknown format
48 |                     ;;
49 |             esac
50 |             ch-convert "$out" "${out}.${ext}"
51 |         else
52 |             false  # unknown format
53 |         fi
54 |     fi
55 |     rm -Rf --one-file-system "$out" "$workdir"
56 |     if [[ $status -eq 65 ]]; then
57 |         touch "$pq"
58 |         rm -Rf --one-file-system "$out".tar.{gz,xz}
59 |         skip 'prerequisites not met'
60 |     else
61 |         return "$status"
62 |     fi
63 | }
64 |
--------------------------------------------------------------------------------
/test/make-auto.d/builder_to_archive.bats.in:
--------------------------------------------------------------------------------
1 | source common.bash # for ShellCheck; removed by ch-test
2 |
3 | @test 'builder to archive %(tag)s' {
4 |     scope %(scope)s
5 |     case $CH_TEST_PACK_FMT in
6 |         squash*)
7 |             ext=sqfs
8 |             ;;
9 |         tar-unpack)
10 |             ext=tar.gz
11 |             ;;
12 |         *)
13 |             false  # unknown format
14 |             ;;
15 |     esac
16 |     archive=${ch_tardir}/%(tag)s.${ext}
17 |     ch-convert -i "$CH_TEST_BUILDER" '%(tag)s' "$archive"
18 |     archive_grep "$archive"
19 |     archive_ok "$archive"
20 | }
21 |
--------------------------------------------------------------------------------
/test/make-auto.d/unpack.bats.in:
--------------------------------------------------------------------------------
1 | source common.bash # for ShellCheck; removed by ch-test
2 |
3 | @test 'unpack %(tag)s' {
4 |     scope %(scope)s
5 |     prerequisites_ok %(tag)s
6 |     ch_tag=%(tag)s
7 |     unpack_img_all_nodes "true"
8 | }
9 |
--------------------------------------------------------------------------------
/test/old-storage:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e -o pipefail
4 |
5 |
6 | ### Functions
7 |
8 | fatal_msg () {
9 |     printf '💀💀💀 error: %s 💀💀💀\n' "$1" 1>&2
10 | }
11 |
12 | usage () {
13 |     cat <<'EOF' 1>&2
14 | Test that ch-image can upgrade storage directories generated by old versions,
15 | i.e., unpack each old storage directory from a tarball, then try to upgrade it
16 | and run the test suite.
17 |
18 | Usage:
19 |
20 |   $ old-storage SCOPE WORKDIR (DIR|TAR1) [TAR2 ...]
21 |
22 | WORKDIR is where ch-image storage directories are unpacked. It must be empty
23 | and have enough space for one storage directory.
24 |
25 | TARn are old storage directories archived as tarballs. These must have certain
26 | properties:
27 |
28 |   1. Named storage-$VERSION.$ARCH.tar.gz, e.g. “storage-0.27.x86_64.tar.gz”.
29 |      (Note: $ARCH is not currently validated but may be in the future.)
30 |
31 |   2. Is a tarbomb, e.g.:
32 |
33 |        $ tar tf storage-0.26.x86_64.tar.gz | head -3
34 |        ./
35 |        ./dlcache/
36 |        ./dlcache/alpine:3.9.fat.json
37 |
38 |   3. The result of:
39 |
40 |        $ rm -Rf $(ch-image storage-path) && ch-test -b ch-image build
41 |
42 |      or equivalent, though a mostly (rather than fully) successful test run is fine.
43 |
44 |      Note: Best practice is to generate the tarball at the time of release,
45 |      because old test suites often don’t pass due to changing source images.
46 |
47 | If a directory DIR is given instead, use all tarballs in that directory that
48 | have last-modified dates less than one year in the past. (See #1507.)
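For example (workdir and tarball names hypothetical):

  $ ./old-storage standard /tmp/oldwork storage-0.27.x86_64.tar.gz \
                  storage-0.28.x86_64.tar.gz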
49 | EOF
50 | }
51 |
52 | INFO () {
53 |     printf '📣 %s\n' "$1"
54 | }
55 |
56 |
57 | ### Parse arguments & setup
58 |
59 | if [[ $1 = --help || $1 = -? ]]; then
60 |     usage
61 |     exit 0
62 | fi
63 | if [[ $# -lt 3 ]]; then
64 |     usage
65 |     exit 1
66 | fi
67 | scope=$1; shift
68 | workdir=$1; shift
69 |
70 | trap 'fatal_msg "command failed on line $LINENO"' ERR
71 | PATH=$(cd "$(dirname "$0")" && pwd)/../bin:$PATH
72 | export PATH
73 |
74 | if [[ -d $1 ]]; then
75 |     oldtars=$(find "$1" -mindepth 1 -mtime -365 -print | sort)
76 | else
77 |     oldtars=$(printf '%s ' "$@")  # https://www.shellcheck.net/wiki/SC2124
78 | fi
79 |
80 | summary=''
81 | pass_ct=0
82 | fail_ct=0
83 |
84 |
85 | ### Main loop
86 |
87 | INFO "workdir: $workdir"
88 | for oldtar in $oldtars; do
89 |     base=$(basename "$oldtar")
90 |     base=${base%.*}  # rm .gz
91 |     base=${base%.*}  # rm .tar
92 |     base=${base%.*}  # rm architecture
93 |     storage=${workdir}/${base}
94 |
95 |     INFO "old tar: $oldtar ($(stat -c %y "$oldtar"))"
96 |     INFO "unpacking: $storage"
97 |     [[ -d $workdir ]]
98 |     [[ ! -d $storage ]]
99 |     mkdir "$storage"
100 |     tar xf "$oldtar" -C "$storage"
101 |     [[ -d $storage ]]
102 |     export CH_IMAGE_STORAGE=$storage
103 |     INFO "unpacked: $(du -sh "$storage" | cut -f1) bytes in $(du --inodes -sh "$storage" | cut -f1) inodes"
104 |
105 |     case ${storage#*-} in
106 |         0.29|0.30|0.31)
107 |             INFO "working around bug fixed by PR #1662"
108 |             (cd "$storage"/bucache && git branch -D alpine+latest)
109 |             ;;
110 |     esac
111 |
112 |     INFO "upgrading"
113 |     ch-image list
114 |     # These are images that contain references to things on the internet that
115 |     # go out of date, so builds based on them fail. Re-pull them to get a
116 |     # current base image.
117 |     ch-image pull archlinux:latest
118 |
119 |     INFO "testing"
120 |     if (ch-test -b ch-image --pedantic=no -s "$scope" all); then
121 |         pass_ct=$((pass_ct + 1))
122 |         summary+="😁 ${oldtar}: PASS"$'\n'
123 |     else
124 |         fail_ct=$((fail_ct + 1))
125 |         summary+="🤦 ${oldtar}: FAIL"$'\n'
126 |     fi
127 |
128 |
129 |     INFO "deleting: $storage"
130 |     rm -Rf --one-file-system "$storage"
131 |     [[ ! -d $storage ]]
132 | done
133 |
134 | cat < 2 | #include
3 |
4 | int increment(int a);
5 |
6 | int main()
7 | {
8 |     int b = 8675308;
9 |     printf("libsotest says %d incremented is %d\n", b, increment(b));
10 |     exit(0);
11 | }
12 |
--------------------------------------------------------------------------------
/test/unused/echo-euid.c:
--------------------------------------------------------------------------------
1 | /* This program prints the effective user ID on stdout and exits. It is useful
2 |    for testing whether the setuid bit was effective. */
3 |
4 | #include <stdio.h>
5 | #include <sys/types.h>
6 | #include <unistd.h>
7 |
8 | int main(void)
9 | {
10 |     printf("%u\n", geteuid());
11 |     return 0;
12 | }
13 |
--------------------------------------------------------------------------------
/test/unused/su_wrap.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # This script tries to use su to gain root privileges, assuming that
4 | # /etc/shadow has been changed such that no password is required. It uses
5 | # pexpect to emulate the terminal that su requires.
6 | #
7 | # WARNING: This does not work. For example:
8 | #
9 | #   $ whoami ; echo $UID $EUID
10 | #   reidpr
11 | #   1001 1001
12 | #   $ /bin/su -c whoami
13 | #   root
14 | #   $ ./su_wrap.py 2>> /dev/null
15 | #   SAFE escalation failed: empty password rejected
16 | #
17 | # That is, manual su can escalate without a password (and doesn't without the
18 | # /etc/shadow hack), but when this program tries to do apparently the same
19 | # thing, su wants a password.
20 | #
21 | # I have not been able to track down why this happens. I suspect that PAM has
22 | # some extra smarts about TTY that causes it to ask for a password under
23 | # pexpect. I'm leaving the code in the repository in case some future person
24 | # can figure it out.
25 |
26 | import sys
27 | import pexpect
28 |
29 | # Invoke su. This will do one of three things:
30 | #
31 | #   1. Print 'root'; the escalation was successful.
32 | #   2. Ask for a password; the escalation was unsuccessful.
33 | #   3. Something else; this is an error.
34 | #
35 | p = pexpect.spawn('/bin/su', ['-c', 'whoami'], timeout=5,
36 |                   encoding='UTF-8', logfile=sys.stderr)
37 | i = p.expect_exact(['root', 'Password:'])
38 | try:
39 |     if (i == 0):    # printed "root"
40 |         print('RISK\tescalation successful: no password requested')
41 |     elif (i == 1):  # asked for password
42 |         p.sendline()  # try empty password
43 |         i = p.expect_exact(['root', 'Authentication failure'])
44 |         if (i == 0):    # printed "root"
45 |             print('RISK\tescalation successful: empty password accepted')
46 |         elif (i == 1):  # explicit failure
47 |             print('SAFE\tescalation failed: empty password rejected')
48 |         else:
49 |             assert False
50 |     else:
51 |         assert False
52 | except pexpect.EOF:
53 |     print('ERROR\tsu exited unexpectedly')
54 | except pexpect.TIMEOUT:
55 |     print('ERROR\ttimed out waiting for su')
56 | except AssertionError:
57 |     print('ERROR\tassertion failed')
58 |
--------------------------------------------------------------------------------
/test/whiteout:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # This Python script produces (on stdout) a Dockerfile that produces a large
4 | # number of whiteouts. At the end, the Dockerfile prints some output that can
5 | # be compared with the flattened image. The purpose is to test whiteout
6 | # interpretation during flattening.
7 | #
8 | # See: https://github.com/opencontainers/image-spec/blob/master/layer.md
9 | #
10 | # There are a few factors to consider:
11 | #
12 | #   * files vs. directories
13 | #   * white-out explicit files vs. everything in a directory
14 | #   * restore the files vs. not (in the same layer as deletion)
15 | #
16 | # Currently, we don't do recursion, operating only on the specified directory.
17 | # We do this at two different levels in the directory tree.
18 | #
19 | # It's easy to bump into the 127-layer limit with this script.
20 | #
21 | # To build and push:
22 | #
23 | #   $ version=2020-01-09  # use today's date
24 | #   $ sudo docker login   # if needed
25 | #   $ ./whiteout | sudo docker build -t whiteout -f - .
26 | #   $ sudo docker tag whiteout:latest charliecloud/whiteout:$version
27 | #   $ sudo docker images | fgrep whiteout
28 | #   $ sudo docker push charliecloud/whiteout:$version
29 | #
30 | # Then your new image will be at:
31 | #
32 | #   https://hub.docker.com/repository/docker/charliecloud/whiteout
33 |
34 |
35 | import sys
36 |
37 | INF = 99
38 |
39 |
40 | def discotheque(prefix, et):
41 |     if (et == "file"):
42 |         mk_cmd = "echo orig > %s"
43 |         rm_cmd = "rm %s"
44 |         rt_cmd = "echo rest > %s"
45 |     elif (et == "dir"):
46 |         mk_cmd = "mkdir -p %s/orig"
47 |         rm_cmd = "rm -Rf %s"
48 |         rt_cmd = "mkdir -p %s/rest"
49 |     for mk_ct in [0, 1, 2]:
50 |         for rm_ct in [0, 1, INF]:
51 |             if (   (rm_ct == INF and mk_ct == 0)
52 |                 or (rm_ct != INF and rm_ct > mk_ct)):
53 |                 continue
54 |             for rt_ct in [0, 1, 2]:
55 |                 if (rt_ct > rm_ct or rt_ct > mk_ct):
56 |                     continue
57 |                 base = "%s/%s_mk-%d_rm-%d_rt-%d" % (prefix, et, mk_ct, rm_ct, rt_ct)
58 |                 mks = ["mkdir %s" % base]
59 |                 rms = []
60 |                 print("")
61 |                 for mk in range(mk_ct):
62 |                     mks.append(mk_cmd % ("%s/%d" % (base, mk)))
63 |                 if (rm_ct == INF):
64 |                     rms.append(rm_cmd % ("%s/*" % base))
65 |                 else:
66 |                     for rm in range(rm_ct):
67 |                         rms.append(rm_cmd % ("%s/%d" % (base, rm)))
68 |                 for rt in range(rt_ct):
69 |                     rms.append(rt_cmd % ("%s/%d" % (base, rt)))
70 |                 if (len(mks) > 0):
71 |                     print("RUN " + " && ".join(mks))
72 |                 if (len(rms) > 0):
73 |                     print("RUN " + " && ".join(rms))
74 |
75 |
76 | print("FROM alpine:3.17")
77 |
78 | print("RUN mkdir /w /w/v")
79 |
80 | discotheque("/w", "file")
81 | discotheque("/w", "dir")
82 | discotheque("/w/v", "file")
83 | discotheque("/w/v", "dir")
84 |
85 | print("")
86 | print("RUN ls -aR /w")
87 | print("RUN find /w -type f -exec sh -c 'printf \"{} \" && cat {}' \\; | sort")
88 |
--------------------------------------------------------------------------------
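For reference, the first few lines this script emits, reconstructed by hand
from the code above (illustrative only, not captured output):

    FROM alpine:3.17
    RUN mkdir /w /w/v

    RUN mkdir /w/file_mk-0_rm-0_rt-0

    RUN mkdir /w/file_mk-1_rm-0_rt-0 && echo orig > /w/file_mk-1_rm-0_rt-0/0

    RUN mkdir /w/file_mk-1_rm-1_rt-0 && echo orig > /w/file_mk-1_rm-1_rt-0/0
    RUN rm /w/file_mk-1_rm-1_rt-0/0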