├── .github ├── actions │ └── docker-images-verification │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── action.yml │ │ └── entrypoint.sh └── workflows │ └── docker-image-verify.yml ├── .gitignore ├── LICENSE ├── README.md ├── cleanup.py ├── cvmfs-singularity-sync ├── cvmfs-singularity-sync.service ├── cvmfs-singularity-sync.timer ├── docker-creds.example.json ├── docker_images.txt ├── dockerhub.py ├── osg-wn-nightly-build ├── osg-wn-nightly-build.service └── requirements.txt /.github/actions/docker-images-verification/Dockerfile: -------------------------------------------------------------------------------- 1 | # Container image that runs your code 2 | #FROM alpine:3.10 3 | FROM python 4 | 5 | RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py 6 | RUN python get-pip.py 7 | RUN pip install requests furl sqlitedict 8 | # Copies your code file from your action repository to the filesystem path `/` of the container 9 | 10 | COPY entrypoint.sh /entrypoint.sh 11 | 12 | # Code file to execute when the docker container starts up (`entrypoint.sh`) 13 | ENTRYPOINT ["/entrypoint.sh"] 14 | -------------------------------------------------------------------------------- /.github/actions/docker-images-verification/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 GitHub Actions 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.github/actions/docker-images-verification/README.md: -------------------------------------------------------------------------------- 1 | # Docker image text file verification docker action 2 | 3 | This action verifies whether the images listed in the docker_images.txt file are valid, by requesting their manifests. 4 | 5 | ```yaml 6 | uses: ./.github/actions/docker-images-verification@master 7 | ``` 8 | -------------------------------------------------------------------------------- /.github/actions/docker-images-verification/action.yml: -------------------------------------------------------------------------------- 1 | name: 'Docker images verification' 2 | description: 'Verify docker images by requesting their manifests' 3 | 4 | runs: 5 | using: 'docker' 6 | image: 'Dockerfile' 7 | -------------------------------------------------------------------------------- /.github/actions/docker-images-verification/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -l 2 | 3 | ./cvmfs-singularity-sync --images-file ./docker_images.txt --dry-run 4 | -------------------------------------------------------------------------------- /.github/workflows/docker-image-verify.yml: -------------------------------------------------------------------------------- 1 | name: Docker images verification 2 | # This workflow is triggered on pushes and pull requests to
the repository. 3 | on: [push, pull_request] 4 | 5 | 6 | jobs: 7 | build: 8 | # This job runs on Linux 9 | runs-on: ubuntu-latest 10 | 11 | steps: 12 | - uses: actions/checkout@v1 13 | - uses: ./.github/actions/docker-images-verification 14 | with: 15 | username: ${{ github.actor }} 16 | password: ${{ secrets.GITHUB_PAT }} 17 | registry: ghcr.io 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | tokenfile 2 | __pycache__/ 3 | cleanup.py.bak 4 | cleanup.pyc 5 | cvmfs-singularity-sync.bak 6 | cvmfs-singularity-syncc 7 | dockerhub.py.bak 8 | dockerhub.pyc 9 | docker-creds.json 10 | johntest 11 | johntest2 12 | venv/ 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## CVMFS Singularity Image Repository 2 | 3 | To add images to the repository for use on OSG, follow the instructions on the 4 | documentation pages below: 5 | 6 | - [Containers Overview](https://portal.osg-htc.org/documentation/htc_workloads/using_software/containers-singularity/) 7 | - [Containers - Docker](https://portal.osg-htc.org/documentation/htc_workloads/using_software/containers-docker/) 8 | -------------------------------------------------------------------------------- /cleanup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | Cleanup for Singularity container 4 | 5 | Scan the images in the singularity CVMFS. If an image directory has not been "linked" to for 2 days, 6 | remove the image directory.
7 | 8 | Maintains state in a file in the root singularity directory named .missing_links.json 9 | 10 | """ 11 | import glob 12 | import os 13 | import json 14 | import shutil 15 | import argparse 16 | import time 17 | from datetime import datetime, timedelta 18 | 19 | # JSON structure: 20 | # { 21 | # "missing_links": { 22 | # "/cvmfs/singularity.opensciencegrid.org/.images/7d/ba009871baa50e01d655a80f79728800401bbd0f5e7e18b5055839e713c09f": "" 23 | # ... 24 | # } 25 | # } 26 | 27 | def remove_unlisted_images(current_images, singularity_base, test=False): 28 | """ 29 | Remove the images that are not in the current list 30 | """ 31 | # Get all the image paths 32 | named_image_dirs = set() 33 | for subdir, dirs, files in os.walk(singularity_base): 34 | try: 35 | images_index = dirs.index(".images") 36 | del dirs[images_index] 37 | except ValueError as ve: 38 | pass 39 | for directory in dirs: 40 | path = os.path.join(subdir, directory) 41 | if os.path.islink(path): 42 | named_image_dirs.add(path) 43 | 44 | # Compare the list of current images with the list of images from the FS 45 | for image in current_images: 46 | # Always has the registry as the first entry, remove it 47 | image_dir = image.split('/', 1)[-1] 48 | full_image_dir = os.path.join(singularity_base, image_dir) 49 | if full_image_dir in named_image_dirs: 50 | named_image_dirs.remove(full_image_dir) 51 | 52 | # named_image_dirs should now only contain containers that are 53 | # not in the images 54 | for image_dir in named_image_dirs: 55 | print("Removing deleted image: %s" % image_dir) 56 | if not test: 57 | try: 58 | os.unlink(image_dir) 59 | except OSError as e: 60 | print("Failed to remove deleted image: %s" % e) 61 | 62 | 63 | 64 | def cleanup(delay=2, test=False, 65 | singularity_base='/cvmfs/singularity.opensciencegrid.org', 66 | max_per_cycle=50): 67 | '''Clean up unlinked singularity images''' 68 | json_location = os.path.join(singularity_base, '.missing_links.json') 69 | # Read in the old json, 
if it exists 70 | json_missing_links = {} 71 | try: 72 | with open(json_location) as json_file: 73 | json_missing_links = json.load(json_file)['missing_links'] 74 | except (IOError, ValueError): 75 | # File is missing, unreadable, or damaged 76 | pass 77 | 78 | # Get all the images in the repo 79 | 80 | # Walk the directory /cvmfs/singularity.opensciencegrid.org/.images/* 81 | image_dirs = glob.glob(os.path.join(singularity_base, '.images/*/*')) 82 | 83 | # Walk the named image dirs 84 | named_image_dirs = [] 85 | for subdir, dirs, files in os.walk(singularity_base): 86 | try: 87 | images_index = dirs.index(".images") 88 | del dirs[images_index] 89 | except ValueError as ve: 90 | pass 91 | for directory in dirs: 92 | path = os.path.join(subdir, directory) 93 | if os.path.islink(path): 94 | named_image_dirs.append(path) 95 | 96 | # For named image dir, look at the what the symlink points at 97 | for named_image in named_image_dirs: 98 | link_target = os.readlink(named_image) 99 | while link_target in image_dirs: 100 | image_dirs.remove(link_target) 101 | # Remove linked image from json (in case link is restored) 102 | json_missing_links.pop(link_target, None) 103 | 104 | # Now, for each image, see if it's in the json 105 | for image_dir in image_dirs: 106 | if image_dir not in json_missing_links: 107 | # Add it to the json 108 | print("Newly found missing link: %s" % (image_dir)) 109 | json_missing_links[image_dir] = int(time.time()) 110 | 111 | # Loop through the json missing links, removing directories if over the `delay` days 112 | expiry = datetime.now() - timedelta(days=delay) 113 | images_removed = 0 114 | for image_dir, last_linked in list(json_missing_links.items()): 115 | date_last_linked = datetime.fromtimestamp(last_linked) 116 | if date_last_linked < expiry: 117 | # Confirm that we're inside the managed directory 118 | if not image_dir.startswith(singularity_base): 119 | continue 120 | # Remove the directory 121 | print("Removing missing link: %s" % 
image_dir) 122 | if not test: 123 | try: 124 | shutil.rmtree(image_dir) 125 | del json_missing_links[image_dir] 126 | except OSError as e: 127 | print("Failed to remove missing link: %s" % e) 128 | 129 | images_removed += 1 130 | if images_removed >= max_per_cycle: 131 | print("Reached limit of cleaning %d images. Stopping cleanup cycle." % images_removed) 132 | break 133 | 134 | # Write out the end json 135 | with open(json_location, 'w') as json_file: 136 | json.dump({"missing_links": json_missing_links}, json_file) 137 | 138 | def main(): 139 | '''Main function''' 140 | args = parse_args() 141 | cleanup(test=args.test) 142 | 143 | def parse_args(): 144 | '''Parse CLI options''' 145 | parser = argparse.ArgumentParser() 146 | 147 | parser.add_argument('--test', action='store_true', 148 | help="Don't remove files, but go through the motions of removing them.") 149 | return parser.parse_args() 150 | 151 | if __name__ == "__main__": 152 | main() 153 | -------------------------------------------------------------------------------- /cvmfs-singularity-sync: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # NOTE: 4 | # This file is forked from singularity 2.2's "cli.py". I have left the 5 | # copyright notice below untouched; this file remains under the same license. 6 | # - Brian Bockelman 7 | 8 | ''' 9 | 10 | bootstrap.py: python helper for Singularity command line tool 11 | 12 | Copyright (c) 2016, Vanessa Sochat. All rights reserved. 13 | 14 | "Singularity" Copyright (c) 2016, The Regents of the University of California, 15 | through Lawrence Berkeley National Laboratory (subject to receipt of any 16 | required approvals from the U.S. Dept. of Energy). All rights reserved. 17 | 18 | This software is licensed under a customized 3-clause BSD license. Please 19 | consult LICENSE file distributed with the sources of this project regarding 20 | your rights to use or distribute this software. 
NOTICE. This Software was developed under funding from the U.S. Department of
Energy and the U.S. Government consequently retains certain rights. As such,
the U.S. Government has been granted for itself and others acting on its
behalf a paid-up, nonexclusive, irrevocable, worldwide license in the Software
to reproduce, distribute copies to the public, prepare derivative works, and
perform publicly and display publicly, and to permit other to do so.


'''

import sys
import argparse
import os
import errno
import fnmatch
import json
import urllib.request, urllib.error, urllib.parse
import hashlib
import traceback
import subprocess
import dockerhub
import cleanup
import sqlitedict
import glob

# Per-registry credentials, keyed by registry URL ("https://...").
# Populated in main() from the --docker-creds JSON file; empty dict means
# anonymous access.
DOCKER_CREDS = dict()

def main():
    """Parse CLI options, then publish (or, with --dry-run, verify) either a
    single image (--docker) or every image listed in a file (--images-url /
    --images-file).  Returns a nonzero exit status if any image failed."""
    parser = argparse.ArgumentParser(description="Bootstrap Docker images for Singularity containers deployed to CVMFS")

    # Name of the docker image, required
    parser.add_argument("--docker",
                        dest='docker',
                        help="name of Docker image to bootstrap, in format library/ubuntu:latest",
                        type=str,
                        default=None)

    # Link to the file of the docker images, required
    parser.add_argument("--images-url",
                        dest='filelist',
                        help="URL to download a list of Docker images from",
                        type=str,
                        default=None)

    # Name of the docker image file, required
    parser.add_argument("--images-file",
                        dest='filelist_path',
                        help="Local file path to a list of Docker images",
                        type=str,
                        default=None)

    # root file system of singularity image
    parser.add_argument("--rootfs",
                        dest='rootfs',
                        help="the path for the root filesystem to extract to",
                        type=str,
                        default=None)

    # Manifest cache file
    parser.add_argument("--sqlite-cache",
                        help="SQLite database to cache Docker manifests",
                        type=str,
                        default=None)

    # Docker registry (default is registry-1.docker.io)
    parser.add_argument("--registry",
                        dest='registry',
                        help="the registry path to use, to replace registry-1.docker.io",
                        type=str,
                        default=None)

    # Flag to indicate a token is not required
    parser.add_argument("--no-token",
                        dest='notoken',
                        action="store_true",
                        help="Indicate that auth is not required",
                        default=False)

    # File with access token
    parser.add_argument("--docker-creds",
                        help="JSON file with Docker credentials",
                        type=argparse.FileType('r'))

    # Flag to indicate dry-run
    parser.add_argument("--dry-run",
                        dest='dryrun',
                        action="store_true",
                        help="Indicate that this is a dry-run",
                        default=False)

    # NOTE(review): bare except also swallows the SystemExit that argparse
    # raises for bad options, turning usage errors into exit status 0.
    try:
        args = parser.parse_args()
    except:
        parser.print_help()
        sys.exit(0)

    if args.docker_creds:
        global DOCKER_CREDS
        DOCKER_CREDS = json.load(args.docker_creds)
        args.docker_creds.close()

    # Manifest cache shared by all images in this run; autocommit persists
    # entries as they are added.
    manifest_cache = sqlitedict.SqliteDict(args.sqlite_cache, tablename='manifests', autocommit=True)

    # Find root filesystem location
    if args.rootfs:
        singularity_rootfs = args.rootfs
    else:
        singularity_rootfs = '/cvmfs/singularity.opensciencegrid.org'
    singularity_rootfs = os.path.abspath(singularity_rootfs)

    # Does the registry require a token?
    doauth = not args.notoken

    # Do we have a docker image specified?
    if not args.docker and not (args.filelist or args.filelist_path):
        print("No docker image or file list specified..", file=sys.stderr)
        return 1

    currentImages = []
    if args.docker:
        image = args.docker
        if not args.dryrun:
            return publish_image(image, singularity_rootfs, args.registry, doauth, manifest_cache)
        else:
            # NOTE(review): verify_image is not defined in the visible part of
            # this file — presumably defined later; confirm it exists.
            return verify_image(image, args.registry, doauth, manifest_cache)
    else:
        final_retval = 0
        failed_images = []
        #fp = urllib.request.urlopen(args.filelist).read().decode().split('\n')
        # The guard above ensures at least one of filelist / filelist_path is
        # set, so fp is always bound before the loop.
        if args.filelist:
            fp = urllib.request.urlopen(args.filelist).read().decode().split('\n')
        else:
            if args.filelist_path:
                fp = open(args.filelist_path, "r").read().split('\n')
        for image in fp:
            image = image.strip()
            # Skip comments and blank lines in the image list.
            if image.startswith("#") or not image:
                continue
            registry, namespace, repo_name, repo_tag = parse_image(image)
            print("Working on image: {}".format(image))

            if '*' in repo_tag: # Treat wildcards as a glob
                try:
                    tag_names = get_tags(namespace, repo_name, registry=registry, auth=doauth)
                except Exception as ex:
                    image = '%s/%s/%s' % (registry, namespace, repo_name)
                    print("Failed to get tags for image: {}".format(image))
                    traceback.print_exc()
                    print(ex)
                    # Fallback: if the registry cannot be queried, keep the
                    # tags that already exist on disk so cleanup does not
                    # delete them, then move on to the next image.
                    glob_path = os.path.join(singularity_rootfs, namespace, repo_name + ":" + repo_tag)
                    existing_images = glob.glob(glob_path)

                    # Remove the prepended singularity_rootfs and slash
                    existing_images = [x[len(singularity_rootfs)+1:] for x in existing_images]
                    for existing_image in existing_images:
                        registry, namespace, repo_name, repo_tag = parse_image(existing_image)
                        currentImages.append('%s/%s/%s:%s' % (registry, namespace, repo_name, repo_tag))

                    continue
                # Expand the wildcard against the tags the registry reported.
                repo_tag = fnmatch.filter(tag_names, repo_tag)
            else:
                repo_tag = [repo_tag]

            for tag in repo_tag:
                image = '%s/%s/%s:%s' % (registry, namespace, repo_name, tag)
                currentImages.append(image)
                retval = 1
                tries = 3
                # Retry each image up to `tries` times; the bare `break` below
                # ends the retry loop after the first attempt that does not
                # raise.
                for i in range(tries):
                    if not args.dryrun:
                        try:
                            retval = publish_image(image, singularity_rootfs, registry, doauth, manifest_cache)
                        except Exception as ex:
                            if i < tries -1:
                                print("Failed to publish image: {}".format(image))
                                traceback.print_exc()
                                print("Retry image {}".format(image))
                                continue
                            else:
                                print("Tried {} times ".format(tries) + "for image {}".format(image) + ", giving up")
                    else:
                        try:
                            retval = verify_image(image, registry, doauth, manifest_cache)
                        except Exception as ex:
                            if i < tries -1:
                                print("Failed to verify image: {}".format(image))
                                traceback.print_exc()
                                print("Retry image {}".format(image))
                                continue
                            else:
                                print("Tried {} times ".format(tries) + "for image {}".format(image))
                    break
                if retval:
                    final_retval = retval
                    failed_images.append("{}".format(image))
        print("All requested images have been attempted; final return code: %d" % final_retval)
        if final_retval:
            print("Failed images:")
            print(*failed_images, sep = "\n")
        if not args.dryrun:
            print("Cleaning up unlinked images")
            start_txn(singularity_rootfs)
            cleanup.remove_unlisted_images(currentImages, singularity_rootfs)
            cleanup.cleanup(singularity_base=singularity_rootfs)
            publish_txn()
        return final_retval


# Module-level flag: True while a cvmfs_server transaction is open.
_in_txn = False
def start_txn(singularity_rootfs):
    """Open a CVMFS transaction (idempotent: a no-op if one is already open,
    or if the rootfs is not the managed /cvmfs path).  Returns nonzero on
    failure, falsy/None on success."""
    global _in_txn
    if _in_txn:
        return 0
    if not singularity_rootfs.startswith("/cvmfs/singularity.opensciencegrid.org"):
        return 0
    # A stale lock file means a previous transaction was left open; abort it
    # before starting a fresh one.
    if os.path.exists("/var/spool/cvmfs/singularity.opensciencegrid.org/in_transaction.lock"):
        result = os.system("cvmfs_server abort -f singularity.opensciencegrid.org")
        if result:
            print("Failed to abort lingering transaction (exit status %d)." % result, file=sys.stderr)
            return 1
    result = os.system("cvmfs_server transaction singularity.opensciencegrid.org")
    if result:
        print("Transaction start failed (exit status %d); will not attempt update." % result, file=sys.stderr)
        return 1
    _in_txn = True

    # Test CVMFS mount if applicable.
    test_dir = os.path.join(singularity_rootfs, "library")
    if not os.path.exists(test_dir):
        try:
            os.makedirs(test_dir)
        except OSError as oe:
            if oe.errno != errno.EEXIST:
                raise


def get_tags(username, repo, registry=None, auth=None):
    """Return the list of tag names for username/repo on the given registry,
    looking up credentials in DOCKER_CREDS by registry URL."""
    if registry != "registry.hub.docker.com":
        if "://" not in registry:
            registry = "https://%s" % registry
        auth = DOCKER_CREDS.get(registry, {})
        hub = dockerhub.DockerHub(url=registry, namespace=username, repo=repo, **auth)
    else:
        auth = DOCKER_CREDS.get('https://registry.hub.docker.com', {})
        hub = dockerhub.DockerHub(**auth)
    tag_names = []
    for tag in hub.tags(username, repo):
        tag_names.append(tag['name'])
    return tag_names

def publish_txn():
    """Publish the open CVMFS transaction, if any; returns the cvmfs_server
    exit status (0 when there was nothing to publish)."""
    global _in_txn
    if _in_txn:
        _in_txn = False
        return os.system("cvmfs_server publish singularity.opensciencegrid.org")
    return 0


def make_final_symlink(image_dir, singularity_rootfs, namespace, repo_name, repo_tag):
    """
    Create symlink: $ROOTFS/.images/$HASH -> $ROOTFS/$NAMESPACE/$IMAGE:$TAG
    """
    final_path = os.path.join(singularity_rootfs, namespace, "%s:%s" % (repo_name, repo_tag))
    final_dir = os.path.split(final_path)[0]
    if not os.path.exists(final_dir):
        # Any filesystem change requires an open transaction first.
        retval = start_txn(singularity_rootfs)
        if retval:
            return retval
        try:
            os.makedirs(final_dir)
        except OSError as oe:
            if oe.errno != errno.EEXIST:
                raise
    if os.path.exists(final_path):
        # Final symlink exists and is already correct.
        link_value = os.readlink(final_path)
        if link_value == image_dir:
            print("Image is already latest revision.")
            return 0
        # Otherwise, clear the symlink; we will recreate. Since CVMFS is transactional,
        # we don't care that an unlink / symlink is not atomic.
        retval = start_txn(singularity_rootfs)
        if retval:
            return retval
        os.unlink(final_path)
    retval = start_txn(singularity_rootfs)
    if retval:
        return retval
    os.symlink(image_dir, final_path)
    return 0


def parse_image(image):
    """
    Parse an image string into image components, setting appropriate defaults.
    Returns a tuple of (registry, namespace, repo_name, repo_tag).
    """

    # INPUT PARSING -------------------------------------------
    # Parse registry, image name, repo name, and namespace
    # Support different formats:
    #   opensciencegrid/osgvo-julia:latest
    #   opensciencegrid/osgvo-ants
    #   openjdk:8
    #   containers.ligo.org/lscsoft/lalsuite/lalsuite-v6.53:stretch
    registry = "registry.hub.docker.com"
    # First split the docker image name by /
    split_image = image.split('/')

    # If there are two parts, we have namespace with repo (and maybe tab)
    if len(split_image) == 2:
        namespace = split_image[0]
        image = split_image[1]
    elif len(split_image) > 2:
        # We have a custom registry
        registry = split_image[0]
        #print("Custom registry:", registry)
        namespace = split_image[1]
        image = "/".join(split_image[2:])
    # Otherwise, we must be using library namespace
    else:
        namespace = "library"
        image = split_image[0]

    # Now split the docker image name by :
    image = image.split(':')
    if len(image) == 2:
        repo_name = image[0]
        repo_tag = image[1]

    # Otherwise, assume latest of an image
    else:
        repo_name = image[0]
        repo_tag = "latest"

    return registry, namespace, repo_name, repo_tag

def get_manifest(hub, namespace, repo_name, repo_tag, manifest_cache):
    """Return (manifest, digest) for the tag, consulting manifest_cache keyed
    by the registry's docker-content-digest header so an unchanged manifest
    is not downloaded twice."""
    # HEAD request: only fetches headers, enough to learn the digest.
    metadata = hub.manifest(namespace, repo_name, repo_tag, head=True)
    digest = metadata.headers['docker-content-digest']

    if digest in manifest_cache:
        return manifest_cache[digest], digest
    else:
        manifest = hub.manifest(namespace, repo_name, repo_tag)
        manifest_cache[digest] = manifest
        return manifest, digest

def publish_image(image, singularity_rootfs, registry, doauth, manifest_cache):
    """Publish one image into the CVMFS tree.  (Definition continues beyond
    the visible portion of this file.)"""

    # Tell the user the namespace, repo name and tag
    registry, namespace, repo_name, repo_tag = parse_image(image)
    print("Docker image to publish: %s/%s/%s:%s" % (registry, namespace, repo_name, repo_tag))


    # IMAGE METADATA -------------------------------------------
    # Use Docker Registry API (version 2.0) to get images ids, manifest

    # Get an image manifest - has image ids to parse, and will be
    # used later to get Cmd
    # Prepend "https://" to the registry
    if "://" not in registry:
        registry = "https://%s" % registry
    auth = DOCKER_CREDS.get(registry, {})
    hub = dockerhub.DockerHub(url=registry, namespace=namespace, repo=repo_name, **auth)
    manifest, digest = get_manifest(hub, namespace, repo_name, repo_tag, manifest_cache)

    # Calculate a unique hash across all layers. We'll use that as the identifier
    # for the final image.
391 | if manifest['schemaVersion'] == 1 and 'fsLayers' in manifest: 392 | hasher = hashlib.sha256() 393 | for layerinfo in manifest['fsLayers']: 394 | layer_hash = layerinfo['blobSum'].split(":")[-1].encode('utf-8') 395 | hasher.update(layer_hash) 396 | image_hash = hasher.hexdigest() 397 | elif manifest['schemaVersion'] == 2 and 'layers' in manifest: 398 | hasher = hashlib.sha256() 399 | for layerinfo in manifest['layers']: 400 | layer_hash = layerinfo['digest'].split(":")[-1].encode('utf-8') 401 | hasher.update(layer_hash) 402 | image_hash = hasher.hexdigest() 403 | else: 404 | # Use the Docker-Content-Digest to identify the image 405 | image_hash = digest 406 | 407 | # The image is staged to $ROOTFS/.images/$HASH 408 | # Docker-Content-Digest uses format 'sha256:dead...beef' 409 | # Split it after the hash type ^^^^^^^^^ 410 | # plus two chars ^^^^^^^^^ 411 | sep = image_hash.find(':') 412 | image_parentdir = os.path.join(singularity_rootfs, ".images", image_hash[0:sep+3]) 413 | image_dir = os.path.join(image_parentdir, image_hash[sep+3:]) 414 | 415 | # If the image has already been staged, simply return. 416 | if os.path.exists(image_dir): 417 | make_final_symlink(image_dir, singularity_rootfs, namespace, repo_name, repo_tag) 418 | return publish_txn() 419 | else: 420 | print("Image dir, %s, does not exist; triggering CVMFS mount." 
% image_dir) 421 | retval = start_txn(singularity_rootfs) 422 | if os.path.exists(image_dir): # Same as above 423 | make_final_symlink(image_dir, singularity_rootfs, namespace, repo_name, repo_tag) 424 | return publish_txn() 425 | if retval: 426 | return retval 427 | try: 428 | # Create the parent directory, but not the image directory itself 429 | # Singularity will create the image directory on successful conversion 430 | os.makedirs(image_parentdir) 431 | except OSError as oe: 432 | if oe.errno != errno.EEXIST: 433 | raise 434 | 435 | # DOWNLOAD IMAGE -------------------------------------------- 436 | # Use /var/tmp for singularity temporary files 437 | singularity_env = os.environ.copy() 438 | singularity_env['TMPDIR'] = '/var/tmp' 439 | 440 | if 'username' in auth: 441 | singularity_env['SINGULARITY_DOCKER_USERNAME'] = auth['username'] 442 | if 'password' in auth: 443 | singularity_env['SINGULARITY_DOCKER_PASSWORD'] = auth['password'] 444 | 445 | print("Calling singularity to build sandbox from image") 446 | subprocess.check_call( 447 | ['singularity', '--silent', 'build', 448 | '--disable-cache=true', # Images are only downloaded once 449 | '--force', # Don't get stuck at a prompt if the target somehow exists 450 | '--fix-perms', 451 | '--sandbox', 452 | image_dir, 'docker://' + image], 453 | env=singularity_env) 454 | 455 | # Various fixups to make the image compatible with CVMFS and singularity. 456 | srv = os.path.join(image_dir, "srv") 457 | cvmfs = os.path.join(image_dir, "cvmfs") 458 | if not os.path.exists(srv): 459 | os.makedirs(srv) 460 | if not os.path.exists(cvmfs): 461 | os.makedirs(cvmfs) 462 | 463 | make_final_symlink(image_dir, singularity_rootfs, namespace, repo_name, repo_tag) 464 | # Publish CVMFS as necessary. 
465 | return publish_txn() 466 | 467 | def verify_image(image, registry, doauth, manifest_cache): 468 | 469 | # Tell the user the namespace, repo name and tag 470 | registry, namespace, repo_name, repo_tag = parse_image(image) 471 | print("Docker image to verify: %s/%s/%s:%s" % (registry, namespace, repo_name, repo_tag)) 472 | 473 | # IMAGE METADATA ------------------------------------------- 474 | # Use Docker Registry API (version 2.0) to get images ids, manifest 475 | 476 | # Get an image manifest - has image ids to parse, and will be 477 | # used later to get Cmd 478 | # Prepend "https://" to the registry 479 | if "://" not in registry: 480 | registry = "https://%s" % registry 481 | auth = DOCKER_CREDS.get(registry, {}) 482 | hub = dockerhub.DockerHub(url=registry, namespace=namespace, repo=repo_name, **auth) 483 | retval = 0 484 | try: 485 | hub.manifest(namespace, repo_name, repo_tag, head=True) 486 | print(repo_name + ":" + repo_tag + " manifest found") 487 | retval = 0 488 | except Exception as ex: 489 | print(repo_name + ":" + repo_tag + " manifest not found") 490 | traceback.print_exc() 491 | retval = 1 492 | raise 493 | return retval 494 | 495 | if __name__ == '__main__': 496 | sys.exit(main()) 497 | -------------------------------------------------------------------------------- /cvmfs-singularity-sync.service: -------------------------------------------------------------------------------- 1 | 2 | [Unit] 3 | Description=singularity.opensciencegrid.org repo update 4 | After=network.target cvmfs-singularity.opensciencegrid.org.mount 5 | Wants=network.target cvmfs-singularity.opensciencegrid.org.mount 6 | 7 | [Service] 8 | ExecStart=/home/cse496/bbockelm/cvmfs-singularity-sync/cvmfs-singularity-sync --images-url https://raw.githubusercontent.com/opensciencegrid/cvmfs-singularity-sync/master/docker_images.txt 9 | WorkingDirectory=/home/cse496/bbockelm/cvmfs-singularity-sync 10 | #RestartSec=15min 11 | #Restart=always 12 | User=bbockelm 13 | Nice=19 14 | 
LimitNOFILE=65536 15 | 16 | # Make sure python sends its stderr immediately. 17 | Environment=PYTHONUNBUFFERED=1 18 | 19 | [Install] 20 | WantedBy=multi-user.target 21 | 22 | -------------------------------------------------------------------------------- /cvmfs-singularity-sync.timer: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=restart cvmfs-singularity-sync every 15 minutes 3 | After=syslog.target network.target 4 | Wants=network.target 5 | 6 | [Timer] 7 | OnActiveSec=0s 8 | OnUnitActiveSec=15m 9 | RandomizedDelaySec=1m 10 | 11 | [Install] 12 | WantedBy=timers.target 13 | -------------------------------------------------------------------------------- /docker-creds.example.json: -------------------------------------------------------------------------------- 1 | { 2 | "https://registry.hub.docker.com": { 3 | "username": "janedoe", 4 | "password": "hunter2" 5 | } 6 | } 7 | 8 | -------------------------------------------------------------------------------- /docker_images.txt: -------------------------------------------------------------------------------- 1 | # This file is a list of Docker images to synchronize to singularity.opensciencegrid.org. 
2 | 3 | # Fairly common Linux distros 4 | debian:latest 5 | debian:stable 6 | debian:testing 7 | debian:unstable 8 | ubuntu:latest 9 | fedora:latest 10 | rockylinux:8 11 | 12 | # Common programming environments 13 | python:latest 14 | python:3.4 15 | python:2.7 16 | openjdk:latest 17 | openjdk:8 18 | openjdk:9 19 | gcc:latest 20 | julia:latest 21 | r-base:latest 22 | continuumio/anaconda 23 | gromacs/gromacs:* 24 | rocker/r-ver:latest 25 | lammps/lammps:stable_29Sep2021_centos7_openmpi_py3 26 | pytorch/pytorch:1.12.1-cuda11.3-cudnn8-devel 27 | pytorch/pytorch:1.13.0-cuda11.6-cudnn8-devel 28 | nvidia/opencl:runtime-ubuntu16.04 29 | 30 | # OSG-specific worker node images 31 | opensciencegrid/osg-3.3-wn-el6 32 | opensciencegrid/osg-3.3-wn-el7 33 | opensciencegrid/osg-wn:3.3-el7 34 | opensciencegrid/osg-wn:3.3-el6 35 | 36 | # OSG-specific images oriented to developers and testers 37 | opensciencegrid/osg-wn:3.3-testing-el7 38 | opensciencegrid/osg-wn:3.3-devel-el7 39 | 40 | # htc/ - this are the images replacing the osgvo-* ones 41 | hub.opensciencegrid.org/htc/centos:7 42 | hub.opensciencegrid.org/htc/debian:12 43 | hub.opensciencegrid.org/htc/deeplabcut:3.0.0rc4 44 | hub.opensciencegrid.org/htc/gromacs:2023.4 45 | hub.opensciencegrid.org/htc/gromacs:2024.2 46 | hub.opensciencegrid.org/htc/matlab-runtime:R2023a 47 | hub.opensciencegrid.org/htc/minimal:0 48 | hub.opensciencegrid.org/htc/pytorch:2.3.1-cuda11.8 49 | hub.opensciencegrid.org/htc/rocky:8 50 | hub.opensciencegrid.org/htc/rocky:9 51 | hub.opensciencegrid.org/htc/rocky:8-cuda-11.0.3 52 | hub.opensciencegrid.org/htc/rocky:9-cuda-12.6.0 53 | hub.opensciencegrid.org/htc/scikit-learn:1.3 54 | hub.opensciencegrid.org/htc/tensorflow:2.15 55 | hub.opensciencegrid.org/htc/ubuntu:20.04 56 | hub.opensciencegrid.org/htc/ubuntu:22.04 57 | hub.opensciencegrid.org/htc/ubuntu:24.04 58 | 59 | # OSGVO 60 | opensciencegrid/npjoodi 61 | opensciencegrid/osgvo-ants 62 | opensciencegrid/osgvo-beagle 63 | 
opensciencegrid/osgvo-biomedinfo 64 | opensciencegrid/osgvo-blaylockbk 65 | opensciencegrid/osgvo-debian-10:latest 66 | opensciencegrid/osgvo-el6 67 | opensciencegrid/osgvo-el6-cuda 68 | opensciencegrid/osgvo-el6-feltus 69 | opensciencegrid/osgvo-el7 70 | opensciencegrid/osgvo-el7-cuda10:* 71 | opensciencegrid/osgvo-el8 72 | opensciencegrid/osgvo-el9 73 | opensciencegrid/osgvo-freesurfer:* 74 | opensciencegrid/osgvo-gromacs:* 75 | opensciencegrid/osgvo-gromacs-gpu 76 | opensciencegrid/osgvo-gromacs-plumed 77 | opensciencegrid/osgvo-julia:* 78 | opensciencegrid/osgvo-matlab-runtime:* 79 | opensciencegrid/osgvo-opensim:latest 80 | opensciencegrid/osgvo-psi4:latest 81 | opensciencegrid/osgvo-quantum-espresso:* 82 | opensciencegrid/osgvo-r:* 83 | opensciencegrid/osgvo-raspa2:* 84 | opensciencegrid/osgvo-torch 85 | opensciencegrid/osgvo-ubuntu-18.04 86 | opensciencegrid/osgvo-ubuntu-20.04 87 | opensciencegrid/osgvo-ubuntu-xenial 88 | opensciencegrid/osgvo-ucsd-grover:latest 89 | opensciencegrid/osgvo-xenon:* 90 | opensciencegrid/tensorflow:* 91 | opensciencegrid/tensorflow-gpu:* 92 | paesanilab/psi4:latest 93 | pegasus/osg-el7:latest 94 | rinnocente/qe-full-6.2.1:latest 95 | rynge/einsteintoolkit:latest 96 | rynge/sra:latest 97 | rynge/osg-mcf10-mod:latest 98 | rynge/osg-tensorflow-gpu:latest 99 | rynge/respicio:latest 100 | rynge/tensorflow-cowsay:latest 101 | ssthapa/freesurferosg:5.1 102 | ssthapa/freesurferosg:5.3 103 | ssthapa/freesurferosg:6.0 104 | ssthapa/freesurferosg:latest 105 | lukasheinrich/folding:* 106 | foldingathome/fah-gpu:* 107 | djw8605/fah-gpu:* 108 | djw8605/rnamake:* 109 | krespicio/network-ga:* 110 | 111 | # OSGVO - user defined images 112 | deltarod/peaklearnerslurm:v2 113 | aahong/osg_fcc_text 114 | adwasser/slomo 115 | agladstein/data_science_popgen_notebook:* 116 | agladstein/msprime:latest 117 | agladstein/simprily:latest 118 | agladstein/simprily:version1 119 | agladstein/simprily_autobuild 120 | 
amogan/larcv2:ub20.04-cuda11.0-pytorch1.7.1-larndsim-cvmfs 121 | anniesoft/toolanalysis 122 | anniesoft/wcsim 123 | anniesoft/genie3 124 | anniesoft/g4dirt 125 | arburks/aris-convert:latest 126 | areias/viral-mutation:latest 127 | arnaudbecheler/quetzal-nest:* 128 | arnaudbecheler/quetzal-eggs:* 129 | arnaudbecheler/quetzal-open-science-grid:* 130 | blaylockbk/miniconda3_osg:latest 131 | blibgober/pdf_converter:latest 132 | bpschenke/ipglasma:latest 133 | cailmdaley/sptlab:osg* 134 | cathrine98/osg-clarkson_mondal:latest 135 | cathrine98/osg-beast2 136 | cathrine98/r-vmanthena:latest 137 | christinalk/slim:latest 138 | chunshen1987/iebe-music:* 139 | chunshen1987/ipglasmaframework:* 140 | clkwisconsin/spacetimer:latest 141 | cnatzke/griffin_2photon_sim:* 142 | cnatzke/griffin_simulation:* 143 | cnatzke/ntuple:* 144 | cnatzke/ntuple2eventtree_2photon:* 145 | cnatzke/prepare_files:* 146 | cnatzke/grsisort:* 147 | dmbala/r-3.4.1 148 | dmbala/r-ebcrimage 149 | drtmfigy/hjets_herwig7 150 | econtal/numpy-mkl:latest 151 | efajardo/osgvo-nsides:latest 152 | efajardo/astroflow:latest 153 | evbauer/mesa_lean:10108.01 154 | evbauer/mesa_lean:r22.05.1.01 155 | ghcr.io/devinbayly/ros_noetic_ouster:latest 156 | grassla/osg_testing:latest 157 | goodgolden5/randy:6.0 158 | goodgolden5/randy:7.0 159 | habg/lammps_sw0:23Jun2022 160 | huckb/clas6sim:latest 161 | jamessaxon/postgres-routing:latest 162 | jamessaxon/postgres-routing:a0 163 | jasoncpatton/qutip_kwant:v1.2 164 | jborrel00/pyccx-ubuntu16:latest 165 | jborrel00/pyccx-ubuntu16:development 166 | jiahe58/tensorflow:latest 167 | jonlam/osg_ubuntu:latest 168 | jml230/osg-amber:* 169 | justbennet/seas:lidar 170 | kai2019/osg-fsl:latest 171 | k3jph/torch-diffeq:latest 172 | k3jph/python-optimization:latest 173 | leofang/cthyb-ohmic 174 | m8zeng/julia-packages 175 | hub.opensciencegrid.org/matyasosg/testimage:v4 176 | mfrayer/perl-stats 177 | molssi/qcarchive_worker_openff:* 178 | nathanjmcl/gpaw-vdw-repository:latest 179 | 
nipy/mindboggle 180 | nkern/21cmfast_env 181 | npanicker/r-desolve:* 182 | npavlovikj/prokevo:latest 183 | npcooley/heron:latest 184 | npcooley/esmfold:* 185 | npcooley/synextend:* 186 | docker.opencarp.org/opencarp/opencarp:latest 187 | parabola505/geospatxgboost:* 188 | rafaelnalin/r-ver-openblas:latest 189 | rasa/rasa:latest 190 | rasa/rasa:2.8.15 191 | rasa/rasa-x:latest 192 | relugzosiraba/kwant_adaptive:v1 193 | relugzosiraba/juqbox_env:v1 194 | relugzosiraba/juqbox_env:v2 195 | researchcomputing/namd_212_multicore_osgvo-el6:latest 196 | shilpac/my_mindboggle 197 | showmic09/dream3d:6.5.121 198 | sjmay/zephyr:latest 199 | snirgaz/osg_julia:latest 200 | ssrujanaa/catsanddogs:latest 201 | sswiston/revbayes:* 202 | syavuz/tidyrstan:latest 203 | syavuz/modelcomparison:v1 204 | sylabsio/lolcow:latest 205 | teamcompas/compas:* 206 | vedularaghu/unet_wf:latest 207 | weiminghu123/panen:default 208 | weiphy/skopt 209 | xevra/gwalk:* 210 | xevra/sparse_kernel_docker:latest 211 | xevra/sparse_kernel_docker:RIFT 212 | xevra/sparse_kernel_docker:stable 213 | xwcl/xpipeline:* 214 | yxfu93/julia:latest 215 | ppaschos/koto-dev:latest 216 | chiehlin0212/koto-dev:latest 217 | sickleinafrica/bcftools:1.11 218 | sickleinafrica/plink1.9:1.9 219 | sickleinafrica/r-tidyverse-qqman:latest 220 | sickleinafrica/beagle:5.4 221 | sickleinafrica/eagle2:latest 222 | lifebitai/shapeit4:latest 223 | lifebitai/impute2:latest 224 | biocontainers/vcftools:v0.1.16-1-deb_cv1 225 | rhughwhite/minimac3:2.0.1 226 | biocontainers/minimac4:v1.0.0-2-deb_cv1 227 | nnesquivelr/decam_proc_base:* 228 | dxando/rift-container:latest 229 | mascencio/al9g4bnb:latest 230 | 231 | # Geant4 simulation and analysis tools 232 | physino/gears 233 | physino/mingle 234 | physino/heprapp 235 | physino/root:* 236 | 237 | # MINT project (rynge) 238 | ankushumn/rsatest 239 | mintproject/ankush:latest 240 | mintproject/base-ubuntu16:latest 241 | mintproject/base-ubuntu18:latest 242 | mintproject/floodseverityindex:* 243 | 
mintproject/cycles:* 244 | mintproject/dssat:* 245 | mintproject/economic:* 246 | mintproject/kimetrica:* 247 | mintproject/pihm:* 248 | mintproject/hand:* 249 | mintproject/sentinel:* 250 | mintproject/pihm2cycles:* 251 | mintproject/weather-generator:* 252 | mintproject/mintviz:* 253 | mintproject/modflow-2005:* 254 | mintproject/topoflow:* 255 | mintproject/droughtindices:* 256 | 257 | # XENONnT (rynge) 258 | xenonnt/base-environment:* 259 | xenonnt/montecarlo:* 260 | xenonnt/osg_dev:* 261 | 262 | # Lightweight images 263 | busybox 264 | alpine 265 | 266 | # Electron Ion Collider images 267 | whit2333/eic-slic:latest 268 | argonneeic/evochain:v* 269 | argonneeic/fpadsim:v* 270 | # old dockerhub globbing for backwards compatibility during transition to ghcr.io 271 | eicweb/eic_xl:nightly 272 | eicweb/eic_cuda:nightly 273 | eicweb/eic_dev_cuda:nightly 274 | eicweb/eic_xl:25.04.0-stable 275 | eicweb/eic_xl:25.04-stable 276 | eicweb/eic_xl:*-stable 277 | # globs fail on ghcr.io for now, so explicit listing 278 | # ghcr.io/eic/eic_xl:*-stable 279 | ghcr.io/eic/eic_xl:24.03.1-stable 280 | ghcr.io/eic/eic_xl:24.03-stable 281 | ghcr.io/eic/eic_xl:24.04.0-stable 282 | ghcr.io/eic/eic_xl:24.04-stable 283 | ghcr.io/eic/eic_xl:24.05.0-stable 284 | ghcr.io/eic/eic_xl:24.05.2-stable 285 | ghcr.io/eic/eic_xl:24.05-stable 286 | ghcr.io/eic/eic_xl:24.06.0-stable 287 | ghcr.io/eic/eic_xl:24.06-stable 288 | ghcr.io/eic/eic_xl:24.07.0-stable 289 | ghcr.io/eic/eic_xl:24.07-stable 290 | ghcr.io/eic/eic_xl:24.08.0-stable 291 | ghcr.io/eic/eic_xl:24.08.1-stable 292 | ghcr.io/eic/eic_xl:24.08-stable 293 | ghcr.io/eic/eic_xl:24.09.0-stable 294 | ghcr.io/eic/eic_xl:24.09-stable 295 | ghcr.io/eic/eic_xl:24.10.0-stable 296 | ghcr.io/eic/eic_xl:24.10.1-stable 297 | ghcr.io/eic/eic_xl:24.10-stable 298 | ghcr.io/eic/eic_xl:24.11.0-stable 299 | ghcr.io/eic/eic_xl:24.11.1-stable 300 | ghcr.io/eic/eic_xl:24.11.2-stable 301 | ghcr.io/eic/eic_xl:24.11-stable 302 | ghcr.io/eic/eic_xl:24.12.0-stable 
303 | ghcr.io/eic/eic_xl:24.12-stable 304 | ghcr.io/eic/eic_xl:25.01.0-stable 305 | ghcr.io/eic/eic_xl:25.01.1-stable 306 | ghcr.io/eic/eic_xl:25.01-stable 307 | ghcr.io/eic/eic_xl:25.02.0-stable 308 | ghcr.io/eic/eic_xl:25.02-stable 309 | ghcr.io/eic/eic_xl:25.03.0-stable 310 | ghcr.io/eic/eic_xl:25.03.1-stable 311 | ghcr.io/eic/eic_xl:25.03-stable 312 | ghcr.io/eic/eic_xl:25.04.0-stable 313 | ghcr.io/eic/eic_xl:25.04-stable 314 | ghcr.io/eic/eic_xl:nightly 315 | ghcr.io/eic/eic_cuda:nightly 316 | ghcr.io/eic/eic_dev_cuda:nightly 317 | raygunkennesaw/tensorflow:1.2.0-py3 318 | 319 | # Common biology tools 320 | # Biocontainers :latest doesn't exist any longer. 321 | #biocontainers/blast 322 | cyverse/rsem-prepare 323 | 324 | # LIGO PyCBC compute nodes 325 | pycbc/pycbc-el7:v1.16.12 326 | pycbc/pycbc-el7:v1.18.3 327 | pycbc/pycbc-el8:v2.3.* 328 | pycbc/pycbc-el8:v2.8.* 329 | pycbc/pycbc-el8:latest 330 | 331 | # CMS worker node 332 | bbockelm/cms:rhel6 333 | bbockelm/cms:rhel7 334 | cmssw/cms:rhel6 335 | cmssw/cms:rhel7 336 | cmssw/cms:rhel8 337 | cmssw/cms:rhel9 338 | cmssw/cms:rhel6-x86_64 339 | cmssw/cms:rhel7-x86_64 340 | cmssw/cms:rhel8-x86_64 341 | cmssw/cms:rhel9-x86_64 342 | cmssw/cms:rhel8-aarch64 343 | cmssw/cms:rhel9-aarch64 344 | cmssw/cms:rhel8-ppc64le 345 | cmssw/cms:rhel6-itb 346 | cmssw/cms:rhel7-itb 347 | cmssw/cms:rhel8-itb 348 | cmssw/cms:rhel9-itb 349 | cmssw/cms:rhel6-itb-x86_64 350 | cmssw/cms:rhel7-itb-x86_64 351 | cmssw/cms:rhel8-itb-x86_64 352 | cmssw/cms:rhel9-itb-x86_64 353 | cmssw/cms:rhel8-itb-aarch64 354 | cmssw/cms:rhel9-itb-aarch64 355 | cmssw/cms:rhel8-itb-ppc64le 356 | cmssw/cms:rhel6-m* 357 | cmssw/cms:rhel7-m* 358 | cmssw/cms:rhel8-m* 359 | efajardo/docker-cms:tensorflow 360 | # CMS worker node with hadoop 361 | kreczko/workernode:centos6 362 | kreczko/workernode:centos7 363 | clelange/slc5-cms:latest 364 | # CMS L1 trigger analysis 365 | cmsl1tanalysis/cmsl1t-dev:* 366 | cmsl1tanalysis/cmsl1t:* 367 | 368 | # ATLAS worker node 369 
| lincolnbryant/atlas-wn 370 | 371 | # ATLAS standalone images 372 | atlas/analysisbase:21.2.4 373 | atlas/athanalysis:21.2.4 374 | 375 | # ATLAS related images 376 | chekanov/centos7hepsim 377 | 378 | # Gluex worker node 379 | rjones30/gluex 380 | rjones30/gluexpro8 381 | rjones30/gluextest 382 | markito3/gluex_docker_devel 383 | markito3/gluex_docker_prod 384 | jeffersonlab/gluex_prod:v1 385 | jeffersonlab/gluex_devel:latest 386 | jeffersonlab/gluex_almalinux_9:latest 387 | 388 | # WIPAC (IceCube) 389 | wipac/fasig_scalable_radio_array 390 | wipac/npx-el6:latest 391 | wipac/npx-el6:test 392 | wipac/skylab:1.0.0 393 | wipac/pyglidein-el8-cuda11:main 394 | icecube/icetray-ml:icetray-v1.8.1-cuda11.8-cudnn8-graphnet-ubuntu22.04-devel 395 | icecube/icetray-base:devel-rocky8-cuda11.8.0-cudnn8 396 | icecube/icetray-base:devel-ubuntu22.04-cuda11.8.0-cudnn8 397 | 398 | 399 | #fMRI Prep 400 | poldracklab/fmriprep 401 | nipreps/fmriprep:20.2.1 402 | nipreps/fmriprep:23.0.0 403 | nxdens/fmriprep-afni:1.0 404 | 405 | # cyverse container camp 406 | evolinc/rmta:1.6 407 | evolinc/evolinc-i:1.6 408 | dajunluo/deepvariant 409 | 410 | # Images for Testing the Integration Between the CyVerse Discovery Environment and OSG 411 | discoenv/osg-word-count:1.0.0 412 | discoenv/osg-test:latest 413 | cyverse/osg-gl:1.0 414 | jbustamante35/testphytoshell:phytoshell 415 | evolinc/osg-rmta:2.1 416 | evolinc/osg-evolinc-i:1.7.4 417 | evolinc/osg-rmta:2.5 418 | evolinc/osg-rmta:2.6.3 419 | cyverse/ngmlr:0.2.7 420 | 421 | # JLab CLAS12 Simulations 422 | jeffersonlab/clas12software:production 423 | jeffersonlab/clas12software:devel 424 | 425 | # JLab Parity Simulations and Analysis 426 | jeffersonlab/remoll:latest 427 | jeffersonlab/remoll:develop 428 | jeffersonlab/japan:latest 429 | jeffersonlab/japan:develop 430 | 431 | # Syracuse University Gravitational Wave Group 432 | sugwg/dbrown:* 433 | sugwg/dfinstad:* 434 | sugwg/prp:* 435 | duncanabrown/nicer:* 436 | chaitanyaafle/nicer:* 437 | 438 | 
# brainlife.io - An online platform for reproducible neuroscience. 439 | brainlife/mrtrix3:3.0_RC3 440 | brainlife/mcr:neurodebian1604-r2017a 441 | brainlife/mcr:r2019a 442 | 443 | # Fermilab VO - Fermigrid worker nodes 444 | fermilab/fnal-wn-el9:* 445 | fermilab/fnal-wn-el8:* 446 | fermilab/fnal-wn-sl7:* 447 | fermilab/fnal-wn-sl6:* 448 | fermilab/fnal-dev-sl7:* 449 | fermilab/benchmark:* 450 | 451 | # NOvA Experiment 452 | ghcr.io/novaexperiment/el7-tensorflow-gpu:latest 453 | ghcr.io/novaexperiment/nova-sl7-novat2k:2020-10-27_freeze 454 | ghcr.io/novaexperiment/nova-sl7-novat2k:aborttest 455 | ghcr.io/novaexperiment/nova-sl7-novat2k:latest 456 | ghcr.io/novaexperiment/nova-sl7-novat2k:nightmare 457 | ghcr.io/novaexperiment/nova-sl7-novat2k:novatest 458 | ghcr.io/novaexperiment/nova-sl7-novat2k:poissondata 459 | ghcr.io/novaexperiment/nova-sl7-novat2k:v1_nonorms 460 | ghcr.io/novaexperiment/nova-sl7-novat2k:v2_withnorms 461 | ghcr.io/novaexperiment/nova-sl7-novat2k:v3_realnddata_cosmics 462 | ghcr.io/novaexperiment/nova-sl7-novat2k:v4_fixcosmicsrock 463 | ghcr.io/novaexperiment/nova-sl7-novat2k:v5_setasimov 464 | ghcr.io/novaexperiment/nova-sl7-novat2k:v6c_nightmareplus 465 | ghcr.io/novaexperiment/nova-sl7-novat2k:v7_pvalue 466 | ghcr.io/novaexperiment/nova-sl7-novat2k:scaledasimov 467 | ghcr.io/novaexperiment/sl7:latest 468 | ghcr.io/novaexperiment/sl7:master 469 | ghcr.io/novaexperiment/sl7:mini 470 | ghcr.io/novaexperiment/sl7:mpichdiy 471 | ghcr.io/novaexperiment/sl7:v1.1.0 472 | ghcr.io/novaexperiment/slf67:latest 473 | egoodman/t2knova_mach3_configdata:PostBANFF 474 | egoodman/mach3_binned_osc:PostBanff 475 | egoodman/mach3_pvalue:latest 476 | 477 | #holosim (tree migration) 478 | astrand/holosim 479 | astrand/popassemble 480 | astrand/foundadmix 481 | 482 | # HTMap/HTCondor Software 483 | htcondor/htmap-exec:* 484 | # cvmfs-singularity-sync does not yet support wildcard tags from hub.opensciencegrid.org 485 | 
#hub.opensciencegrid.org/htcondor/hpc-annex-pilot:* 486 | hub.opensciencegrid.org/htcondor/hpc-annex-pilot:el8 487 | hub.opensciencegrid.org/htcondor/hpc-annex-pilot:latest 488 | 489 | # LIGO - user defined images 490 | containers.ligo.org/bayeswave/igwn-wave-compare:latest 491 | containers.ligo.org/cody.messick/container:latest 492 | containers.ligo.org/joshua.willis/pycbc:latest 493 | containers.ligo.org/james-clark/bayeswave:latest 494 | containers.ligo.org/james-clark/bayeswave:test 495 | containers.ligo.org/james-clark/bilby_pipe_public:latest 496 | containers.ligo.org/james-clark/research-projects-rit/rift:test 497 | containers.ligo.org/james-clark/research-projects-rit/rift:latest 498 | containers.ligo.org/james-clark/research-projects-rit/rift:production 499 | containers.ligo.org/james-clark/research-projects-rit/containers-rift_o4b_jl-chadhenshaw-teobresums_eccentric:latest 500 | containers.ligo.org/rodrigo.tenorio/skyhough-post-processing:master 501 | containers.ligo.org/tessa.carver/pygrb_o3a:latest 502 | atanasi/matlab:v97 503 | atanasi/darkmatter:latest 504 | lucarvirgo/tensorflowmatlab:latest 505 | containers.ligo.org/james-clark/tgr_images/testing_gr_fta:latest 506 | containers.ligo.org/james-clark/tgr_images/lalsuite-master:latest 507 | containers.ligo.org/cwinpy/cwinpy-containers/cwinpy-dev-python38:latest 508 | containers.ligo.org/rhys.poulton/cw-frequencyhough-image:latest 509 | containers.ligo.org/rhys.poulton/cw-frequencyhough-image:escape-datalake 510 | containers.ligo.org/aei-tgr/cvmfs-images/pseob:rd-latest 511 | containers.ligo.org/aei-tgr/cvmfs-images/pseob:fti-latest 512 | containers.ligo.org/aei-tgr/cvmfs-images/pseob:testing 513 | containers.ligo.org/aei-tgr/cvmfs-images/pseob:envs 514 | containers.ligo.org/aei-tgr/cvmfs-images/seob-rom:latest 515 | containers.ligo.org/aei-tgr/cvmfs-images/seob-rom:v5 516 | containers.ligo.org/alan.knee/lal-images/lalsuite-fstatbinary:latest 517 | ghcr.io/ml4gw/hermes/tritonserver:22.12 518 | 
ghcr.io/ml4gw/hermes/tritonserver:22.07 519 | ghcr.io/ml4gw/hermes/tritonserver:22.02 520 | ghcr.io/ml4gw/hermes/tritonserver:23.01 521 | ghcr.io/ml4gw/hermes/tritonserver:24.01 522 | ghcr.io/ml4gw/pinto:main 523 | containers.ligo.org/yannick.lecoeuche/glitch-pe-container/glitch-pe:latest 524 | containers.ligo.org/computing/distributed/igwn-pool-exorciser/gpu_burn:latest 525 | containers.ligo.org/colm.talbot/bilby-pipe-image-sandbox/bilby-pipe-production-python311:latest 526 | containers.ligo.org/colm.talbot/bilby-pipe-image-sandbox/gwpopulation-python311:latest 527 | containers.ligo.org/tomasz.baka/tgr-docker-image/bilby_tgr:latest 528 | containers.ligo.org/nihar.gupte/dingo-ci/dingo:latest 529 | containers.ligo.org/pystamp/pystampas:latest 530 | 531 | # LIGO/VIRGO/KAGRA containers 532 | containers.ligo.org/lscsoft/lalsuite/lalsuite-v6.59:el7 533 | containers.ligo.org/lscsoft/lalsuite/lalsuite-v6.59:stretch 534 | containers.ligo.org/lscsoft/lalsuite/lalsuite-v6.60:el7 535 | containers.ligo.org/lscsoft/lalsuite/lalsuite-v6.60:stretch 536 | containers.ligo.org/lscsoft/lalsuite/lalsuite-v6.62:el7 537 | containers.ligo.org/lscsoft/lalsuite/lalsuite-v6.62:stretch 538 | containers.ligo.org/lscsoft/bayeswave:latest 539 | containers.ligo.org/lscsoft/bayeswave:v1.0.6 540 | containers.ligo.org/lscsoft/bayeswave:v1.0.7 541 | containers.ligo.org/computing/rucio/containers/rucio-clients:latest 542 | containers.ligo.org/lscsoft/gstlal:master 543 | containers.ligo.org/lscsoft/gstlal:osg-dev 544 | containers.ligo.org/gstlal/o4a-containers:cvmfs1 545 | containers.ligo.org/gstlal/o4a-containers:cvmfs2 546 | containers.ligo.org/gstlal/o4b-containers:cvmfs1 547 | containers.ligo.org/gstlal/o4b-containers:cvmfs2 548 | containers.ligo.org/snax/snax:v0.5.1 549 | containers.ligo.org/snax/snax:v0.5.2 550 | containers.ligo.org/lmxbcrosscorr/containers:crosscorr-lattice-dev-clean 551 | containers.ligo.org/echoes-model-independent/bayeswave-echoes-image:v1.0.7_echoes_reviewed 552 | 
containers.ligo.org/calibration/gstlal-calibration:gstlal-calibration-1.5.10-v1 553 | 554 | # Lancaster U, Muon g-2 Beamline Simulations 555 | valetov/beam_track:* 556 | 557 | # Michigan State U, Center for Beam Theory and Dynamical Systems 558 | valetov/cosy:* 559 | valetov/g4bl:* 560 | valetov/gm2dev:* 561 | valetov/glyfada:* 562 | 563 | # Mu2e 564 | egstern/centos7-synergia2:* 565 | egstern/sl7-synergia2:* 566 | egstern/ubuntu1804-synergia2:* 567 | mu2e/synergia:v0 568 | egstern/wn-synergia2:latest 569 | 570 | # Mu3e 571 | # 4ndr85/mu3e:v3 (old) 572 | paolobeltrame/mu3e:v5.4 573 | paolobeltrame/mu3e:mctrk 574 | paolobeltrame/mu3e:v5.5 575 | 576 | #NEURON+Brian2+Python 577 | rtikid/python2-numpy-scipy-sympy-neuron-brian2-netpyne-inspyred-pyabf 578 | rtikid/python3-numpy-scipy-sympy-neuron-brian2-netpyne-inspyred-pyabf 579 | 580 | # WRENNCH project (rynge) 581 | wrenchproject/task-clustering:* 582 | 583 | # FAST-HEP images 584 | fasthep/fast-hep-docker:version-0.2.0 585 | 586 | # EHT/PIRE 587 | eventhorizontelescope/hops:* 588 | #casavlbi_ehtproduction:latest 589 | mjanssen2308/symba:latest 590 | mjanssen2308/symba:513f44b290d992213fb21e99515b0f0440f0d477 591 | ttrent808/ray:* 592 | ehtcon/img-env:* 593 | ehtcon/mcfe-env:* 594 | ehtcon/theory-env:* 595 | 596 | # Caltech_Rusholme 597 | nrstickley/jsp_apps:* 598 | 599 | #American Museum of Natural History (AMNH) 600 | amnh/osgimages:* 601 | amnh/herpetology-pseudo-it:* 602 | amnh/herpetology_iqtree_r_quibl:* 603 | 604 | # Single-cell Inference of Networks using Granger Ensembles (SINGE) 605 | agitter/singe:latest 606 | 607 | # Notre Dame images 608 | notredamedulac/el7-tensorflow-pytorch:latest 609 | notredamedulac/el7-pytorch-gpu:latest 610 | notredamedulac/el7-tensorflow-keras-gpu:latest 611 | notredamedulac/el7-jax-gpu:ubuntu 612 | notredamedulac/el7-deepshere-gpu:latest 613 | 614 | # LSST DESC stackvana 615 | beckermr/stackvana:latest 616 | 617 | # Neural Architecture Search for 2d-UNet 618 | 
jinnian/automl:2dunet 619 | jinnian/automl-osg:2dunet 620 | 621 | # LUX-ZEPLIN (LZ) Base OS 622 | luxzeplin/base_os:centos7 623 | luxzeplin/base_os:rocky9_3 624 | luxzeplin/offline_hosted:rocky9_3 625 | 626 | #Jetscape 627 | bardelch/jetscape-deploy:* 628 | bardelch/jetscape-compiled:* 629 | amitkr2410/jetscape-compiled:* 630 | dananjaya92/jetscape-compiled:* 631 | jetscape/base:* 632 | 633 | #osg.PortlandState_Feng 634 | mythril/myth 635 | 636 | # Hieu Nguyen 637 | nguyenatrowan/pytorch 638 | 639 | # JLab CLAS Simulations 640 | tylern4/clas6:latest 641 | 642 | # OpenGATE collaboration 643 | opengatecollaboration/gate:8.2 644 | opengatecollaboration/gate:9.0 645 | 646 | # Snowmass21 647 | snowmass21software/delphes-osg:* 648 | 649 | # Zwicky Transient Facility 650 | michaelwcoughlin/ztfperiodic:latest 651 | 652 | #intel 653 | intel/oneapi-hpckit 654 | 655 | # E1039/SpinQuest 656 | e1039/e1039-sl7 657 | 658 | # University of Manchester/MicroBooNE uboonecode + gallery-fmwk 659 | lmlepin9/slf7-ubcode-gallery-fmwk:2.3 660 | 661 | # University of Guam - Bioinformatics 662 | jagault/evolution-photosymbiosis:rscriptv3.5.3 663 | 664 | # GAPS - General Anti Particle Spectrometer 665 | gapscr/crane:1.6.4 666 | 667 | # University of Chicago - JonasLab Molecular Sim 668 | ericmjonas/osg:* 669 | 670 | # Justin Cha 671 | jcha40/chexmix:latest 672 | jcha40/python_env:latest 673 | 674 | # University of Wisconsin - Jason Kwan Lab 675 | jasonkwan/autometa:latest 676 | 677 | # UW-Madison & Reed College -- Gitter Lab, SPRAS project 678 | # https://hub.docker.com/r/reedcompbio 679 | reedcompbio/omics-integrator-1:latest 680 | reedcompbio/omics-integrator-1:no-conda 681 | reedcompbio/omics-integrator-2:v2 682 | reedcompbio/pathlinker:latest 683 | reedcompbio/meo:latest 684 | reedcompbio/mincostflow:latest 685 | reedcompbio/domino:latest 686 | reedcompbio/allpairs:latest 687 | reedcompbio/py4cytoscape:v2 688 | reedcompbio/tiedie:latest 689 | reedcompbio/random-walk-with-restart:latest 
690 | 691 | # Network for Computational Modeling in the Social and Ecological Sciences (CoMSES Net, https://comses.net) images 692 | comses/osg-netlogo:* 693 | 694 | # NCBI 695 | ncbi/pgap:2022-04-14.build6021 696 | 697 | # STAR experiment at Brookhaven National Lab images 698 | ghcr.io/star-bnl/star-sw 699 | ghcr.io/star-bnl/star-sw:SL23f 700 | ghcr.io/star-bnl/star-sw:SL23e 701 | ghcr.io/star-bnl/star-sw:SL23d 702 | ghcr.io/star-bnl/star-sw:SL23c 703 | ghcr.io/star-bnl/star-sw:SL23b 704 | ghcr.io/star-bnl/star-sw:SL23a 705 | ghcr.io/star-bnl/star-sw:SL22c 706 | ghcr.io/star-bnl/star-sw:SL22b 707 | ghcr.io/star-bnl/star-sw:SL22a 708 | ghcr.io/star-bnl/star-sw:SL21d 709 | ghcr.io/star-bnl/star-sw:SL21c 710 | ghcr.io/star-bnl/star-sw:SL21b 711 | ghcr.io/star-bnl/star-sw:SL21a 712 | ghcr.io/star-bnl/star-sw:SL20c 713 | ghcr.io/star-bnl/star-sw:SL19e 714 | ghcr.io/star-bnl/star-sw:SL19b 715 | 716 | # REDTOP Collaboration 717 | redtopexp/redtop:* 718 | 719 | # FIU_Hamid FPHLM 720 | houpengg/fphlm:* 721 | 722 | # PosEiDon 723 | pegasus/1000genome-workflow:latest 724 | pegasus/montage-workflow-v3:latest 725 | 726 | # h2oai 727 | h2oai/h2o-open-source-k8s:latest 728 | -------------------------------------------------------------------------------- /dockerhub.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2017 Daniel Sullivan (mumblepins) 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 
all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

"""Minimal client for the Docker Hub / Docker Registry v2 HTTP APIs.

Supports anonymous, username/password and pre-issued-token authentication
against hub.docker.com, ghcr.io and hub.opensciencegrid.org, with automatic
re-scoping of the bearer token when a registry answers 401.
"""

import json

import requests
from furl import furl
from requests.auth import AuthBase
import traceback
import re


class TimeoutError(Exception):
    """Raised when a registry request times out (connect or read)."""
    pass


class ConnectionError(Exception):
    """Raised when a registry request fails at the transport level."""
    pass


class AuthenticationError(Exception):
    """Raised when the registry rejects a login attempt."""
    pass


class DockerHubAuth(AuthBase):
    """``requests`` auth handler that attaches a registry bearer token.

    The token is either supplied directly, obtained with username/password,
    or fetched anonymously from the token endpoint.
    """

    def __init__(self, requests_post, api_url, username=None, password=None, token=None, delete_creds=False, scope=None):
        """
        Args:
            requests_post (:py:meth:`DockerHub._do_requests_post`): callable used
                to hit ``api_url`` for a token.
            api_url (str): token/login endpoint.
            username (str, optional):
            password (str, optional):
            token (str, optional): pre-issued token; skips the login round trip.
            delete_creds (bool, optional): drop username/password after login.
        """
        self._token = None
        self._username = None
        self._password = None
        self._api_url = api_url
        self._requests_post = requests_post
        if token is not None:
            self._token = token
            return
        if username is not None and password is not None:
            self._username = username
            self._password = password
            self._get_authorization_token()
            if delete_creds:
                self._username = None
                self._password = None
            return

        # Otherwise, do an anonymous login.
        self._get_authorization_token()
        #raise ValueError("Need either username and password or token for authentication")

    @property
    def token(self):
        return self._token

    def __eq__(self, other):
        return self._token == getattr(other, '_token', None)

    def __ne__(self, other):
        return not self == other

    def __call__(self, r):
        # Inject the bearer token into every outgoing request.
        r.headers['Authorization'] = "Bearer {}".format(self._token)
        return r

    def updateToken(self, scope, service=None, realm=None, **kwargs):
        """Fetch a fresh token, optionally restricted to *scope*.

        Extra keyword args (e.g. ``error=...`` parsed out of a
        Www-Authenticate header) are accepted and ignored.
        """
        if self._username:
            auth = (self._username, self._password)
        else:
            auth = None

        if scope:
            params = {'service': 'registry.docker.io', 'scope': scope}
        else:
            params = {'service': 'registry.docker.io'}
        if service:
            params['service'] = service
        if realm:
            r = requests.get(realm, params=params, auth=auth)
        else:
            r = requests.get("https://auth.docker.io/token", params=params, auth=auth)
        try:
            self._token = r.json()['token']
        except KeyError as ke:
            print("Unable to get token from json")
            print(r.json())
            raise ke

    def _get_authorization_token(self):
        """Actually gets the authentication token.

        Raises:
            AuthenticationError: didn't login right
        """
        # No credentials at all: hit the token endpoint anonymously (GET-style,
        # no pagination params).
        if self._username is None and self._password is None and self._token is None:
            r = self._requests_post(self._api_url, noPage=True)
        else:
            r = self._requests_post(
                self._api_url,
                {
                    "username": self._username,
                    "password": self._password
                })

        if not r.ok:
            raise AuthenticationError("Error Status {}:\n{}".format(r.status_code, json.dumps(r.json(), indent=2)))
        self._token = r.json()['token']


def parse_url(url):
    """Parses a url into the base url and the query params

    Args:
        url (str): url with query string, or not

    Returns:
        (str, `dict` of `lists`): url, query (dict of values)
    """
    f = furl(url)
    query = f.args
    query = {a[0]: a[1] for a in query.listitems()}

    f.remove(query=True).path.normalize()
    url = f.url

    return url, query


def user_cleaner(user):
    """Converts none or _ to library, makes username lowercase

    Args:
        user (str):

    Returns:
        str: cleaned username
    """
    if user == "_" or user == "":
        return "library"
    try:
        return user.lower()
    except AttributeError:
        # Non-string (e.g. None) passes through unchanged.
        return user


class DockerHub(object):
    """Actual class for making API calls

    Args:
        username (str, optional):
        password(str, optional):
        token(str, optional):
        url(str, optional): Url of api (https://hub.docker.com)
        namespace(str, optional): Namespace of a docker image
        repo(str, optional): Repo of the image
        version(str, optional): Api version (v2)
        delete_creds (bool, optional): Whether to delete password after logging in (default True)
        return_lists (bool, optional): Whether to return a `generator` from calls that return multiple values
            (False, default), or to return a simple `list` (True)
    """

    def __init__(self, username=None, password=None, token=None, url=None, namespace=None, repo=None, version='v2', delete_creds=True,
                 return_lists=False):
        self._version = version
        self._url = '{0}/{1}'.format(url or 'https://hub.docker.com', self.version)
        self._namespace = None
        self._repo = None
        self._session = requests.Session()
        self._auth = None
        self._token = None
        self._username = None
        self._password = None
        self._return_lists = return_lists
        self.login(username, password, token, namespace, repo, delete_creds)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Close the underlying requests session."""
        self._session.close()

    @property
    def return_lists(self):
        """Whether functions should return generators (False) or lists (True)

        Returns:
            bool
        """
        return self._return_lists

    @return_lists.setter
    def return_lists(self, value):
        self._return_lists = value

    @property
    def username(self):
        # Lazily resolved from the logged-in user endpoint.
        if self._username is None and self.logged_in:
            self._get_username()
        return self._username

    @property
    def logged_in(self):
        return self.token is not None

    @property
    def version(self):
        return self._version

    @property
    def url(self):
        return self._url

    @property
    def namespace(self):
        return self._namespace

    @property
    def repo(self):
        return self._repo

    @property
    def token(self):
        return self._token

    @token.setter
    def token(self, value):
        self._token = value
        self._get_username()

    def _do_request(self, method, address, **kwargs):
        """Issue one HTTP request, retrying once with a re-scoped token on 401.

        Raises:
            TimeoutError: connect/read timeout.
            ConnectionError: any other transport failure.
        """
        try:
            if 'timeout' not in kwargs:
                # (connect, read) timeouts in seconds.
                kwargs['timeout'] = (5, 15)

            # ttl bounds the number of 401-triggered token refresh retries.
            if 'ttl' not in kwargs:
                ttl = 1
            else:
                ttl = kwargs['ttl']
                del kwargs['ttl']

            if 'auth' not in kwargs:
                kwargs['auth'] = self._auth

            if 'headers' not in kwargs:
                kwargs['headers'] = {"Content-Type": "application/json"}
            elif 'Content-Type' not in kwargs['headers']:
                kwargs['headers']['Content-Type'] = "application/json"

            # Fold any query string baked into the address into `params`.
            url, query = parse_url(address)
            if query:
                address = url
                if 'params' in kwargs:
                    query.update(kwargs['params'])
                kwargs['params'] = query

            resp = self._session.request(method, address, **kwargs)

        except requests.exceptions.Timeout as e:
            raise TimeoutError('Connection Timeout. Download failed: {0}'.format(e))
        except requests.exceptions.RequestException as e:
            raise ConnectionError('Connection Error. Download failed: {0}'.format(e))
        else:
            if resp.status_code == 401 and ttl > 0:
                # Update the auth token with the scope, and try again.
                # Parse the Www-Authenticate line, looks like:
                # Bearer realm="https://git.ligo.org/jwt/auth",service="container_registry",scope="repository:lscsoft/lalsuite/lalsuite-v6.53:pull",error="invalid_token"
                # NOTE: raw string — the original non-raw pattern relied on
                # invalid escape sequences ("\w", "\:"), deprecated in py3.
                reg = re.compile(r'(\w+)[=] ?"?([\w\:\/\.\-]+)"?')
                values = dict(reg.findall(resp.headers['Www-Authenticate']))
                self._auth.updateToken(**values)
                kwargs['ttl'] = ttl - 1
                return self._do_request(method, address, **kwargs)
            try:
                resp.raise_for_status()
            except Exception:
                # Dump whatever the server sent before propagating, to aid
                # debugging; narrowed from bare `except:` so Ctrl-C still works.
                try:
                    print(resp.json())
                except Exception:
                    print(resp.content)
                print(resp.headers)
                raise
            return resp

    def _do_requests_get(self, address, **kwargs):
        # Default to 100-per-page pagination unless the caller opts out.
        if 'params' not in kwargs:
            kwargs['params'] = {}
        if 'perPage' not in kwargs['params'] and 'noPage' not in kwargs:
            kwargs['params']['perPage'] = 100
            kwargs['params']['page_size'] = 100
        if 'noPage' in kwargs:
            del kwargs['noPage']
        return self._do_request('GET', address, **kwargs)

    def _do_requests_head(self, address, **kwargs):
        return self._do_request('HEAD', address, **kwargs)

    def _do_requests_post(self, address, json_data=None, **kwargs):
        return self._do_request('POST', address, json=json_data, **kwargs)

    def _do_requests_put(self, address, json_data=None, **kwargs):
        return self._do_request('PUT', address, json=json_data, **kwargs)

    def _do_requests_patch(self, address, json_data, **kwargs):
        return self._do_request('PATCH', address, json=json_data, **kwargs)

    def _do_requests_delete(self, address, **kwargs):
        return self._do_request('DELETE', address, **kwargs)

    def _iter_requests_get(self, address, **kwargs):
        """GET a paginated endpoint; list or generator per ``return_lists``."""
        if self.return_lists:
            return list(self._iter_requests_get_generator(address, **kwargs))
        return self._iter_requests_get_generator(address, **kwargs)

    def _iter_requests_get_generator(self, address, **kwargs):
        """Yield items from every page, following each page's 'next' link."""
        _next = None
        resp = self._do_requests_get(address, **kwargs)

        while True:
            if _next:
                resp = self._do_requests_get(_next)

            resp = resp.json()

            for i in resp['results']:
                yield i

            if resp['next']:
                _next = resp['next']
                continue
            return

    def _api_url(self, path):
        return '{0}/{1}'.format(self.url, path)

    def _get_username(self):
        if self.logged_in:
            self._username = user_cleaner(self.logged_in_user()['username'])
        else:
            self._username = None

    def login(self, username=None, password=None, token=None, namespace=None, repo=None, delete_creds=True):
        """Logs into Docker hub and gets a token

        Either username and password or token should be specified

        Args:
            username (str, optional):
            password (str, optional):
            token (str, optional):
            namespace (str, optional): required if the registry is ghcr.io or hub.opensciencegrid.org
            repo (str, optional): required if the registry is ghcr.io or hub.opensciencegrid.org
            delete_creds (bool, optional):
        """
        self._username = user_cleaner(username)
        self._password = password
        self._token = token
        if token is not None:
            # login with token
            self._auth = DockerHubAuth(self._do_requests_post, self._api_url('users/login'), token=token)
        elif username is not None and password is not None:
            # login with user/pass
            self._auth = DockerHubAuth(self._do_requests_post, self._api_url('users/login'), username=username,
                                       password=password)
        elif 'ghcr.io' in self.url:
            self._auth = DockerHubAuth(self._do_requests_get, "https://ghcr.io/token?service=ghcr.io&scope=repository:"+namespace+"/"+repo+":pull")
        elif 'hub.opensciencegrid.org' in self.url:
            self._auth = DockerHubAuth(self._do_requests_get, "https://hub.opensciencegrid.org/service/token?service=harbor-registry&scope=repository:"+namespace+"/"+repo+":pull")
        else:
            # Anonymous Docker Hub token.
            self._auth = DockerHubAuth(self._do_requests_get, "https://auth.docker.io/token?service=registry.docker.io")

        if delete_creds:
            self._password = None

        self._token = self._auth.token

    def comments(self, user, repository, **kwargs):
        """Iterate comments on ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{0}/{1}/comments'.format(user, repository))
        return self._iter_requests_get(url, **kwargs)

    def repository(self, user, repository, **kwargs):
        """Return metadata for ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{0}/{1}'.format(user, repository))
        return self._do_requests_get(url, **kwargs).json()

    def repositories(self, user, **kwargs):
        """Iterate all repositories owned by *user*."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{0}'.format(user))
        return self._iter_requests_get(url, **kwargs)

    def repositories_starred(self, user, **kwargs):
        """Iterate repositories starred by *user*."""
        user = user_cleaner(user)
        url = self._api_url('users/{0}/repositories/starred'.format(user))
        return self._iter_requests_get(url, **kwargs)

    def tags(self, user, repository, **kwargs):
        """Iterate tags of ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{0}/{1}/tags'.format(user, repository))
        # NOTE(review): auth is deliberately dropped here in the original code
        # (the hub tags endpoint is queried unauthenticated) — confirm before
        # relying on authenticated tag listings.
        self._auth = None
        return self._iter_requests_get(url, **kwargs)

    def manifest(self, user, repository, tag, head=False, **kwargs):
        """Fetch (or HEAD) the manifest for ``user/repository:tag``.

        Args:
            head (bool, optional): if True, return the raw HEAD response
                instead of the decoded manifest JSON.
        """
        url = self._api_url('{0}/{1}/manifests/{2}'.format(user, repository, tag))

        manifest_mime = ', '.join((
            'application/vnd.docker.distribution.manifest.v1+json',
            'application/vnd.docker.distribution.manifest.v2+json',
            'application/vnd.docker.distribution.manifest.list.v2+json',
            'application/vnd.oci.image.manifest.v1+json',
            'application/vnd.oci.image.index.v1+json',
        ))

        # Added support to retrieve oci images for hub.opensciencegrid.org
        # https://gitlab.com/gitlab-org/gitlab-foss/-/merge_requests/31127
        headers_has_accept = False
        if 'headers' not in kwargs:
            kwargs['headers'] = {'ACCEPT': manifest_mime}
        else:
            for headers_key in kwargs['headers'].keys():
                if 'accept' in headers_key.casefold():
                    headers_has_accept = True
                    kwargs['headers'][headers_key] += ', ' + manifest_mime
                    break
            if not headers_has_accept:
                kwargs['headers']['ACCEPT'] = manifest_mime

        if head:
            return self._do_requests_head(url, **kwargs)
        else:
            return self._do_requests_get(url, **kwargs).json()

    def user(self, user, **kwargs):
        """Return the public profile of *user*."""
        user = user_cleaner(user)
        url = self._api_url('users/{0}'.format(user))
        return self._do_requests_get(url, **kwargs).json()

    # ------ Logged In Section

    def logged_in_user(self):
        """Return the profile of the currently authenticated user."""
        return self._do_requests_get(self._api_url('user')).json()

    def add_collaborator(self, user, repository, collaborator):
        """Add *collaborator* to ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/collaborators'.format(user, repository))
        return self._do_requests_post(url, {
            "user": collaborator.lower()
        }).json()

    def build_details(self, user, repository, code):
        """Return details of build *code* for ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/buildhistory/{}'.format(user, repository, code))
        return self._do_requests_get(url).json()

    def build_history(self, user, repository, **kwargs):
        """Iterate the build history of ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/buildhistory'.format(user, repository))
        return self._iter_requests_get(url, **kwargs)

    def build_links(self, user, repository, **kwargs):
        """Iterate the build links of ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/links'.format(user, repository))
        return self._iter_requests_get(url, **kwargs)

    def build_settings(self, user, repository):
        """Return autobuild settings for ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/autobuild'.format(user, repository))
        return self._do_requests_get(url).json()

    def build_trigger(self, user, repository):
        """Return the build trigger for ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/buildtrigger'.format(user, repository))
        return self._do_requests_get(url).json()

    def build_trigger_history(self, user, repository, **kwargs):
        """Iterate the build-trigger history of ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/buildtrigger/history'.format(user, repository))
        return self._iter_requests_get(url, **kwargs)

    def collaborators(self, user, repository, **kwargs):
        """Iterate collaborators of ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/collaborators'.format(user, repository))
        return self._iter_requests_get(url, **kwargs)

    def create_build_link(self, user, repository, to_repo):
        """Create a build link from ``user/repository`` to *to_repo*."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/links'.format(user, repository))
        return self._do_requests_post(url, {
            "to_repo": to_repo
        }).json()

    def create_build_tag(self, user, repository, details):
        """Create an autobuild tag; missing *details* keys fall back to defaults."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/autobuild/tags'.format(user, repository))
        return self._do_requests_post(url, {
            'isNew': True,
            'namespace': user,
            'repoName': repository,
            'name': details['name'] if 'name' in details else 'latest',
            'dockerfile_location': details['dockerfile_location'] if 'dockerfile_location' in details else '/',
            'source_type': details['source_type'] if 'source_type' in details else 'Branch',
            'source_name': details['source_name'] if 'source_name' in details else 'master'
        }).json()

    def create_repository(self, user, repository, details):
        """Create repository ``user/repository`` with extra *details*."""
        user = user_cleaner(user)
        url = self._api_url('repositories')
        data = {
            'name': repository,
            'namespace': user,
        }
        details.update(data)
        return self._do_requests_post(url, details).json()

    def create_automated_build(self, user, repository, details):
        """Create an automated build for ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/autobuild'.format(user, repository))
        data = {
            'name': repository,
            'namespace': user,
            'active': True,
            'dockerhub_repo_name': "{}/{}".format(user, repository)
        }

        details.update(data)
        return self._do_requests_post(url, details).json()

    def create_webhook(self, user, repository, webhook_name):
        """Create a webhook named *webhook_name* on ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/webhooks'.format(user, repository))
        data = {
            'name': webhook_name
        }
        return self._do_requests_post(url, data).json()

    def create_webhook_hook(self, user, repository, webhook_id, webhook_url):
        """Attach *webhook_url* to webhook *webhook_id*."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/webhooks/{}/hooks'.format(user, repository, webhook_id))
        data = {
            'hook_url': webhook_url
        }
        return self._do_requests_post(url, data).json()

    def delete_build_link(self, user, repository, build_id):
        """Delete build link *build_id*.

        Returns:
            boolean: returns true if successful delete call
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/links/{}'.format(user, repository, build_id))
        resp = self._do_requests_delete(url)
        return resp.status_code == 204

    def delete_build_tag(self, user, repository, tag_id):
        """Delete autobuild tag *tag_id*.

        Returns:
            boolean: returns true if successful delete call
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/autobuild/tags/{}'.format(user, repository, tag_id))
        resp = self._do_requests_delete(url)
        return resp.status_code == 204

    def delete_tag(self, user, repository, tag):
        """Delete image tag *tag*.

        Returns:
            boolean: returns true if successful delete call
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/tags/{}'.format(user, repository, tag))
        resp = self._do_requests_delete(url)
        return resp.status_code == 204

    def delete_collaborator(self, user, repository, collaborator):
        """Remove *collaborator* from ``user/repository``.

        Returns:
            boolean: returns true if successful delete call
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/collaborators/{}'.format(user, repository, collaborator.lower()))
        resp = self._do_requests_delete(url)
        return resp.status_code in [200, 201, 202, 203, 204]

    def delete_repository(self, user, repository):
        """Delete ``user/repository``.

        Returns:
            boolean: returns true if successful delete call
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}'.format(user, repository))
        resp = self._do_requests_delete(url)
        return resp.status_code in [200, 201, 202, 203, 204]

    def delete_webhook(self, user, repository, webhook_id):
        """Delete webhook *webhook_id*.

        Returns:
            boolean: returns true if successful delete call
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/webhooks/{}'.format(user, repository, webhook_id))
        resp = self._do_requests_delete(url)
        return resp.status_code in [200, 201, 202, 203, 204]

    def registry_settings(self):
        """Return registry settings for the logged-in user."""
        url = self._api_url('users/{}/registry-settings'.format(self.username))
        return self._do_requests_get(url).json()

    def set_build_tag(self, user, repository, build_id, details):
        """Update autobuild tag *build_id*; *details* override the defaults."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/autobuild/tags/{}'.format(user, repository, build_id))
        data = {
            'id': build_id,
            'name': 'latest',
            'dockerfile_location': '/',
            'source_type': 'Branch',
            'source_name': 'master'
        }
        data.update(details)
        # BUG FIX: previously sent the raw `details`, silently discarding the
        # defaults assembled in `data` above.
        return self._do_requests_put(url, data).json()

    def set_repository_description(self, user, repository, descriptions):
        """Set the 'short' and/or 'full' description of ``user/repository``.

        Raises:
            ValueError: neither 'short' nor 'full' was supplied.
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}'.format(user, repository))
        data = {}
        if 'full' in descriptions:
            data['full_description'] = descriptions['full']
        if 'short' in descriptions:
            data['description'] = descriptions['short']
        if not data:
            raise ValueError("Need either 'short' or 'full' description specified")

        return self._do_requests_patch(url, data).json()

    def star_repository(self, user, repository):
        """Star ``user/repository``.

        Returns:
            boolean: returns true if successful
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/stars'.format(user, repository))
        resp = self._do_requests_post(url, {})
        return resp.status_code in [200, 201, 202, 203, 204]

    def unstar_repository(self, user, repository):
        """Unstar ``user/repository``.

        Returns:
            boolean: returns true if successful
        """
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/stars'.format(user, repository))
        resp = self._do_requests_delete(url)
        return resp.status_code in [200, 201, 202, 203, 204]

    def trigger_build(self, user, repository, details):
        """Trigger an autobuild; *details* override the defaults."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/autobuild/trigger-build'.format(user, repository))
        data = {
            'dockerfile_location': '/',
            'source_type': 'Branch',
            'source_name': 'master'
        }
        data.update(details)
        return self._do_requests_post(url, data).json()

    def webhooks(self, user, repository, **kwargs):
        """Iterate webhooks of ``user/repository``."""
        user = user_cleaner(user)
        url = self._api_url('repositories/{}/{}/webhooks'.format(user, repository))
        return self._iter_requests_get(url, **kwargs)


if __name__ == '__main__':
    pass

__all__ = ["DockerHub", "DockerHubAuth", "AuthenticationError", "ConnectionError", "TimeoutError"]
-------------------------------------------------------------------------------- /osg-wn-nightly-build: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import sys 4 | import json 5 | import urllib 6 | import urllib2 7 | import argparse 8 | import urlparse 9 | 10 | if '/usr/libexec/singularity/python' not in sys.path: 11 | sys.path.append('/usr/libexec/singularity/python') 12 | 13 | import docker.api 14 | 15 | def main(): 16 | parser = argparse.ArgumentParser(description="Tool for triggering automated builds in Docker") 17 | parser.add_argument("-t", "--tokenfile", help="Path to file containing DockerHub security token", dest="tokenfile", required=True) 18 | 19 | args = parser.parse_args() 20 | 21 | with open(args.tokenfile, "r") as fp: 22 | token = fp.read().strip() 23 | 24 | all_tags = [i for i in docker.api.get_tags(namespace="opensciencegrid", repo_name="osg-wn") if (('testing' in i) or ('devel' in i))] 25 | 26 | token_escaped = urllib.quote(token) 27 | url = urlparse.urljoin("https://registry.hub.docker.com/u/opensciencegrid/osg-wn/trigger/", token_escaped) + "/" 28 | 29 | #handler=urllib2.HTTPSHandler(debuglevel=1) 30 | #opener = urllib2.build_opener(handler) 31 | #urllib2.install_opener(opener) 32 | 33 | for tag in all_tags: 34 | print "Requesting rebuild of tag", tag 35 | request_data = json.dumps({"docker_tag": tag}) 36 | req = urllib2.Request(url, request_data, {"Content-Type": "application/json", "Content-Length": len(request_data)}) 37 | f = urllib2.urlopen(req) 38 | print f.read() 39 | f.close() 40 | 41 | if __name__ == '__main__': 42 | main() 43 | 44 | -------------------------------------------------------------------------------- /osg-wn-nightly-build.service: -------------------------------------------------------------------------------- 1 | 2 | [Unit] 3 | Description=osg-wn DockerHub nightly build 4 | After=network.target 5 | Wants=network.target 6 | 7 | [Service] 8 | 
ExecStart=/home/cse496/bbockelm/cvmfs-singularity-sync/osg-wn-nightly-build --tokenfile /home/cse496/bbockelm/cvmfs-singularity-sync/tokenfile 9 | WorkingDirectory=/home/cse496/bbockelm/cvmfs-singularity-sync 10 | RestartSec=24hr 11 | Restart=always 12 | User=bbockelm 13 | Nice=19 14 | LimitNOFILE=65536 15 | 16 | # Make sure python sends its stderr immediately. 17 | Environment=PYTHONUNBUFFERED=1 18 | 19 | [Install] 20 | WantedBy=multi-user.target 21 | 22 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | docker==2.0.0 2 | furl 3 | requests 4 | sqlitedict 5 | --------------------------------------------------------------------------------