├── .git-crypt ├── .gitattributes └── keys │ └── default │ └── 0 │ ├── 3673DF529D9049477F76B37566E3C7DC03D6E495.gpg │ └── 4A9C0AACCB3DADF3CD7205DD6DD1340E11D9E00F.gpg ├── .gitattributes ├── .gitignore ├── Makefile ├── README.md ├── buildbot ├── .env.buildbot ├── .env.buildog ├── .env.generic ├── .gitignore ├── Makefile ├── buildbot │ ├── Dockerfile │ ├── aws_credentials │ ├── buildbot_secret.py │ ├── entrypoint.sh │ ├── julia.gpg │ ├── sign_tarball.sh │ └── try_thrice ├── docker-compose.yml └── frontend │ ├── Dockerfile │ ├── buildbot_frontend.conf │ └── localhost_workaround.conf ├── buildworker ├── Dockerfile.template ├── Makefile ├── README.md ├── docker-compose.template.yml ├── rr_profile.json ├── secret.env └── start_worker.sh ├── common.mk ├── crossbuild ├── Makefile ├── README.md ├── cmake_toolchains │ ├── aarch64-linux-gnu.toolchain │ ├── aarch64-linux-musl.toolchain │ ├── arm-linux-gnueabihf.toolchain │ ├── arm-linux-musleabihf.toolchain │ ├── i686-linux-gnu.toolchain │ ├── i686-linux-musl.toolchain │ ├── i686-w64-mingw32.toolchain │ ├── powerpc64le-linux-gnu.toolchain │ ├── x86_64-apple-darwin14.toolchain │ ├── x86_64-linux-gnu.toolchain │ ├── x86_64-linux-musl.toolchain │ ├── x86_64-unknown-freebsd11.1.toolchain │ └── x86_64-w64-mingw32.toolchain ├── crossbase-x64.Dockerfile ├── crossshard-aarch64-linux-gnu-x64.Dockerfile ├── crossshard-aarch64-linux-musl-x64.Dockerfile ├── crossshard-arm-linux-gnueabihf-x64.Dockerfile ├── crossshard-arm-linux-musleabihf-x64.Dockerfile ├── crossshard-i686-linux-gnu-x64.Dockerfile ├── crossshard-i686-linux-musl-x64.Dockerfile ├── crossshard-i686-w64-mingw32-x64.Dockerfile ├── crossshard-powerpc64le-linux-gnu-x64.Dockerfile ├── crossshard-x86_64-apple-darwin14-x64.Dockerfile ├── crossshard-x86_64-linux-gnu-x64.Dockerfile ├── crossshard-x86_64-linux-musl-x64.Dockerfile ├── crossshard-x86_64-unknown-freebsd11.1-x64.Dockerfile ├── crossshard-x86_64-w64-mingw32-x64.Dockerfile └── lib ├── dockerchain ├── dockerdeps ├── freebsd 
└── telegraf_sysctl_probe ├── julia ├── Dockerfile ├── Makefile └── README.md ├── macos └── provision.sh ├── tabularasa ├── Makefile ├── alpine3_8-x86_64.Dockerfile ├── centos6_9-x86_64.Dockerfile ├── debian8-aarch64.Dockerfile ├── debian8-armv7l.Dockerfile ├── debian8_9-i686.Dockerfile └── debian9-ppc64le.Dockerfile ├── telegraf ├── .env ├── Dockerfile ├── Makefile ├── docker-compose.yml └── telegraf.conf ├── windows ├── bootstrap.ps1 └── provision.ps1 └── workerbase ├── Makefile ├── README.md ├── alpine3_8-x86_64.Dockerfile ├── centos6_9-x86_64.Dockerfile ├── debian8-aarch64.Dockerfile ├── debian8-armv7l.Dockerfile ├── debian8_9-i686.Dockerfile ├── debian9-ppc64le.Dockerfile ├── lib ├── alpha.Dockerfile ├── binutils_install.Dockerfile ├── build_tools.Dockerfile ├── builddeps_apk.Dockerfile ├── builddeps_apt.Dockerfile ├── builddeps_yum.Dockerfile ├── ccache_install.Dockerfile ├── cmake_install.Dockerfile ├── crossbuild │ ├── binutils_install.Dockerfile │ ├── build.sh │ ├── cctools_install.Dockerfile │ ├── dsymutil_install.Dockerfile │ ├── freebsd_components_install.Dockerfile │ ├── gcc_bootstrap.Dockerfile │ ├── gcc_download.Dockerfile │ ├── gcc_install.Dockerfile │ ├── glibc_install.Dockerfile │ ├── kernel_headers_install.Dockerfile │ ├── libtapi_install.Dockerfile │ ├── llvm_clang_install.Dockerfile │ ├── llvm_download.Dockerfile │ ├── mingw_stage1.Dockerfile │ ├── mingw_stage2.Dockerfile │ ├── musl_install.Dockerfile │ ├── osx_sdk_install.Dockerfile │ └── version_defaults.Dockerfile ├── docker_install.Dockerfile ├── download_unpack.sh ├── fake_sha512sum.sh ├── fake_uname.sh ├── freebsd_crosscompiler_install.Dockerfile ├── gcc_install.Dockerfile ├── git_install.Dockerfile ├── install_cygwin.ps1 ├── install_msys2.ps1 ├── libtool_install.Dockerfile ├── linux_glibc_crosscompiler_install.Dockerfile ├── linux_musl_crosscompiler_install.Dockerfile ├── multiarch.Dockerfile ├── objconv_install.Dockerfile ├── omega.Dockerfile ├── osx_crosscompiler_install.Dockerfile ├── 
patchelf_install.Dockerfile ├── python_install.Dockerfile ├── rr_install.Dockerfile ├── super_binutils_install.Dockerfile ├── tar_install.Dockerfile ├── tar_wrapper.sh ├── update_configure_scripts.sh ├── win_crosscompiler_install.Dockerfile └── wine_install.Dockerfile ├── patches ├── cctools_musl.patch ├── cmake_install.patch ├── dsymutil_llvm_dynlib.patch ├── gcc_libmpx_limits.patch ├── glibc-sunrpc.patch ├── glibc_arm_gcc_fix.patch ├── glibc_gcc_version.patch ├── glibc_i686_asm.patch ├── glibc_nocommon.patch ├── glibc_powerpc64le_gcc_fix.patch ├── glibc_regexp_nocommon.patch ├── libtapi_llvm_dynlib.patch ├── llvm_ar_options.patch ├── mingw_gcc710_i686.patch └── wine_nopie.patch └── qemu_register.sh /.git-crypt/.gitattributes: -------------------------------------------------------------------------------- 1 | # Do not edit this file. To specify the files to encrypt, create your own 2 | # .gitattributes file in the directory where your files are. 3 | * !filter !diff 4 | *.gpg binary 5 | -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/3673DF529D9049477F76B37566E3C7DC03D6E495.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/.git-crypt/keys/default/0/3673DF529D9049477F76B37566E3C7DC03D6E495.gpg -------------------------------------------------------------------------------- /.git-crypt/keys/default/0/4A9C0AACCB3DADF3CD7205DD6DD1340E11D9E00F.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/.git-crypt/keys/default/0/4A9C0AACCB3DADF3CD7205DD6DD1340E11D9E00F.gpg -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | 
buildbot/.env.* filter=git-crypt diff=git-crypt 2 | buildbot/.env.generic !filter !diff 3 | secret.env filter=git-crypt diff=git-crypt 4 | buildbot_secret.py filter=git-crypt diff=git-crypt 5 | julia.gpg filter=git-crypt diff=git-crypt 6 | awssecret filter=git-crypt diff=git-crypt 7 | aws_credentials filter=git-crypt diff=git-crypt 8 | ssh_host_rsa_key filter=git-crypt diff=git-crypt 9 | *.p12 filter=git-crypt diff=git-crypt 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # I use resilio-sync to develop across multiple computers 2 | .sync 3 | Icon? 4 | *.swp 5 | 6 | # This is just a symlink 7 | tabularasa/lib 8 | crossbuild/lib 9 | crossbuild/patches 10 | 11 | # Site-specific overrides 12 | buildworker/override.env 13 | 14 | # I've walked back on this decision 15 | build 16 | 17 | # None of that vscode nonsense 18 | .vscode 19 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | include common.mk 2 | 3 | all: 4 | $(MAKE) -C workerbase 5 | $(MAKE) -C tabularasa 6 | $(MAKE) -C buildworker 7 | 8 | clean: 9 | $(MAKE) -C buildworker clean 10 | $(MAKE) -C workerbase clean 11 | $(MAKE) -C tabularasa clean 12 | $(MAKE) -C buildbot clean 13 | 14 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Julia Docker 2 | ============ 3 | 4 | A collection of `Dockerfile`s related to the [Julia language](http://julialang.org) project. 
Most `Dockerfile`s are generated by the Makefiles held within the top-level directories for each project: 5 | 6 | * `julia`: This project contains a Makefile that generates the combinatorial explosion of `Dockerfile`s needed to support the `staticfloat/julia` docker images, which contain the generic linux binaries built and hosted [on the main Julia website](https://julialang.org/downloads). These `Dockerfile`s simply download and unpack the binary distributions hosted there across a wide range of versions, allowing for quick and easy testing of your code on any version/architecture by simply specifying the proper image to pull. 7 | 8 | * `buildbot`: This project contains the configuration for a buildbot master instance. Use this to build a new `build.julialang.org`, the configuration for which is held in the [julia-buildbot repository](https://github.com/staticfloat/julia-buildbot). 9 | 10 | * `workerbase`: This project contains the scripts that generate `Dockerfile`s to make the base worker images. These images have cutting-edge versions of `gcc`, `python`, `cmake`, etc... all built and ready to compile cutting-edge Julia programs. These are built across a variety of OS's and hardware architectures, and [get uploaded to the Docker Hub](https://hub.docker.com/search/?isAutomated=0&isOfficial=0&page=1&pullCount=0&q=julia_workerbase&starCount=0). 11 | 12 | * `tabularasa`: This project contains the scripts that generate `Dockerfile`s to make "blank" images that run buildbot worker instances for testing of built julia versions. These are used to ensure that our releases are completely self-contained, and as such do not contain things like `libgfortran` preinstalled within them. 13 | 14 | * `buildworker`: This project contains the scripts that generate `Dockerfile`s to make the final worker docker-compose configurations. These configurations can be used to easily start buildworker instances as services, connecting to the buildbot master instance and building away. 
15 | 16 | * `vagrant`: This is a mostly-abandoned attempt to set up [Vagrant](https://www.vagrantup.com) configurations for Windows and MacOS buildbots. Probably horrifically bitrotted, and will be deleted as soon as a more reliable setup is chanced upon. 17 | 18 | * `crossbuild`: The source for the `staticfloat/julia_crossbuild` docker images used by [`BinaryBuilder.jl`](https://github.com/JuliaPackaging/BinaryBuilder.jl) to perform crossbuilds of various binaries for use with [`BinaryProvider.jl`](https://github.com/JuliaPackaging/BinaryProvider.jl). 19 | 20 | In every case, if `Dockerfile`s or `docker-compose.yml` files are generated, they will be placed within a `build/` directory. The aim of this repository is to keep the `build/` directories up to date, so that interested users can download this repository and use the prebuilt `Dockerfile`/`docker-compose.yml` contents to immediately get up and running, without having to bother with the generation steps. 21 | 22 | Sensitive information (such as buildbot authentication passwords, AWS access credentials, etc...) is encrypted within this repository. To decrypt, you'll need a GPG key that @staticfloat and @tkelman guard zealously with their very lives. After obtaining that, download/install [`git-crypt`](https://github.com/AGWA/git-crypt), install `gnupg`, import the secret key, then run `git crypt unlock` within the git repository. This will turn encrypted files such as `buildworker/secret.env` from a bunch of binary gibberish to a list of environment variables. 
23 | -------------------------------------------------------------------------------- /buildbot/.env.buildbot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/buildbot/.env.buildbot -------------------------------------------------------------------------------- /buildbot/.env.buildog: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/buildbot/.env.buildog -------------------------------------------------------------------------------- /buildbot/.env.generic: -------------------------------------------------------------------------------- 1 | # This is essentially the site-specific setup. It does not need to be encrypted as it is not used in any production environments. 2 | FQDN=localhost 3 | BUILDBOT_BRANCH=master 4 | DB_USER=buildbot 5 | DB_PASSWORD=password 6 | -------------------------------------------------------------------------------- /buildbot/.gitignore: -------------------------------------------------------------------------------- 1 | # This is auto-generated 2 | .env 3 | -------------------------------------------------------------------------------- /buildbot/Makefile: -------------------------------------------------------------------------------- 1 | # The hostname of build.julialang.org is "buildbot" 2 | # The hostname of buildog.julialang.org is "buildog" 3 | ENV_TEMPLATE=.env.$(HOSTNAME) 4 | ifneq ($(shell [ -f $(ENV_TEMPLATE) ] && echo "exists"),exists) 5 | ENV_TEMPLATE=.env.generic 6 | endif 7 | 8 | .env: $(ENV_TEMPLATE) 9 | @echo "Auto-generating .env from $<..." 
10 | @echo "## This file autogeneated from $<" > "$@" 11 | @cat "$<" >> "$@" 12 | 13 | deploy: .env 14 | docker-compose build --pull && \ 15 | docker-compose up -d 16 | 17 | shell: 18 | docker-compose exec buildbot bash 19 | 20 | down: 21 | docker-compose down --remove-orphans 22 | 23 | destroy: 24 | docker-compose down --remove-orphans -v 25 | 26 | logs: 27 | docker-compose logs -f --tail=100 28 | 29 | run_localhost: 30 | docker-compose build --pull && \ 31 | docker-compose run -p 9899:9899 -p 8010:8010 buildbot 32 | 33 | clean: down 34 | -------------------------------------------------------------------------------- /buildbot/buildbot/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | 3 | RUN mkdir -p /buildbot 4 | WORKDIR /buildbot 5 | 6 | # First, grab builddeps and buildbot itself 7 | RUN apt update && apt install -y git libsqlite3-dev libssl-dev 8 | RUN pip install --upgrade pip 9 | RUN pip install psycopg2 requests ipython docker awscli txrequests urllib3[secure] mock buildbot[tls,bundle] buildbot-badges 10 | #RUN git clone https://github.com/iblis17/buildbot -b ib/reporter-verbose /tmp/buildbot_build 11 | #RUN pip install /tmp/buildbot_build/master 12 | #RUN rm -rf /tmp/buildbot_build 13 | RUN pip install buildbot_profiler git+https://github.com/iblis17/buildbot-freebsd.git 14 | 15 | # Clone/configure buildbot configuration 16 | # the ADD command is to rebuild the rest of the image from here on out 17 | # without caching in case the remote repo has changed) 18 | ARG BUILDBOT_BRANCH 19 | ADD https://api.github.com/repos/staticfloat/julia-buildbot/git/refs/heads/${BUILDBOT_BRANCH} /julia-buildbot_version.json 20 | RUN git clone -b ${BUILDBOT_BRANCH} https://github.com/staticfloat/julia-buildbot.git . 
21 | RUN buildbot create-master master 22 | 23 | # Install secret files (Note, you must have unlocked this repo, as these are all 24 | # encrypted, and failing to do so will give strange unicode errors!) 25 | ARG db_user 26 | ARG db_password 27 | ARG GITHUB_OAUTH_CLIENT_ID 28 | ARG GITHUB_OAUTH_CLIENT_SECRET 29 | ARG GITHUB_STATUS_OAUTH_TOKEN 30 | ARG SLACK_FAILURE_REPORT_SLUG 31 | ARG FREEBSDCI_OAUTH_TOKEN 32 | ARG MACOS_CODESIGN_IDENTITY 33 | ARG FQDN 34 | COPY buildbot_secret.py /buildbot/master 35 | RUN echo "db_user='${db_user}'" >> /buildbot/master/buildbot_secret.py 36 | RUN echo "db_password='${db_password}'" >> /buildbot/master/buildbot_secret.py 37 | RUN echo "FQDN='${FQDN}'" >> /buildbot/master/buildbot_secret.py 38 | RUN echo "BUILDBOT_BRANCH='${BUILDBOT_BRANCH}'" >> /buildbot/master/buildbot_secret.py 39 | RUN echo "GITHUB_OAUTH_CLIENT_ID='${GITHUB_OAUTH_CLIENT_ID}'" >> /buildbot/master/buildbot_secret.py 40 | RUN echo "GITHUB_OAUTH_CLIENT_SECRET='${GITHUB_OAUTH_CLIENT_SECRET}'" >> /buildbot/master/buildbot_secret.py 41 | RUN echo "GITHUB_STATUS_OAUTH_TOKEN='${GITHUB_STATUS_OAUTH_TOKEN}'" >> /buildbot/master/buildbot_secret.py 42 | RUN echo "FREEBSDCI_OAUTH_TOKEN='${FREEBSDCI_OAUTH_TOKEN}'" >> /buildbot/master/buildbot_secret.py 43 | RUN echo "MACOS_CODESIGN_IDENTITY='${MACOS_CODESIGN_IDENTITY}'" >> /buildbot/master/buildbot_secret.py 44 | RUN echo "SLACK_FAILURE_REPORT_SLUG='${SLACK_FAILURE_REPORT_SLUG}'" >> /buildbot/master/buildbot_secret.py 45 | 46 | COPY julia.gpg sign_tarball.sh /root/ 47 | RUN chmod +x /root/sign_tarball.sh 48 | RUN mkdir -p /root/.aws 49 | COPY aws_credentials /root/.aws/credentials 50 | RUN chmod 0600 /root/.aws/credentials 51 | RUN mkdir -p /root/.gnupg 52 | RUN gpg --batch --import /root/julia.gpg 53 | COPY try_thrice /root/bin/try_thrice 54 | 55 | # GPG needs a tty, apparently 56 | ENV GPG_TTY=/dev/pts/0 57 | 58 | # Install entrypoint and run it! 
59 | COPY entrypoint.sh /entrypoint.sh 60 | ENTRYPOINT ["/bin/sh"] 61 | CMD ["/entrypoint.sh"] 62 | -------------------------------------------------------------------------------- /buildbot/buildbot/aws_credentials: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/buildbot/buildbot/aws_credentials -------------------------------------------------------------------------------- /buildbot/buildbot/buildbot_secret.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/buildbot/buildbot/buildbot_secret.py -------------------------------------------------------------------------------- /buildbot/buildbot/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Helper function to watch logfiles once they are created 3 | watch_the_log() 4 | { 5 | while [ ! -f "$1" ]; do 6 | sleep 1; 7 | done 8 | tail -f "$1" 2>/dev/null 9 | } 10 | # Start a log watcher in the background for twistd.log 11 | watch_the_log /buildbot/master/twistd.log & 12 | 13 | # Start our buildbot! 
14 | cd /buildbot/master 15 | 16 | # We're in docker, we don't care about twistd.pid 17 | rm -f twistd.pid 18 | buildbot upgrade-master 19 | exec twistd -ny buildbot.tac 20 | -------------------------------------------------------------------------------- /buildbot/buildbot/julia.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/buildbot/buildbot/julia.gpg -------------------------------------------------------------------------------- /buildbot/buildbot/sign_tarball.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpg -u julia --armor --detach-sig --batch --yes "$1" 4 | -------------------------------------------------------------------------------- /buildbot/buildbot/try_thrice: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if ! "$@"; then 4 | echo "Failed once, trying again..." 1>&2 5 | if ! "$@"; then 6 | echo "Failed twice, trying once more..." 
1>&2 7 | "$@" 8 | fi 9 | fi 10 | -------------------------------------------------------------------------------- /buildbot/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | db: 5 | restart: unless-stopped 6 | image: postgres 7 | container_name: db 8 | expose: 9 | - 5432 10 | environment: 11 | - POSTGRES_USER=${DB_USER} 12 | - POSTGRES_PASSWORD=${DB_PASSWORD} 13 | volumes: 14 | - db_data:/var/lib/postgresql/data 15 | buildbot: 16 | restart: unless-stopped 17 | depends_on: 18 | - db 19 | build: 20 | context: buildbot 21 | args: 22 | - db_user=${DB_USER} 23 | - db_password=${DB_PASSWORD} 24 | - BUILDBOT_BRANCH=${BUILDBOT_BRANCH} 25 | - GITHUB_OAUTH_CLIENT_ID=${GITHUB_OAUTH_CLIENT_ID} 26 | - GITHUB_OAUTH_CLIENT_SECRET=${GITHUB_OAUTH_CLIENT_SECRET} 27 | - GITHUB_STATUS_OAUTH_TOKEN=${GITHUB_STATUS_OAUTH_TOKEN} 28 | - FREEBSDCI_OAUTH_TOKEN=${FREEBSDCI_OAUTH_TOKEN} 29 | - MACOS_CODESIGN_IDENTITY=${MACOS_CODESIGN_IDENTITY} 30 | - SLACK_FAILURE_REPORT_SLUG=${SLACK_FAILURE_REPORT_SLUG} 31 | - FQDN=${FQDN} 32 | container_name: buildbot 33 | expose: 34 | - 8010 35 | ports: 36 | - 9989:9989/tcp 37 | volumes: 38 | - /var/run/docker.sock:/var/run/docker.sock 39 | frontend: 40 | restart: unless-stopped 41 | depends_on: 42 | - buildbot 43 | build: 44 | context: frontend 45 | args: 46 | - fqdn=${FQDN} 47 | environment: 48 | - CERTBOT_EMAIL=staticfloat@gmail.com 49 | container_name: frontend 50 | ports: 51 | - 80:80/tcp 52 | - 443:443/tcp 53 | volumes: 54 | db_data: 55 | -------------------------------------------------------------------------------- /buildbot/frontend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM jonasal/nginx-certbot 2 | 3 | ARG fqdn 4 | 5 | # Deploy nginx configuration, templated properly for this server's name/SSL cert paths 6 | COPY buildbot_frontend.conf /etc/nginx/user_conf.d/${fqdn}.conf 7 | COPY 
localhost_workaround.conf /etc/nginx/user_conf.d/localhost_workaround.conf 8 | RUN sed -i.bak -e "s&{fqdn}&${fqdn}&g" /etc/nginx/user_conf.d/${fqdn}.conf 9 | RUN rm -f /etc/nginx/user_conf.d/*.bak 10 | -------------------------------------------------------------------------------- /buildbot/frontend/buildbot_frontend.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 443 ssl http2; 3 | server_name {fqdn}; 4 | ssl_certificate /etc/letsencrypt/live/{fqdn}/fullchain.pem; 5 | ssl_certificate_key /etc/letsencrypt/live/{fqdn}/privkey.pem; 6 | 7 | # Send everything off to :8010, which is our buildbot server 8 | location / { 9 | proxy_pass http://buildbot:8010; 10 | } 11 | 12 | location /sse { 13 | proxy_buffering off; 14 | proxy_pass http://buildbot:8010/sse/; 15 | } 16 | 17 | location /ws { 18 | proxy_http_version 1.1; 19 | proxy_set_header Upgrade $http_upgrade; 20 | proxy_set_header Connection "upgrade"; 21 | proxy_pass http://buildbot:8010/ws; 22 | proxy_read_timeout 6000s; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /buildbot/frontend/localhost_workaround.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | # Send everything off to :8010, which is our buildbot server 6 | location / { 7 | proxy_pass http://buildbot:8010; 8 | } 9 | 10 | location /sse { 11 | proxy_buffering off; 12 | proxy_pass http://buildbot:8010/sse/; 13 | } 14 | 15 | location /ws { 16 | proxy_http_version 1.1; 17 | proxy_set_header Upgrade $http_upgrade; 18 | proxy_set_header Connection "upgrade"; 19 | proxy_pass http://buildbot:8010/ws; 20 | proxy_read_timeout 6000s; 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /buildworker/Dockerfile.template: -------------------------------------------------------------------------------- 1 | USER 
root 2 | RUN mkdir -p /buildworker 3 | RUN chown buildworker:buildworker /buildworker 4 | WORKDIR /buildworker 5 | 6 | # These arguments are passed in from `docker-compose` 7 | ARG L32 8 | 9 | # First, install the buildbot-worker package (no need for virtualenv because 10 | # we're within a Docker container!), and cleanup pip cache once we're done 11 | RUN $L32 pip install buildbot-worker 12 | RUN rm -rf /root/.cache 13 | 14 | # Install our startup script 15 | COPY start_worker.sh /buildworker/start_worker.sh 16 | RUN chmod +x /buildworker/start_worker.sh 17 | CMD ["/buildworker/start_worker.sh"] 18 | 19 | # Switch back to our buildworker user, and configure the buildbot-worker! 20 | ARG buildbot_server 21 | ARG buildbot_port 22 | ARG buildbot_password 23 | ARG buildworker_name 24 | USER buildworker 25 | RUN $L32 buildbot-worker create-worker --keepalive=100 --umask 0o022 worker $buildbot_server:$buildbot_port $buildworker_name $buildbot_password 26 | RUN echo "Elliot Saba " > worker/info/admin 27 | RUN echo "Julia $buildworker_name buildworker" > worker/info/host 28 | 29 | -------------------------------------------------------------------------------- /buildworker/Makefile: -------------------------------------------------------------------------------- 1 | include ../common.mk 2 | 3 | buildall: 4 | 5 | # Build mapping from buildbot name to worker base image 6 | BOTNAMES= 7 | 8 | # Add x86_64 botnames 9 | BOTNAMES += linux-x86_64-nanosoldier1_1 linux-x86_64-nanosoldier2_1 linux-x86_64-nanosoldier2_2 linux-x86_64-nanosoldier4_1 linux-x86_64-nanosoldier4_2 10 | BOTNAMES += linux-x86_64-amdci6_1 linux-x86_64-amdci6_2 linux-x86_64-amdci6_3 11 | BOTNAMES += musl-x86_64-openstack1 12 | # Add i686 botnames 13 | BOTNAMES += linux-i686-nanosoldier1_1 linux-i686-nanosoldier3_1 linux-i686-nanosoldier3_2 14 | # Add ppc64le botnames 15 | BOTNAMES += linux-ppc64le-osu_1 linux-ppc64le-osu_2 linux-ppc64le-osu_3 linux-ppc64le-osu_4 16 | # Add aarch64 botnames 17 | BOTNAMES += 
linux-aarch64-aws_1 linux-aarch64-aws_2 linux-aarch64-aws_3 linux-aarch64-aws_4 18 | BOTNAMES += linux-aarch64-aws_5 linux-aarch64-aws_6 linux-aarch64-aws_7 linux-aarch64-aws_8 19 | # Add armv7l botnames 20 | BOTNAMES += linux-armv7l-firefly_1 linux-armv7l-firefly_2 linux-armv7l-firefly_3 linux-armv7l-rock64_1 21 | 22 | # Extract the arch from a botname 23 | define bot_arch 24 | $(strip $(basename $(subst -,.,$(patsubst linux-%,%,$(1))))) 25 | endef 26 | 27 | # Build mapping function from botname to worker base image 28 | define bot_image 29 | $(strip $(call arch_filt,$(call bot_arch,$(1)),$(HFS))) 30 | endef 31 | 32 | # Build "buildall" target that builds the docker-compose directories for each bot 33 | $(foreach b,$(BOTNAMES),$(eval $(call add_dep,buildall,build-$(b)))) 34 | 35 | # Build "deployall" target that brings up all images that are compatible with our arch 36 | BUILD_BOTNAMES=$(call arch_filt,$(BUILD_ARCHS),$(BOTNAMES)) 37 | $(foreach b,$(BUILD_BOTNAMES),$(eval $(call add_dep,deployall,deploy-$(b)))) 38 | 39 | # Build "downall" target that takes down all workers that are compatible with our arch 40 | $(foreach b,$(BUILD_BOTNAMES),$(eval $(call add_dep,downall,down-$(b)))) 41 | 42 | 43 | # Dummy rule for if we don't have one 44 | override.env: 45 | touch $@ 46 | 47 | # Here's where we take our templates and build docker-compose files out of them 48 | define build_dockercompose 49 | build-$(1): build/$(1)/docker-compose.yml build/$(1)/worker/Dockerfile build/$(1)/tabularasa/Dockerfile 50 | 51 | dockerdeps-$(1): $(shell ../dockerdeps ../workerbase/$(call bot_image,$(1)).Dockerfile) 52 | 53 | build/$(1)/tabularasa/Dockerfile: Dockerfile.template start_worker.sh dockerdeps-$(1) 54 | @echo $(1)/tabularasa 55 | @# Build tabularasa directory 56 | @mkdir -p "build/$(1)/tabularasa" 57 | @echo "## This file was autogenerated" > "build/$(1)/tabularasa/Dockerfile" 58 | @echo "# Do not edit directly; edit the template files" >> "build/$(1)/tabularasa/Dockerfile" 59 | 
@echo "FROM $(call tabularasa_tag_name,$(call bot_image,$(1)))" >> "build/$(1)/tabularasa/Dockerfile" 60 | @cat Dockerfile.template >> "build/$(1)/tabularasa/Dockerfile" 61 | @cp ./start_worker.sh "build/$(1)/tabularasa/start_worker.sh" 62 | @cp ./rr_profile.json "build/$(1)/tabularasa/rr_profile.json" 63 | 64 | build/$(1)/worker/Dockerfile: Dockerfile.template start_worker.sh dockerdeps-$(1) 65 | @echo $(1)/worker 66 | @# Build worker directory 67 | @mkdir -p "build/$(1)/worker" 68 | @echo "## This file was autogenerated" > "build/$(1)/worker/Dockerfile" 69 | @echo "# Do not edit directly; edit the template files" >> "build/$(1)/worker/Dockerfile" 70 | @echo "FROM $(call worker_tag_name,$(call bot_image,$(1)))" >> "build/$(1)/worker/Dockerfile" 71 | @cat Dockerfile.template >> "build/$(1)/worker/Dockerfile" 72 | @cp ./start_worker.sh "build/$(1)/worker/start_worker.sh" 73 | 74 | build/$(1)/docker-compose.yml: docker-compose.template.yml override.env secret.env dockerdeps-$(1) 75 | @echo $(1) 76 | @mkdir -p "build/$(1)" 77 | @echo "## Autogenerated from secret.env and override.env" > "build/$(1)/.env" 78 | @cat secret.env >> "build/$(1)/.env" 79 | @[ ! 
-f override.env ] || cat override.env >> "build/$(1)/.env" 80 | @sed -e "s/{service_name}/$(1)/g" docker-compose.template.yml > "build/$(1)/docker-compose.yml" 81 | @sed -i.bak -e "s&{home}&$(HOME)&g" "build/$(1)/docker-compose.yml" 82 | @case $(call worker_tag_name,$(1)) in \ 83 | *x86) sed -i.bak -e "s/{linux32}/linux32/g" "build/$(1)/docker-compose.yml";; \ 84 | *) sed -i.bak -e "s/{linux32}/ /g" "build/$(1)/docker-compose.yml";; \ 85 | esac 86 | @rm -f build/$(1)/*.bak 87 | 88 | 89 | pull-$(1): 90 | docker pull $(call worker_tag_name,$(1)) 91 | 92 | deploy-$(1): build-$(1) ipv6_internal ccache_volume srccache_volume 93 | @# Build our new worker, take down the old one and bring the new one up 94 | @cd build/$(1); \ 95 | docker-compose build --pull; \ 96 | COMPOSE_HTTP_TIMEOUT=300 docker-compose up -d 97 | 98 | down-$(1): 99 | @if [ -d build/$(1) ]; then \ 100 | cd build/$(1); \ 101 | docker-compose down --remove-orphans; \ 102 | fi 103 | endef 104 | 105 | # Makefile target to make our internal ipv6 network 106 | ipv6_internal: 107 | @if [ -z "$(shell docker network ls | grep ipv6_internal)" ]; then \ 108 | echo "Creating internal ipv6 docker network..."; \ 109 | docker network create --internal --ipv6 --subnet "fdeb:feed:face::/48" ipv6_internal; \ 110 | fi 111 | 112 | # Call build_dockercompose on each and every bot 113 | $(foreach b,$(BOTNAMES),$(eval $(call build_dockercompose,$(b)))) 114 | 115 | # Makefile target to make our ccache volume 116 | define init_volume 117 | $(1)_volume: 118 | @if [ -z "$$(shell docker volume ls | grep $(1))" ]; then \ 119 | echo -n "Creating $(1) docker volume..."; \ 120 | docker volume create --name=$(1); \ 121 | docker run -ti -v $(1):/$(1) alpine /bin/sh -c "chown 1337:1337 /$(1)"; \ 122 | fi 123 | endef 124 | 125 | # Instantiate our volume initialization targets 126 | $(eval $(call init_volume,ccache)) 127 | $(eval $(call init_volume,srccache)) 128 | 129 | 130 | clean: 131 | rm -rf build 132 | 
-------------------------------------------------------------------------------- /buildworker/README.md: -------------------------------------------------------------------------------- 1 | buildworker docker images 2 | ========================= 3 | 4 | This repository auto-generates a bunch of [docker-compose](https://docs.docker.com/compose/) configurations to run the julia buildworker instances on top of the worker base images generated in the `workerbase` directory in the root of this repository. `docker-compose` is used as a convenient way to run docker images with some slight modification (E.g. the installation of the `buildbot-worker` python application, the configuration of the image with sensitive information such as the buildbot authentication password). 5 | 6 | The configuration is contained within the two template files sitting in this directory, the templates are generated using the `Makefile`, which auto-generates a template for each worker base image defined in the `workerbase` directory in the root of this repository. 7 | 8 | As usual, after building the configurations with `make`, all will be contained within the `build` directory. To run a buildworker, simply enter the directory and run `docker-compose up --build`. 9 | -------------------------------------------------------------------------------- /buildworker/docker-compose.template.yml: -------------------------------------------------------------------------------- 1 | # This template has variables that are substituted in when it is installed into 2 | # the `build` directory by our Makefile. The variables are surrounded by curly 3 | # braces and substituted through simple `sed` commands. 
4 | version: '2.1' 5 | services: 6 | {service_name}: 7 | restart: unless-stopped 8 | build: 9 | context: worker 10 | args: 11 | # These first three arguments are secret, but shared across all workers, 12 | # so they're stored in the encrypted `secret.env` and transparently 13 | # included by symlinking `secret.env` into each build directory as the 14 | # special file `.env`, which is always transparently included by the 15 | # docker-compose build process 16 | - buildbot_server 17 | - buildbot_port 18 | - buildbot_password 19 | # These two arguments are per-worker deals, and are substituted in by 20 | # our make-shift make templating chops 21 | - buildworker_name={service_name} 22 | - L32={linux32} 23 | pids_limit: 1000 24 | networks: 25 | - default 26 | - ipv6_internal 27 | volumes: 28 | - srccache:/srccache 29 | - ccache:/home/buildworker/.ccache 30 | - /var/run/docker.sock:/var/run/docker.sock 31 | - {home}/.docker:/home/buildworker/.docker 32 | # Copy of above, but we don't build julia within this worker. This is a 33 | # second buildworker instance that is used to test the output of the first 34 | # but without the benefit of the build tree; to ensure it works cleanly. 35 | tabularasa-{service_name}: 36 | restart: unless-stopped 37 | build: 38 | context: tabularasa 39 | args: 40 | - buildbot_server 41 | - buildbot_port 42 | - buildbot_password 43 | - buildworker_name=tabularasa_{service_name} 44 | - L32={linux32} 45 | security_opt: 46 | - seccomp=./tabularasa/rr_profile.json 47 | cap_add: 48 | - SYS_PTRACE 49 | pids_limit: 1000 50 | networks: 51 | - default 52 | - ipv6_internal 53 | # By specifying manual `dns` entries, we disable using Docker's 54 | # builtin DNS resolution algorithms, which apparently have problems 55 | # with doing `getnameinfo("0.1.1.1")`, resulting in `EAI_AGAIN` 56 | # when it should really return a resolution failure. 
57 | dns: 58 | - 8.8.8.8 59 | - 1.1.1.1 60 | - 8.8.4.4 61 | - 4.4.4.4 62 | volumes: 63 | - /var/run/docker.sock:/var/run/docker.sock 64 | networks: 65 | ipv6_internal: 66 | external: 67 | name: ipv6_internal 68 | volumes: 69 | ccache: 70 | external: true 71 | srccache: 72 | external: true 73 | -------------------------------------------------------------------------------- /buildworker/rr_profile.json: -------------------------------------------------------------------------------- 1 | { 2 | "defaultAction": "SCMP_ACT_ERRNO", 3 | "archMap": [ 4 | { 5 | "architecture": "SCMP_ARCH_X86_64", 6 | "subArchitectures": [ 7 | "SCMP_ARCH_X86", 8 | "SCMP_ARCH_X32" 9 | ] 10 | }, 11 | { 12 | "architecture": "SCMP_ARCH_AARCH64", 13 | "subArchitectures": [ 14 | "SCMP_ARCH_ARM" 15 | ] 16 | }, 17 | { 18 | "architecture": "SCMP_ARCH_MIPS64", 19 | "subArchitectures": [ 20 | "SCMP_ARCH_MIPS", 21 | "SCMP_ARCH_MIPS64N32" 22 | ] 23 | }, 24 | { 25 | "architecture": "SCMP_ARCH_MIPS64N32", 26 | "subArchitectures": [ 27 | "SCMP_ARCH_MIPS", 28 | "SCMP_ARCH_MIPS64" 29 | ] 30 | }, 31 | { 32 | "architecture": "SCMP_ARCH_MIPSEL64", 33 | "subArchitectures": [ 34 | "SCMP_ARCH_MIPSEL", 35 | "SCMP_ARCH_MIPSEL64N32" 36 | ] 37 | }, 38 | { 39 | "architecture": "SCMP_ARCH_MIPSEL64N32", 40 | "subArchitectures": [ 41 | "SCMP_ARCH_MIPSEL", 42 | "SCMP_ARCH_MIPSEL64" 43 | ] 44 | }, 45 | { 46 | "architecture": "SCMP_ARCH_S390X", 47 | "subArchitectures": [ 48 | "SCMP_ARCH_S390" 49 | ] 50 | } 51 | ], 52 | "syscalls": [ 53 | { 54 | "names": [ 55 | "accept", 56 | "accept4", 57 | "access", 58 | "adjtimex", 59 | "alarm", 60 | "bind", 61 | "brk", 62 | "capget", 63 | "capset", 64 | "chdir", 65 | "chmod", 66 | "chown", 67 | "chown32", 68 | "clock_getres", 69 | "clock_getres_time64", 70 | "clock_gettime", 71 | "clock_gettime64", 72 | "clock_nanosleep", 73 | "clock_nanosleep_time64", 74 | "close", 75 | "connect", 76 | "copy_file_range", 77 | "creat", 78 | "dup", 79 | "dup2", 80 | "dup3", 81 | "epoll_create", 82 | 
"epoll_create1", 83 | "epoll_ctl", 84 | "epoll_ctl_old", 85 | "epoll_pwait", 86 | "epoll_wait", 87 | "epoll_wait_old", 88 | "eventfd", 89 | "eventfd2", 90 | "execve", 91 | "execveat", 92 | "exit", 93 | "exit_group", 94 | "faccessat", 95 | "fadvise64", 96 | "fadvise64_64", 97 | "fallocate", 98 | "fanotify_mark", 99 | "fchdir", 100 | "fchmod", 101 | "fchmodat", 102 | "fchown", 103 | "fchown32", 104 | "fchownat", 105 | "fcntl", 106 | "fcntl64", 107 | "fdatasync", 108 | "fgetxattr", 109 | "flistxattr", 110 | "flock", 111 | "fork", 112 | "fremovexattr", 113 | "fsetxattr", 114 | "fstat", 115 | "fstat64", 116 | "fstatat64", 117 | "fstatfs", 118 | "fstatfs64", 119 | "fsync", 120 | "ftruncate", 121 | "ftruncate64", 122 | "futex", 123 | "futex_time64", 124 | "futimesat", 125 | "getcpu", 126 | "getcwd", 127 | "getdents", 128 | "getdents64", 129 | "getegid", 130 | "getegid32", 131 | "geteuid", 132 | "geteuid32", 133 | "getgid", 134 | "getgid32", 135 | "getgroups", 136 | "getgroups32", 137 | "getitimer", 138 | "getpeername", 139 | "getpgid", 140 | "getpgrp", 141 | "getpid", 142 | "getppid", 143 | "getpriority", 144 | "getrandom", 145 | "getresgid", 146 | "getresgid32", 147 | "getresuid", 148 | "getresuid32", 149 | "getrlimit", 150 | "get_robust_list", 151 | "getrusage", 152 | "getsid", 153 | "getsockname", 154 | "getsockopt", 155 | "get_thread_area", 156 | "gettid", 157 | "gettimeofday", 158 | "getuid", 159 | "getuid32", 160 | "getxattr", 161 | "inotify_add_watch", 162 | "inotify_init", 163 | "inotify_init1", 164 | "inotify_rm_watch", 165 | "io_cancel", 166 | "ioctl", 167 | "io_destroy", 168 | "io_getevents", 169 | "io_pgetevents", 170 | "io_pgetevents_time64", 171 | "ioprio_get", 172 | "ioprio_set", 173 | "io_setup", 174 | "io_submit", 175 | "io_uring_enter", 176 | "io_uring_register", 177 | "io_uring_setup", 178 | "ipc", 179 | "kill", 180 | "lchown", 181 | "lchown32", 182 | "lgetxattr", 183 | "link", 184 | "linkat", 185 | "listen", 186 | "listxattr", 187 | "llistxattr", 188 | 
"_llseek", 189 | "lremovexattr", 190 | "lseek", 191 | "lsetxattr", 192 | "lstat", 193 | "lstat64", 194 | "madvise", 195 | "memfd_create", 196 | "mincore", 197 | "mkdir", 198 | "mkdirat", 199 | "mknod", 200 | "mknodat", 201 | "mlock", 202 | "mlock2", 203 | "mlockall", 204 | "mmap", 205 | "mmap2", 206 | "mprotect", 207 | "mq_getsetattr", 208 | "mq_notify", 209 | "mq_open", 210 | "mq_timedreceive", 211 | "mq_timedreceive_time64", 212 | "mq_timedsend", 213 | "mq_timedsend_time64", 214 | "mq_unlink", 215 | "mremap", 216 | "msgctl", 217 | "msgget", 218 | "msgrcv", 219 | "msgsnd", 220 | "msync", 221 | "munlock", 222 | "munlockall", 223 | "munmap", 224 | "nanosleep", 225 | "newfstatat", 226 | "_newselect", 227 | "open", 228 | "openat", 229 | "pause", 230 | "pipe", 231 | "pipe2", 232 | "poll", 233 | "ppoll", 234 | "ppoll_time64", 235 | "prctl", 236 | "pread64", 237 | "preadv", 238 | "preadv2", 239 | "prlimit64", 240 | "pselect6", 241 | "pselect6_time64", 242 | "pwrite64", 243 | "pwritev", 244 | "pwritev2", 245 | "read", 246 | "readahead", 247 | "readlink", 248 | "readlinkat", 249 | "readv", 250 | "recv", 251 | "recvfrom", 252 | "recvmmsg", 253 | "recvmmsg_time64", 254 | "recvmsg", 255 | "remap_file_pages", 256 | "removexattr", 257 | "rename", 258 | "renameat", 259 | "renameat2", 260 | "restart_syscall", 261 | "rmdir", 262 | "rt_sigaction", 263 | "rt_sigpending", 264 | "rt_sigprocmask", 265 | "rt_sigqueueinfo", 266 | "rt_sigreturn", 267 | "rt_sigsuspend", 268 | "rt_sigtimedwait", 269 | "rt_sigtimedwait_time64", 270 | "rt_tgsigqueueinfo", 271 | "sched_getaffinity", 272 | "sched_getattr", 273 | "sched_getparam", 274 | "sched_get_priority_max", 275 | "sched_get_priority_min", 276 | "sched_getscheduler", 277 | "sched_rr_get_interval", 278 | "sched_rr_get_interval_time64", 279 | "sched_setaffinity", 280 | "sched_setattr", 281 | "sched_setparam", 282 | "sched_setscheduler", 283 | "sched_yield", 284 | "seccomp", 285 | "select", 286 | "semctl", 287 | "semget", 288 | "semop", 289 | 
"semtimedop", 290 | "semtimedop_time64", 291 | "send", 292 | "sendfile", 293 | "sendfile64", 294 | "sendmmsg", 295 | "sendmsg", 296 | "sendto", 297 | "setfsgid", 298 | "setfsgid32", 299 | "setfsuid", 300 | "setfsuid32", 301 | "setgid", 302 | "setgid32", 303 | "setgroups", 304 | "setgroups32", 305 | "setitimer", 306 | "setpgid", 307 | "setpriority", 308 | "setregid", 309 | "setregid32", 310 | "setresgid", 311 | "setresgid32", 312 | "setresuid", 313 | "setresuid32", 314 | "setreuid", 315 | "setreuid32", 316 | "setrlimit", 317 | "set_robust_list", 318 | "setsid", 319 | "setsockopt", 320 | "set_thread_area", 321 | "set_tid_address", 322 | "setuid", 323 | "setuid32", 324 | "setxattr", 325 | "shmat", 326 | "shmctl", 327 | "shmdt", 328 | "shmget", 329 | "shutdown", 330 | "sigaltstack", 331 | "signalfd", 332 | "signalfd4", 333 | "sigprocmask", 334 | "sigreturn", 335 | "socket", 336 | "socketcall", 337 | "socketpair", 338 | "splice", 339 | "stat", 340 | "stat64", 341 | "statfs", 342 | "statfs64", 343 | "statx", 344 | "symlink", 345 | "symlinkat", 346 | "sync", 347 | "sync_file_range", 348 | "syncfs", 349 | "sysinfo", 350 | "tee", 351 | "tgkill", 352 | "time", 353 | "timer_create", 354 | "timer_delete", 355 | "timer_getoverrun", 356 | "timer_gettime", 357 | "timer_gettime64", 358 | "timer_settime", 359 | "timer_settime64", 360 | "timerfd_create", 361 | "timerfd_gettime", 362 | "timerfd_gettime64", 363 | "timerfd_settime", 364 | "timerfd_settime64", 365 | "times", 366 | "tkill", 367 | "truncate", 368 | "truncate64", 369 | "ugetrlimit", 370 | "umask", 371 | "uname", 372 | "unlink", 373 | "unlinkat", 374 | "utime", 375 | "utimensat", 376 | "utimensat_time64", 377 | "utimes", 378 | "vfork", 379 | "vmsplice", 380 | "wait4", 381 | "waitid", 382 | "waitpid", 383 | "write", 384 | "writev" 385 | ], 386 | "action": "SCMP_ACT_ALLOW", 387 | "args": [], 388 | "comment": "", 389 | "includes": {}, 390 | "excludes": {} 391 | }, 392 | { 393 | "names": [ 394 | "ptrace" 395 | ], 396 | 
"action": "SCMP_ACT_ALLOW", 397 | "args": null, 398 | "comment": "", 399 | "includes": { 400 | "minKernel": "4.8" 401 | }, 402 | "excludes": {} 403 | }, 404 | { 405 | "names": [ 406 | "personality" 407 | ], 408 | "action": "SCMP_ACT_ALLOW", 409 | "args": [ 410 | { 411 | "index": 0, 412 | "value": 0, 413 | "valueTwo": 0, 414 | "op": "SCMP_CMP_EQ" 415 | } 416 | ], 417 | "comment": "", 418 | "includes": {}, 419 | "excludes": {} 420 | }, 421 | { 422 | "names": [ 423 | "personality" 424 | ], 425 | "action": "SCMP_ACT_ALLOW", 426 | "args": [ 427 | { 428 | "index": 0, 429 | "value": 8, 430 | "valueTwo": 0, 431 | "op": "SCMP_CMP_EQ" 432 | } 433 | ], 434 | "comment": "", 435 | "includes": {}, 436 | "excludes": {} 437 | }, 438 | { 439 | "names": [ 440 | "personality" 441 | ], 442 | "action": "SCMP_ACT_ALLOW", 443 | "args": [ 444 | { 445 | "index": 0, 446 | "value": 131072, 447 | "valueTwo": 0, 448 | "op": "SCMP_CMP_EQ" 449 | } 450 | ], 451 | "comment": "", 452 | "includes": {}, 453 | "excludes": {} 454 | }, 455 | { 456 | "names": [ 457 | "personality" 458 | ], 459 | "action": "SCMP_ACT_ALLOW", 460 | "args": [ 461 | { 462 | "index": 0, 463 | "value": 131080, 464 | "valueTwo": 0, 465 | "op": "SCMP_CMP_EQ" 466 | } 467 | ], 468 | "comment": "", 469 | "includes": {}, 470 | "excludes": {} 471 | }, 472 | { 473 | "names": [ 474 | "personality" 475 | ], 476 | "action": "SCMP_ACT_ALLOW", 477 | "args": [ 478 | { 479 | "index": 0, 480 | "value": 4294967295, 481 | "valueTwo": 0, 482 | "op": "SCMP_CMP_EQ" 483 | } 484 | ], 485 | "comment": "", 486 | "includes": {}, 487 | "excludes": {} 488 | }, 489 | { 490 | "names": [ 491 | "sync_file_range2" 492 | ], 493 | "action": "SCMP_ACT_ALLOW", 494 | "args": [], 495 | "comment": "", 496 | "includes": { 497 | "arches": [ 498 | "ppc64le" 499 | ] 500 | }, 501 | "excludes": {} 502 | }, 503 | { 504 | "names": [ 505 | "arm_fadvise64_64", 506 | "arm_sync_file_range", 507 | "sync_file_range2", 508 | "breakpoint", 509 | "cacheflush", 510 | "set_tls" 511 | 
], 512 | "action": "SCMP_ACT_ALLOW", 513 | "args": [], 514 | "comment": "", 515 | "includes": { 516 | "arches": [ 517 | "arm", 518 | "arm64" 519 | ] 520 | }, 521 | "excludes": {} 522 | }, 523 | { 524 | "names": [ 525 | "arch_prctl" 526 | ], 527 | "action": "SCMP_ACT_ALLOW", 528 | "args": [], 529 | "comment": "", 530 | "includes": { 531 | "arches": [ 532 | "amd64", 533 | "x32" 534 | ] 535 | }, 536 | "excludes": {} 537 | }, 538 | { 539 | "names": [ 540 | "modify_ldt" 541 | ], 542 | "action": "SCMP_ACT_ALLOW", 543 | "args": [], 544 | "comment": "", 545 | "includes": { 546 | "arches": [ 547 | "amd64", 548 | "x32", 549 | "x86" 550 | ] 551 | }, 552 | "excludes": {} 553 | }, 554 | { 555 | "names": [ 556 | "s390_pci_mmio_read", 557 | "s390_pci_mmio_write", 558 | "s390_runtime_instr" 559 | ], 560 | "action": "SCMP_ACT_ALLOW", 561 | "args": [], 562 | "comment": "", 563 | "includes": { 564 | "arches": [ 565 | "s390", 566 | "s390x" 567 | ] 568 | }, 569 | "excludes": {} 570 | }, 571 | { 572 | "names": [ 573 | "open_by_handle_at" 574 | ], 575 | "action": "SCMP_ACT_ALLOW", 576 | "args": [], 577 | "comment": "", 578 | "includes": { 579 | "caps": [ 580 | "CAP_DAC_READ_SEARCH" 581 | ] 582 | }, 583 | "excludes": {} 584 | }, 585 | { 586 | "names": [ 587 | "bpf", 588 | "clone", 589 | "fanotify_init", 590 | "lookup_dcookie", 591 | "mount", 592 | "name_to_handle_at", 593 | "perf_event_open", 594 | "quotactl", 595 | "setdomainname", 596 | "sethostname", 597 | "setns", 598 | "syslog", 599 | "umount", 600 | "umount2", 601 | "unshare" 602 | ], 603 | "action": "SCMP_ACT_ALLOW", 604 | "args": [], 605 | "comment": "", 606 | "includes": { 607 | "caps": [ 608 | "CAP_SYS_ADMIN" 609 | ] 610 | }, 611 | "excludes": {} 612 | }, 613 | { 614 | "names": [ 615 | "clone" 616 | ], 617 | "action": "SCMP_ACT_ALLOW", 618 | "args": [ 619 | { 620 | "index": 0, 621 | "value": 2114060288, 622 | "valueTwo": 0, 623 | "op": "SCMP_CMP_MASKED_EQ" 624 | } 625 | ], 626 | "comment": "", 627 | "includes": {}, 628 | 
"excludes": { 629 | "caps": [ 630 | "CAP_SYS_ADMIN" 631 | ], 632 | "arches": [ 633 | "s390", 634 | "s390x" 635 | ] 636 | } 637 | }, 638 | { 639 | "names": [ 640 | "clone" 641 | ], 642 | "action": "SCMP_ACT_ALLOW", 643 | "args": [ 644 | { 645 | "index": 1, 646 | "value": 2114060288, 647 | "valueTwo": 0, 648 | "op": "SCMP_CMP_MASKED_EQ" 649 | } 650 | ], 651 | "comment": "s390 parameter ordering for clone is different", 652 | "includes": { 653 | "arches": [ 654 | "s390", 655 | "s390x" 656 | ] 657 | }, 658 | "excludes": { 659 | "caps": [ 660 | "CAP_SYS_ADMIN" 661 | ] 662 | } 663 | }, 664 | { 665 | "names": [ 666 | "reboot" 667 | ], 668 | "action": "SCMP_ACT_ALLOW", 669 | "args": [], 670 | "comment": "", 671 | "includes": { 672 | "caps": [ 673 | "CAP_SYS_BOOT" 674 | ] 675 | }, 676 | "excludes": {} 677 | }, 678 | { 679 | "names": [ 680 | "chroot" 681 | ], 682 | "action": "SCMP_ACT_ALLOW", 683 | "args": [], 684 | "comment": "", 685 | "includes": { 686 | "caps": [ 687 | "CAP_SYS_CHROOT" 688 | ] 689 | }, 690 | "excludes": {} 691 | }, 692 | { 693 | "names": [ 694 | "delete_module", 695 | "init_module", 696 | "finit_module", 697 | "query_module" 698 | ], 699 | "action": "SCMP_ACT_ALLOW", 700 | "args": [], 701 | "comment": "", 702 | "includes": { 703 | "caps": [ 704 | "CAP_SYS_MODULE" 705 | ] 706 | }, 707 | "excludes": {} 708 | }, 709 | { 710 | "names": [ 711 | "acct" 712 | ], 713 | "action": "SCMP_ACT_ALLOW", 714 | "args": [], 715 | "comment": "", 716 | "includes": { 717 | "caps": [ 718 | "CAP_SYS_PACCT" 719 | ] 720 | }, 721 | "excludes": {} 722 | }, 723 | { 724 | "names": [ 725 | "kcmp", 726 | "process_vm_readv", 727 | "process_vm_writev", 728 | "ptrace", 729 | "perf_event_open" 730 | ], 731 | "action": "SCMP_ACT_ALLOW", 732 | "args": [], 733 | "comment": "", 734 | "includes": { 735 | "caps": [ 736 | "CAP_SYS_PTRACE" 737 | ] 738 | }, 739 | "excludes": {} 740 | }, 741 | { 742 | "names": [ 743 | "iopl", 744 | "ioperm" 745 | ], 746 | "action": "SCMP_ACT_ALLOW", 747 | "args": 
[], 748 | "comment": "", 749 | "includes": { 750 | "caps": [ 751 | "CAP_SYS_RAWIO" 752 | ] 753 | }, 754 | "excludes": {} 755 | }, 756 | { 757 | "names": [ 758 | "settimeofday", 759 | "stime", 760 | "clock_settime" 761 | ], 762 | "action": "SCMP_ACT_ALLOW", 763 | "args": [], 764 | "comment": "", 765 | "includes": { 766 | "caps": [ 767 | "CAP_SYS_TIME" 768 | ] 769 | }, 770 | "excludes": {} 771 | }, 772 | { 773 | "names": [ 774 | "vhangup" 775 | ], 776 | "action": "SCMP_ACT_ALLOW", 777 | "args": [], 778 | "comment": "", 779 | "includes": { 780 | "caps": [ 781 | "CAP_SYS_TTY_CONFIG" 782 | ] 783 | }, 784 | "excludes": {} 785 | }, 786 | { 787 | "names": [ 788 | "get_mempolicy", 789 | "mbind", 790 | "set_mempolicy" 791 | ], 792 | "action": "SCMP_ACT_ALLOW", 793 | "args": [], 794 | "comment": "", 795 | "includes": { 796 | "caps": [ 797 | "CAP_SYS_NICE" 798 | ] 799 | }, 800 | "excludes": {} 801 | }, 802 | { 803 | "names": [ 804 | "syslog" 805 | ], 806 | "action": "SCMP_ACT_ALLOW", 807 | "args": [], 808 | "comment": "", 809 | "includes": { 810 | "caps": [ 811 | "CAP_SYSLOG" 812 | ] 813 | }, 814 | "excludes": {} 815 | } 816 | ] 817 | } 818 | -------------------------------------------------------------------------------- /buildworker/secret.env: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/staticfloat/julia-docker/fb13f4b022fcd4889a83d0e4fcdc96948d45ede0/buildworker/secret.env -------------------------------------------------------------------------------- /buildworker/start_worker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Helper function to watch logfiles once they are created 3 | watch_the_log() 4 | { 5 | while [ ! -f "$1" ]; do 6 | sleep 1; 7 | done 8 | tail -f "$1" 2>/dev/null 9 | } 10 | # Start a log watcher in the background for twistd.log 11 | watch_the_log /buildworker/worker/twistd.log & 12 | 13 | # Start our buildworker! 
14 | cd /buildworker/worker 15 | rm -f twistd.pid 16 | exec twistd -ny buildbot.tac 17 | -------------------------------------------------------------------------------- /common.mk: -------------------------------------------------------------------------------- 1 | # BUILD_OS and BUILD_ARCH are the actual OS and architecture of the machine we're running on 2 | BUILD_OS=$(shell uname -s) 3 | BUILD_ARCH=$(shell uname -m) 4 | 5 | # Given a BUILD_ARCH, we can determine which architectures we can build images for 6 | ifeq ($(BUILD_ARCH),x86_64) 7 | BUILD_ARCHS=x86_64 i686 8 | else ifeq ($(BUILD_ARCH),i686) 9 | BUILD_ARCHS=i686 10 | else ifeq ($(BUILD_ARCH),ppc64le) 11 | BUILD_ARCHS=ppc64le 12 | else ifeq ($(BUILD_ARCH),aarch64) 13 | BUILD_ARCHS=aarch64 armv7l 14 | else ifeq ($(BUILD_ARCH),armv7l) 15 | BUILD_ARCHS=armv7l 16 | endif 17 | 18 | # Begin by listing all the Dockerfiles in the `workerbase/` directory, and 19 | # storing those into $(HFS). Many of our rules will be built from these names 20 | HFS=$(notdir $(basename $(wildcard $(dir $(MAKEFILE_LIST))/workerbase/*.Dockerfile))) 21 | 22 | # Filter a list of inputs to select only ones that contain the build archs we're interested in 23 | define arch_filt 24 | $(foreach ARCH,$(1),$(foreach w,$(2),$(if $(findstring $(ARCH),$(w)),$(w),))) 25 | endef 26 | 27 | # Helper function that adds $(2) as a dependency to rule $(1) 28 | define add_dep 29 | $(1): $(2) 30 | endef 31 | 32 | # Helper function that takes in an arch or an OS-arch tuple and 33 | # prefixes it with the appropriate prefix 34 | define worker_tag_name 35 | $(strip staticfloat/julia_workerbase:$(1)) 36 | endef 37 | define tabularasa_tag_name 38 | $(strip staticfloat/julia_tabularasa:$(1)) 39 | endef 40 | define crossbuild_tag_name 41 | $(strip staticfloat/julia_$(patsubst %-$(lastword $(subst -, ,$(1))),%, $(1)):$(lastword $(subst -, ,$(1)))) 42 | endef 43 | 44 | # If we have `--squash` support, then use it! 
45 | ifneq ($(shell docker build --help 2>/dev/null | grep squash),) 46 | DOCKER_BUILD = docker build --squash 47 | else 48 | DOCKER_BUILD = docker build 49 | endif 50 | 51 | print-%: 52 | @echo '$*=$($*)' 53 | -------------------------------------------------------------------------------- /crossbuild/Makefile: -------------------------------------------------------------------------------- 1 | include ../common.mk 2 | 3 | # Make our own HFS off of the files in this directory 4 | HFS:=$(subst .Dockerfile,,$(wildcard *.Dockerfile)) 5 | BUILD_HFS=$(call arch_filt,$(BUILD_ARCHS),$(HFS)) 6 | 7 | # Build "dockerfiles" target that assembles all Dockerfiles 8 | $(foreach f,$(HFS),$(eval $(call add_dep,dockerfiles,build/$(f)/Dockerfile))) 9 | 10 | # Build "buildall" target that attempts to build every Dockerfile in the room, 11 | # but only from the ones that our build architecture can manage. 12 | $(foreach f,$(BUILD_HFS),$(eval $(call add_dep,buildall,build-$(f)))) 13 | 14 | # Build "pushall" target that pushes up the result of "buildall" 15 | #$(foreach f,$(BUILD_HFS),$(eval $(call add_dep,pushall,push-$(f)))) 16 | 17 | # This is where we put our derived Dockerfiles 18 | lib: 19 | ln -sf ../workerbase/lib lib 20 | patches: 21 | ln -sf ../workerbase/patches patches 22 | 23 | # Temporary folder for generated output 24 | SHELL=/bin/bash 25 | TMPDIR=/tmp 26 | 27 | define build_dockerfile 28 | # Running just `make build-crossshard-x86_64-linux-gnu-x64` will build that image 29 | build-$(1): build/$(1)/Dockerfile 30 | docker build -t $(call crossbuild_tag_name,$(1)) "build/$(1)" 31 | 32 | buildsquash-$(1): build/$(1)/Dockerfile 33 | $(DOCKER_BUILD) --pull -t $(call crossbuild_tag_name,$(1)) "build/$(1)" 34 | 35 | shell-$(1): 36 | docker run -ti $(call crossbuild_tag_name,$(1)) 37 | 38 | # Running `make push-ubuntu16_04-x86` will upload that image to 39 | #push-$(1): buildsquash-$(1) 40 | # docker push $(call crossbuild_tag_name,$(1)) 41 | 42 | # This is how we build the 
actual Dockerfile 43 | build/$(1)/Dockerfile: Makefile lib patches $(shell ../dockerdeps $(1).Dockerfile) 44 | @if [ ! -f "$(1).Dockerfile" ]; then \ 45 | echo "Target \"$(1)\" is invalid, recheck your spelling good sir."; \ 46 | exit 1; \ 47 | fi 48 | @mkdir -p "build/$(1)" 49 | @rm -f "build/$(1)/Dockerfile.tmp" 50 | 51 | @# Build the altered Dockerfile 52 | @echo "$(1).Dockerfile" 53 | @../dockerchain "./$(1).Dockerfile" > "build/$(1)/Dockerfile.tmp" 54 | @echo "## This file was autogenerated" > "build/$(1)/Dockerfile" 55 | @echo "# Do not edit directly; edit the .Dockerfile files" >> "build/$(1)/Dockerfile" 56 | @echo "#" >> "build/$(1)/Dockerfile" 57 | @echo "# To build this docker image via \`make\`, run \`make build-$(1)\` in the \`crossbuild\` directory" >> "build/$(1)/Dockerfile" 58 | @echo "# To build this docker image manually, run \`docker build --pull -t $(call crossbuild_tag_name,$(1)) .\`" >> "build/$(1)/Dockerfile" 59 | @echo >> "build/$(1)/Dockerfile" 60 | @cat "build/$(1)/Dockerfile.tmp" >> "build/$(1)/Dockerfile" 61 | @rm -f "build/$(1)/Dockerfile.tmp" 62 | @cp -L lib/*.sh "build/$(1)/" 63 | @mkdir -p "build/$(1)/crossbuild" 64 | @cp -L lib/crossbuild/*.sh "build/$(1)/crossbuild/" 65 | cp -LR cmake_toolchains "build/$(1)/" 66 | cp -LR patches "build/$(1)/" 67 | endef 68 | 69 | # Special dependency rules to ensure that the shards are built in the right orders 70 | TARGETS = $(patsubst crossshard-%-x64.Dockerfile,%,$(wildcard crossshard-*-x64.Dockerfile)) 71 | BUILD_TARGETS = $(patsubst %,build-crossshard-%-x64,$(TARGETS)) 72 | $(foreach f,$(BUILD_TARGETS),$(eval $(f): crossbase-x64.Dockerfile)) 73 | 74 | # Generate build- rules for each dockerfile in the directory 75 | $(foreach f,$(HFS),$(eval $(call build_dockerfile,$(f)))) 76 | 77 | # This is how we make .sha256 files 78 | %.sha256: % 79 | @shasum -a 256 $< | cut -d' ' -f1 > $@ 80 | 81 | # We have special options to squash things with 82 | SQUASHFS_OPTS=-force-uid 0 -force-gid 0 -comp xz -b 
1048576 -Xdict-size 100% -noappend 83 | 84 | # This is where the shards get uploaded 85 | S3_PREFIX=s3://julialangmirror/binarybuilder 86 | 87 | define shard_build_recipe 88 | # Rule to extract a shard out into a .tar file (what docker natively exports) 89 | $(TMPDIR)/rootfs-$(1).tar: 90 | @echo "Exporting $(1) shard to $$@" 91 | @if [ "$(1)" == "base" ]; then \ 92 | IMG_NAME=staticfloat/julia_crossbase:x64; \ 93 | else \ 94 | IMG_NAME=staticfloat/julia_crossshard-$(1):x64; \ 95 | fi; \ 96 | CONTAINER_ID=$$$$(docker run -ti --rm -d $$$$IMG_NAME bash); \ 97 | echo "Running in container $$$$CONTAINER_ID, exporting..."; \ 98 | docker export $$$$CONTAINER_ID -o $$@; \ 99 | echo "Stopping container $$$$CONTAINER_ID..."; \ 100 | docker stop $$$$CONTAINER_ID >/dev/null 101 | 102 | # Add docker build dependency 103 | #ifeq ($(1),base) 104 | #$(TMPDIR)/rootfs-$(1).tar: build-crossbase-x64 105 | #else 106 | #$(TMPDIR)/rootfs-$(1).tar: build-crossshard-$(1)-x64 107 | #endif 108 | 109 | # Rule to extract (and cleanup) a shard into a plain directory 110 | $(TMPDIR)/rootfs-$(1): $(TMPDIR)/rootfs-$(1).tar 111 | @echo "Unpacking $$<..." 
112 | @rm -rf $$@; mkdir -p $$@ 113 | @tar -xf $$< -C $$@ --exclude='dev/null' --exclude='usr/share/terminfo' 114 | @if [ -d $$@/opt/$(1)/MacOSX*.sdk ]; then \ 115 | echo "Removing OSX SDK..."; \ 116 | rm -rf $$@/opt/$(1)/MacOSX*.sdk; \ 117 | fi 118 | @if [ "$(1)" == "base" ]; then \ 119 | echo "Cleaning up base image..."; \ 120 | touch $$@/dev/null; \ 121 | touch $$@/dev/urandom; \ 122 | touch $$@/dev/ptmx; \ 123 | echo "nameserver 8.8.8.8" > $$@/etc/resolv.conf; \ 124 | echo "nameserver 8.8.4.4" >> $$@/etc/resolv.conf; \ 125 | echo "nameserver 4.4.4.4" >> $$@/etc/resolv.conf; \ 126 | fi 127 | 128 | # Rule to extract shard into a .tar.gz 129 | $(TMPDIR)/rootfs-$(1).tar.gz: $(TMPDIR)/rootfs-$(1) 130 | @rm -f $$@ 131 | @if [ "$(1)" == "base" ]; then SRC="$$<"; else SRC="$$ \"$$(cat $${f}.sha256)\","; \ 182 | done 183 | @echo " )" 184 | @# Then print out the tarball hashes: 185 | @echo " tarball_hashes = Dict(" 186 | @for f in $(TMPDIR)/rootfs-*.tar.gz; do \ 187 | f_prefix=$$(basename $${f%.*.*}); \ 188 | triplet=$${f_prefix:7}; \ 189 | echo " \"$${triplet}\" => \"$$(cat $${f}.sha256)\","; \ 190 | done 191 | @echo " )" 192 | 193 | clean: 194 | rm -rf build lib patches 195 | -------------------------------------------------------------------------------- /crossbuild/README.md: -------------------------------------------------------------------------------- 1 | # Crossbuild images 2 | 3 | The crossbuild images are used to create a docker image that contains GCC cross-compilers for every OS and architecture combination we officially support for use with [`BinaryBuilder.jl`](https://github.com/JuliaPackaging/BinaryBuilder.jl). The `Dockerfile`s are normal except they have the added feature of an `INCLUDE` statement that pastes in another `Dockerfile`. 4 | 5 | The cross compilation toolkits are all installed into triplet-specific subdirectories of `/opt`. 
Because the overall cross-compilation environment is an `x86_64` Linux image, tools that are target-independent (such as `patchelf` or `cmake`) are installed straight to `/usr/local` and are always available. This environment is intended for use with environment variables set up such that `/opt/<triplet>/bin` is on the `PATH`, so that naive calls to `gcc` will use the correct cross-compiler. See [`BinaryBuilder.jl`](https://github.com/JuliaPackaging/BinaryBuilder.jl) for more detail on [which environment variables are defined](https://github.com/JuliaPackaging/BinaryBuilder.jl/blob/76a3073753bd017aaf522ed068ea29418f1059c0/src/DockerRunner.jl#L108-L133) for a particular target triplet. 6 | 7 | The build result is uploaded as the `staticfloat/julia_crossbuild:x64` image [on DockerHub](https://hub.docker.com/r/staticfloat/julia_crossbuild/). 8 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/aarch64-linux-gnu.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for aarch64-linux-gnu 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR aarch64) 4 | 5 | set(CMAKE_SYSROOT /opt/aarch64-linux-gnu/aarch64-linux-gnu/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/aarch64-linux-gnu/bin/aarch64-linux-gnu-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/aarch64-linux-gnu/bin/aarch64-linux-gnu-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/aarch64-linux-musl.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for aarch64-linux-musl 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR aarch64) 4 | 5 | set(CMAKE_SYSROOT /opt/aarch64-linux-musl/aarch64-linux-musl/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/aarch64-linux-musl/bin/aarch64-linux-musl-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/aarch64-linux-musl/bin/aarch64-linux-musl-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/arm-linux-gnueabihf.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for arm-linux-gnueabihf 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR arm) 4 | 5 | set(CMAKE_SYSROOT /opt/arm-linux-gnueabihf/arm-linux-gnueabihf/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/arm-linux-gnueabihf/bin/arm-linux-gnueabihf-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/arm-linux-gnueabihf/bin/arm-linux-gnueabihf-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/arm-linux-musleabihf.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for arm-linux-musleabihf 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR arm) 4 | 5 | set(CMAKE_SYSROOT /opt/arm-linux-musleabihf/arm-linux-musleabihf/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/arm-linux-musleabihf/bin/arm-linux-musleabihf-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/arm-linux-musleabihf/bin/arm-linux-musleabihf-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/i686-linux-gnu.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for i686-linux-gnu 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR i686) 4 | 5 | set(CMAKE_SYSROOT /opt/i686-linux-gnu/i686-linux-gnu/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/i686-linux-gnu/bin/i686-linux-gnu-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/i686-linux-gnu/bin/i686-linux-gnu-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/i686-linux-musl.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for i686-linux-musl 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR i686) 4 | 5 | set(CMAKE_SYSROOT /opt/i686-linux-musl/i686-linux-musl/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/i686-linux-musl/bin/i686-linux-musl-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/i686-linux-musl/bin/i686-linux-musl-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/i686-w64-mingw32.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for i686-w64-mingw32 2 | set(CMAKE_SYSTEM_NAME Windows) 3 | set(CMAKE_SYSTEM_PROCESSOR i686) 4 | 5 | set(CMAKE_SYSROOT /opt/i686-w64-mingw32/i686-w64-mingw32/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/i686-w64-mingw32/bin/i686-w64-mingw32-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/i686-w64-mingw32/bin/i686-w64-mingw32-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/powerpc64le-linux-gnu.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for powerpc64le-linux-gnu 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR ppc64le) 4 | 5 | set(CMAKE_SYSROOT /opt/powerpc64le-linux-gnu/powerpc64le-linux-gnu/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/powerpc64le-linux-gnu/bin/powerpc64le-linux-gnu-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/powerpc64le-linux-gnu/bin/powerpc64le-linux-gnu-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/x86_64-apple-darwin14.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for x86_64-apple-darwin14 2 | set(CMAKE_SYSTEM_NAME Darwin) 3 | set(CMAKE_SYSTEM_PROCESSOR x86_64) 4 | set(CMAKE_SYSTEM_VERSION 14.5) 5 | 6 | # Extract out DARWIN_MAJOR_VERSION and DARWIN_MINOR_VERSION 7 | string(REGEX REPLACE "^([0-9]+)\\.([0-9]+).*$" "\\1" DARWIN_MAJOR_VERSION "${CMAKE_SYSTEM_VERSION}") 8 | string(REGEX REPLACE "^([0-9]+)\\.([0-9]+).*$" "\\2" DARWIN_MINOR_VERSION "${CMAKE_SYSTEM_VERSION}") 9 | 10 | # Enable rpath support for 10.5 and greater where it is known to work. 11 | if("${DARWIN_MAJOR_VERSION}" GREATER 8) 12 | set(CMAKE_SHARED_LIBRARY_RUNTIME_C_FLAG "-Wl,-rpath,") 13 | endif() 14 | 15 | set(CMAKE_SYSROOT /opt/x86_64-apple-darwin14/MacOSX10.10.sdk/) 16 | set(CMAKE_SYSTEM_FRAMEWORK_PATH 17 | ${CMAKE_SYSROOT}/System/Library/Frameworks 18 | ${CMAKE_SYSROOT}/System/Library/PrivateFrameworks 19 | ) 20 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 21 | 22 | set(CMAKE_C_COMPILER /opt/x86_64-apple-darwin14/bin/clang) 23 | set(CMAKE_CXX_COMPILER /opt/x86_64-apple-darwin14/bin/clang++) 24 | 25 | # These settings don't seem to function properly, they stop cmake from being 26 | # able to find anything within the workspace at all. 
27 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 28 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 29 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 30 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 31 | if( $ENV{CC} MATCHES ccache ) 32 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 33 | endif() 34 | 35 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/x86_64-linux-gnu.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for x86_64-linux-gnu 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR x86_64) 4 | 5 | set(CMAKE_SYSROOT /opt/x86_64-linux-gnu/x86_64-linux-gnu/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/x86_64-linux-gnu/bin/x86_64-linux-gnu-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/x86_64-linux-gnu/bin/x86_64-linux-gnu-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/x86_64-linux-musl.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for x86_64-linux-musl 2 | set(CMAKE_SYSTEM_NAME Linux) 3 | set(CMAKE_SYSTEM_PROCESSOR x86_64) 4 | 5 | set(CMAKE_SYSROOT /opt/x86_64-linux-musl/x86_64-linux-musl/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/x86_64-linux-musl/bin/x86_64-linux-musl-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/x86_64-linux-musl/bin/x86_64-linux-musl-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/x86_64-unknown-freebsd11.1.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for x86_64-unknown-freebsd11.1 2 | set(CMAKE_SYSTEM_NAME FreeBSD) 3 | set(CMAKE_SYSTEM_PROCESSOR x86_64) 4 | 5 | set(CMAKE_SYSROOT /opt/x86_64-unknown-freebsd11.1/x86_64-unknown-freebsd11.1/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/x86_64-unknown-freebsd11.1/bin/clang) 9 | set(CMAKE_CXX_COMPILER /opt/x86_64-unknown-freebsd11.1/bin/clang++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/cmake_toolchains/x86_64-w64-mingw32.toolchain: -------------------------------------------------------------------------------- 1 | # Toolchain file for x86_64-w64-mingw32 2 | set(CMAKE_SYSTEM_NAME Windows) 3 | set(CMAKE_SYSTEM_PROCESSOR x86_64) 4 | 5 | set(CMAKE_SYSROOT /opt/x86_64-w64-mingw32/x86_64-w64-mingw32/sys-root/) 6 | set(CMAKE_INSTALL_PREFIX /workspace/destdir/) 7 | 8 | set(CMAKE_C_COMPILER /opt/x86_64-w64-mingw32/bin/x86_64-w64-mingw32-gcc) 9 | set(CMAKE_CXX_COMPILER /opt/x86_64-w64-mingw32/bin/x86_64-w64-mingw32-g++) 10 | 11 | # These settings don't seem to function properly, they stop cmake from being 12 | # able to find anything within the workspace at all. 
13 | #set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 14 | #set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 15 | #set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 16 | #set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 17 | if( $ENV{CC} MATCHES ccache ) 18 | set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) 19 | endif() 20 | 21 | -------------------------------------------------------------------------------- /crossbuild/crossbase-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:latest as base 2 | 3 | ## First, setup base system with our special commands and tools and whatnot 4 | # These are where we'll do all our work, so make them now 5 | RUN mkdir -p /src /downloads 6 | 7 | # Get our bash script library ready 8 | COPY crossbuild/build.sh /build.sh 9 | 10 | # We use the "download_unpack.sh" command a lot, we need a `tar` wrapper to insert 11 | # extra command line flags on every `tar` command, we have an `update_configure_scripts` 12 | # command, and we fake out `uname` depending on the value of `$target`. GCC uses 13 | # gnuisms for sha512sum, so we need to work around that as well. 
14 | COPY download_unpack.sh /usr/local/bin 15 | COPY tar_wrapper.sh /usr/local/bin/tar 16 | COPY update_configure_scripts.sh /usr/local/bin/update_configure_scripts 17 | COPY fake_uname.sh /usr/local/bin/uname 18 | RUN rm /usr/bin/sha512sum 19 | COPY fake_sha512sum.sh /usr/local/bin/sha512sum 20 | 21 | RUN chmod +x /usr/local/bin/* 22 | 23 | # Install build tools 24 | RUN apk add --update curl make patch tar gawk autoconf automake python libtool git bison flex pkgconfig zip unzip gdb xz bash sudo file libintl findutils wget openssl ca-certificates libstdc++ libgcc pv 25 | 26 | # Also install glibc, to do so we need to first import a packaging key 27 | RUN curl -q -# -L https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub -o /etc/apk/keys/sgerrand.rsa.pub 28 | RUN curl -q -# -L https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.26-r0/glibc-2.26-r0.apk -o /tmp/glibc.apk 29 | RUN apk add /tmp/glibc.apk; rm -f /tmp/glibc.apk 30 | 31 | # Install a few tools from scratch, and patch cmake defaults 32 | RUN apk add gcc g++ 33 | INCLUDE lib/cmake_install 34 | INCLUDE lib/patchelf_install 35 | INCLUDE lib/super_binutils_install 36 | INCLUDE lib/objconv_install 37 | INCLUDE lib/ccache_install 38 | RUN apk del gcc g++ 39 | 40 | # We want to be able to do things like "source" 41 | SHELL ["/bin/bash", "-c"] 42 | ENV TERM="screen-256color" 43 | RUN echo "alias ll='ls -la'" >> /root/.bashrc 44 | 45 | # We need to override the ld conf to search /usr/local before /usr 46 | RUN echo "/usr/local/lib64:/usr/local/lib:/lib:/usr/lib" > /etc/ld-musl-x86_64.path 47 | 48 | # Create /overlay_workdir so that we know we can always mount an overlay there. 
Same with /meta 49 | RUN mkdir /overlay_workdir /meta 50 | 51 | 52 | ## Create "builder" stage that just contains a bunch of stuff we need to build 53 | # our cross-compilers, but aren't actually runtime requirements 54 | FROM base as shard_builder 55 | RUN apk add clang gcc g++ fuse freetype tiff mesa linux-headers gettext-dev libgcc 56 | 57 | # Build the sandbox toward the end, so that if we need to iterate on this we don't disturb the 58 | # shards (which are built off of the `shard_builder` above. 59 | FROM shard_builder as sandbox_builder 60 | ADD https://raw.githubusercontent.com/JuliaPackaging/BinaryBuilder.jl/8a5fdcc7c4bad920b924e68c4ffe438ddc35b930/deps/sandbox.c /sandbox.c 61 | RUN gcc -static -std=c99 -o /sandbox /sandbox.c; rm -f /sandbox.c 62 | 63 | ## Create "crossbuild" stage that contains "sandbox" and is slightly cleaned up 64 | FROM base as crossbuild 65 | COPY --from=sandbox_builder /sandbox /sandbox 66 | RUN rm -rf /downloads /build.sh 67 | 68 | # Set default workdir 69 | WORKDIR /workspace 70 | CMD ["/bin/bash"] 71 | -------------------------------------------------------------------------------- /crossbuild/crossshard-aarch64-linux-gnu-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # build for AArch64 4 | FROM shard_builder as shard_aarch64-linux-gnu 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="aarch64-linux-gnu" 7 | INCLUDE lib/linux_glibc_crosscompiler_install 8 | -------------------------------------------------------------------------------- /crossbuild/crossshard-aarch64-linux-musl-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # AArch64 musl! 
4 | FROM shard_builder as shard_aarch64-linux-musl 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="aarch64-linux-musl" 7 | INCLUDE lib/linux_musl_crosscompiler_install 8 | -------------------------------------------------------------------------------- /crossbuild/crossshard-arm-linux-gnueabihf-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # Build for armv7l 4 | FROM shard_builder as shard_arm-linux-gnueabihf 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="arm-linux-gnueabihf" 7 | INCLUDE lib/linux_glibc_crosscompiler_install 8 | -------------------------------------------------------------------------------- /crossbuild/crossshard-arm-linux-musleabihf-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # flex your arm musl 4 | FROM shard_builder as shard_arm-linux-musleabihf 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="arm-linux-musleabihf" 7 | INCLUDE lib/linux_musl_crosscompiler_install 8 | 9 | -------------------------------------------------------------------------------- /crossbuild/crossshard-i686-linux-gnu-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # build gcc for i686. 
Again use an especially old glibc version to maximize compatibility 4 | FROM shard_builder as shard_i686-linux-gnu 5 | INCLUDE lib/crossbuild/version_defaults 6 | ARG glibc_version=2.19 7 | ENV compiler_target="i686-linux-gnu" 8 | INCLUDE lib/linux_glibc_crosscompiler_install 9 | 10 | -------------------------------------------------------------------------------- /crossbuild/crossshard-i686-linux-musl-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # musl on i686 4 | FROM shard_builder as shard_i686-linux-musl 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="i686-linux-musl" 7 | INCLUDE lib/linux_musl_crosscompiler_install 8 | -------------------------------------------------------------------------------- /crossbuild/crossshard-i686-w64-mingw32-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # Build for win32. We use gcc 6.X, so that we stay with the old 4 | # gfortran.3 ABI, not gfortran.4, as that doesn't work with our Julia builds. 
5 | FROM shard_builder as shard_i686-w64-mingw32 6 | INCLUDE lib/crossbuild/version_defaults 7 | ENV compiler_target="i686-w64-mingw32" 8 | ARG gcc_version="6.4.0" 9 | INCLUDE lib/win_crosscompiler_install 10 | -------------------------------------------------------------------------------- /crossbuild/crossshard-powerpc64le-linux-gnu-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # build gcc for ppc64le (we need a more recent glibc here as well) 4 | # We require at least version 2.22 for the fixes to assembler problems: 5 | # https://sourceware.org/bugzilla/show_bug.cgi?id=18116 6 | # We require at least version 2.24 for the fixes to memset.S: 7 | # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=843691 8 | FROM shard_builder as shard_powerpc64le-linux-gnu 9 | INCLUDE lib/crossbuild/version_defaults 10 | ENV compiler_target="powerpc64le-linux-gnu" 11 | ARG glibc_version=2.25 12 | INCLUDE lib/linux_glibc_crosscompiler_install 13 | -------------------------------------------------------------------------------- /crossbuild/crossshard-x86_64-apple-darwin14-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # build for mac64 4 | FROM shard_builder as shard_x86_64-apple-darwin14 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="x86_64-apple-darwin14" 7 | INCLUDE lib/osx_crosscompiler_install 8 | -------------------------------------------------------------------------------- /crossbuild/crossshard-x86_64-linux-gnu-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # build gcc for x86_64. 
Use an especially old glibc version to maximize compatibility 4 | FROM shard_builder as shard_x86_64-linux-gnu 5 | INCLUDE lib/crossbuild/version_defaults 6 | ARG glibc_version=2.12.2 7 | ENV compiler_target="x86_64-linux-gnu" 8 | INCLUDE lib/linux_glibc_crosscompiler_install 9 | -------------------------------------------------------------------------------- /crossbuild/crossshard-x86_64-linux-musl-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # x86_64 musl 4 | FROM shard_builder as shard_x86_64-linux-musl 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="x86_64-linux-musl" 7 | INCLUDE lib/linux_musl_crosscompiler_install 8 | -------------------------------------------------------------------------------- /crossbuild/crossshard-x86_64-unknown-freebsd11.1-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # x86_64 FreeBSD build 4 | FROM shard_builder as shard_x86_64-unknown-freebsd11.1 5 | INCLUDE lib/crossbuild/version_defaults 6 | ENV compiler_target="x86_64-unknown-freebsd11.1" 7 | INCLUDE lib/freebsd_crosscompiler_install 8 | -------------------------------------------------------------------------------- /crossbuild/crossshard-x86_64-w64-mingw32-x64.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbase-x64 2 | 3 | # Build for win64. We use gcc 6.X, so that we stay with the old 4 | # gfortran.3 ABI, not gfortran.4, as that doesn't work with our Julia builds. 
5 | FROM shard_builder as shard_x86_64-w64-mingw32 6 | INCLUDE lib/crossbuild/version_defaults 7 | ENV compiler_target="x86_64-w64-mingw32" 8 | ARG gcc_version="6.4.0" 9 | INCLUDE lib/win_crosscompiler_install 10 | -------------------------------------------------------------------------------- /crossbuild/lib: -------------------------------------------------------------------------------- 1 | ../workerbase/lib -------------------------------------------------------------------------------- /dockerchain: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # spits out a Dockerfile, inlining any `INCLUDE` statements it finds 4 | if [[ -z "$1" ]]; then 5 | echo "Usage: $0 path/to/Dockerfile" 6 | exit 1 7 | fi 8 | 9 | if [[ ! -f "$1" ]]; then 10 | echo "$1 does not exist!" 1>&2 11 | exit 1 12 | fi 13 | 14 | function cat_inlined() 15 | { 16 | cd "$(dirname "$1")" 17 | while read -r line || [[ -n "$line" ]]; do 18 | INC_FILE=$(echo "$line" | sed -n 's_^INCLUDE \(.*\)_\1_p') 19 | if [[ -n "$INC_FILE" ]]; then 20 | if [[ ! -f "$INC_FILE" ]]; then 21 | # Check to see if it's just missing the .Dockerfile at the end 22 | if [[ -f "$INC_FILE.Dockerfile" ]]; then 23 | INC_FILE="$INC_FILE.Dockerfile" 24 | else 25 | echo "ERROR: Could not INCLUDE $INC_FILE" >&2 26 | exit 1 27 | fi 28 | fi 29 | (cat_inlined "$INC_FILE") 30 | else 31 | echo "$line" 32 | fi 33 | done < "$(basename "$1")" 34 | } 35 | 36 | (cat_inlined "$1") 37 | -------------------------------------------------------------------------------- /dockerdeps: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ -z "$1" ]]; then 4 | echo "Usage: $0 path/to/Dockerfile" 5 | exit 1 6 | fi 7 | 8 | if [[ ! -f "$1" ]]; then 9 | echo "$1 does not exist!" 
1>&2 10 | exit 1 11 | fi 12 | 13 | function cat_deps() 14 | { 15 | # Print out this file 16 | (cd $(dirname "$1"); echo $(pwd)/$(basename "$1")) 17 | 18 | # See if we need to recurse 19 | cd "$(dirname "$1")" 20 | while read -r line || [[ -n "$line" ]]; do 21 | INC_FILE=$(echo $line | sed -n 's_^INCLUDE \(.*\)_\1_p') 22 | if [[ -n "$INC_FILE" ]]; then 23 | if [[ ! -f "$INC_FILE" ]]; then 24 | # Check to see if it's just missing the .Dockerfile at the end 25 | if [[ -f "$INC_FILE.Dockerfile" ]]; then 26 | INC_FILE="$INC_FILE.Dockerfile" 27 | else 28 | echo "ERROR: Could not INCLUDE $INC_FILE" >&2 29 | exit 1 30 | fi 31 | fi 32 | (cat_deps "$INC_FILE") 33 | fi 34 | done <<< "$(grep INCLUDE "$(basename "$1")")" 35 | } 36 | 37 | (cat_deps "$1" | sort | uniq) 38 | -------------------------------------------------------------------------------- /freebsd/telegraf_sysctl_probe: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Combine this with the following in your /usr/local/etc/telegraf.conf: 4 | # 5 | # [[inputs.exec]] 6 | # commands = [ 7 | # "/usr/local/bin/telegraf_sysctl_probe kern.ipc.pipekva,kern.ipc.piperesizefail,kern.ipc.pipeallocfail", 8 | # ] 9 | # 10 | # timeout = "1s" 11 | # data_format = "influx" 12 | 13 | if [ -z "$1" ]; then 14 | echo "Usage: $0 sysctl1,sysctl2,..." 
>&2 15 | exit 1; 16 | fi 17 | 18 | VALUES="" 19 | for p in $(echo $1 | tr ',' ' '); do 20 | VALUES="${VALUES} ${p}=$(sysctl ${p} | cut -d' ' -f2-)" 21 | done 22 | 23 | echo sysctl $(echo ${VALUES} | tr ' ' ',') -------------------------------------------------------------------------------- /julia/Dockerfile: -------------------------------------------------------------------------------- 1 | # First, install things we need in order to download and verify Julia 2 | RUN ${L32} apt update && \ 3 | ${L32} apt install -y --no-install-recommends ca-certificates curl gnupg 4 | 5 | RUN ${L32} gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 3673DF529D9049477F76B37566E3C7DC03D6E495 6 | 7 | # We template this in here instead of passing it in as an ARG at the top like 8 | # we do with L32 so that we can re-use the `apt update` and `gpg --keyserver` 9 | # steps above as much as possible as they take a nontrivial amount of time. 10 | ARG JULIA_URL="{{JULIA_URL}}" 11 | 12 | # Download Julia 13 | RUN ${L32} curl -sSL "${JULIA_URL}" -o julia.tar.gz 14 | 15 | # Verify the Julia download (this is removed for builds without a .tar.gz.asc) 16 | RUN ${L32} curl -sSL "${JULIA_URL}.asc" -o julia.tar.gz.asc && \ 17 | ${L32} gpg --batch --verify julia.tar.gz.asc julia.tar.gz && \ 18 | ${L32} rm -r "${HOME}/.gnupg" julia.tar.gz.asc 19 | 20 | # Unpack Julia into /usr 21 | RUN ${L32} tar -xzf julia.tar.gz -C /usr --strip-components 1 22 | 23 | # Cleanup after yourself 24 | RUN ${L32} rm -rf /var/lib/apt/lists/* julia.tar.gz* 25 | 26 | # Create entrypoint.sh to shoe-horn ${L32} into everything 27 | RUN ${L32} echo "#!/bin/bash" > /entrypoint.sh && \ 28 | ${L32} echo "${L32} \"\$@\"" >> /entrypoint.sh && \ 29 | ${L32} chmod +x /entrypoint.sh 30 | ENTRYPOINT ["/entrypoint.sh"] 31 | CMD ["/usr/bin/julia"] 32 | -------------------------------------------------------------------------------- /julia/Makefile: -------------------------------------------------------------------------------- 1 | 
include ../common.mk 2 | 3 | # By default, build all the dockerfiles 4 | all: dockerfiles 5 | 6 | # All the versions we'll generate Dockerfiles for 7 | VERS := $(VERS) v0.4.0 v0.4.1 v0.4.2 v0.4.3 v0.4.4 v0.4.5 v0.4.6 v0.4.7 8 | VERS := $(VERS) v0.5.0 v0.5.1 v0.5.2 9 | VERS := $(VERS) v0.6.0 v0.6.1 v0.6.2 v0.6.3 10 | VERS := $(VERS) v0.7.0-alpha 11 | VERS := $(VERS) nightly 12 | 13 | # Make some short version mappings 14 | SHORT_VERS := $(shell (for v in $(VERS); do echo $${v%.*}; done) | sort | uniq) 15 | # But don't let "nightly" into the short version mappings 16 | SHORT_VERS := $(filter-out nightly,$(SHORT_VERS)) 17 | 18 | # Convenience function to take in `v0.5` and spit out `v0.5.2` as the highest 19 | # matching version. (At least, it was at the time of writing this comment) 20 | define expand_version 21 | $(lastword $(sort $(filter $(1)%,$(VERS)))) 22 | endef 23 | 24 | # Convenience function to build the docker tag for an image 25 | define julia_tag_name 26 | $(strip staticfloat/julia:$(firstword $(subst -, ,$(2)))-$(1)) 27 | endef 28 | 29 | # Arch-dependent values 30 | ARCHS=x64 x86 ppc64le aarch64 armv7l 31 | IMAGE-x64=multiarch/debian-debootstrap:amd64-jessie 32 | IMAGE-x86=multiarch/debian-debootstrap:i386-jessie 33 | IMAGE-ppc64le=multiarch/debian-debootstrap:ppc64el-jessie 34 | IMAGE-aarch64=multiarch/debian-debootstrap:arm64-jessie 35 | IMAGE-armv7l=multiarch/debian-debootstrap:armhf-jessie 36 | 37 | # Only on x86 and armv7l will we set L32 38 | L32-x86=linux32 39 | L32-armv7l=linux32 40 | 41 | define major_version 42 | $(strip $(shell echo $(1) | sed -E -n 's/v?([0-9]+\.[0-9]+).*/\1/p')) 43 | endef 44 | 45 | # Grumble grumble inconsistency 46 | TAR_ARCH-x64=x86_64 47 | TAR_ARCH-x86=i686 48 | TAR_ARCH-ppc64le=ppc64le 49 | TAR_ARCH-aarch64=aarch64 50 | TAR_ARCH-armv7l=arm 51 | UP_ARCH-x64=x64 52 | UP_ARCH-x86=x86 53 | UP_ARCH-ppc64le=ppc64le 54 | UP_ARCH-aarch64=aarch64 55 | UP_ARCH-armv7l=arm 56 | 57 | # grumble grumble EVEN MORE INCONSISTENCY 58 | 
TAR_NIGHTLY_ARCH-x64=64 59 | TAR_NIGHTLY_ARCH-x86=32 60 | TAR_NIGHTLY_ARCH-ppc64le=ppc64 61 | TAR_NIGHTLY_ARCH-aarch64=aarch64 62 | TAR_NIGHTLY_ARCH-armv7l=arm 63 | 64 | 65 | define JULIA_URL 66 | $(strip https://julialang-s3.julialang.org/bin/linux/$(UP_ARCH-$(1))/$(call major_version,$(2))/julia-$(subst v,,$(2))-linux-$(TAR_ARCH-$(1)).tar.gz) 67 | endef 68 | 69 | define JULIA_NIGHTLY_URL 70 | $(strip https://julialangnightlies-s3.julialang.org/bin/linux/$(UP_ARCH-$(1))/julia-latest-linux$(TAR_NIGHTLY_ARCH-$(1)).tar.gz) 71 | endef 72 | 73 | define build_dockerfile 74 | build/$(2)/Dockerfile.$(1): Makefile Dockerfile 75 | @mkdir -p $$(dir $$@) 76 | @echo $(2)-$(1) 77 | @echo "## This file was autogenerated" > "$$@" 78 | @echo "# Do not edit directly; edit Makefile and top-level Dockerfile" >> "$$@" 79 | @echo "FROM $(IMAGE-$(1))" >> "$$@" 80 | @echo "ARG L32=\"$(L32-$(1))\"" >> "$$@" 81 | @echo >> "$$@" 82 | @if [ "$(2)" = "nightly" ]; then \ 83 | cat Dockerfile | sed -e 's&{{JULIA_URL}}&$(call JULIA_NIGHTLY_URL,$(1))&g' | grep -v .tar.gz.asc >> "$$@"; \ 84 | else \ 85 | cat Dockerfile | sed -e 's&{{JULIA_URL}}&$(call JULIA_URL,$(1),$(2))&g' >> "$$@"; \ 86 | fi 87 | 88 | build-$(2)-$(1): build/$(2)/Dockerfile.$(1) 89 | $(DOCKER_BUILD) --pull -t $(call julia_tag_name,$(1),$(2)) -f build/$(2)/Dockerfile.$(1) build/$(2) 90 | 91 | push-$(2)-$(1): 92 | docker push $(call julia_tag_name,$(1),$(2)) 93 | endef 94 | 95 | # Redo some of that work for the shortened names, which don't actually build 96 | # an image, but just re-tag a previously built image 97 | define build_retags 98 | push-$(2)-$(1): 99 | docker push $(call julia_tag_name,$(1),$(2)) 100 | 101 | retag-$(2)-$(1): 102 | docker tag $(call julia_tag_name,$(1),$(call expand_version,$(2))) $(call julia_tag_name,$(1),$(2)) 103 | endef 104 | 105 | # Construct the actual build targets for all the dockerfiles 106 | $(foreach v,$(VERS),$(foreach a,$(ARCHS),$(eval $(call build_dockerfile,$(a),$(v))))) 107 | 108 | # Build 
"dockerfiles" target that assembles all Dockerfiles 109 | $(foreach v,$(VERS),$(foreach a,$(ARCHS),$(eval $(call add_dep,dockerfiles,build/$(v)/Dockerfile.$(a))))) 110 | 111 | # Build "buildall" target that attempts to build every Dockerfile in the room, 112 | # but only from the ones that our build architecture can manage. 113 | $(foreach v,$(VERS),$(foreach a,$(BUILD_ARCHS),$(eval $(call add_dep,buildall,build-$(v)-$(a))))) 114 | 115 | # Build "pushall" target that pushes up the result of "buildall" 116 | $(foreach v,$(VERS),$(foreach a,$(BUILD_ARCHS),$(eval $(call add_dep,pushall,push-$(v)-$(a))))) 117 | 118 | # Also push up the short tag names as well. :) 119 | $(foreach v,$(SHORT_VERS),$(foreach a,$(ARCHS), $(eval $(call build_retags,$(a),$(v))))) 120 | $(foreach v,$(SHORT_VERS),$(foreach a,$(BUILD_ARCHS),$(eval $(call add_dep,pushall,retag-$(v)-$(a))))) 121 | $(foreach v,$(SHORT_VERS),$(foreach a,$(BUILD_ARCHS),$(eval $(call add_dep,pushall,push-$(v)-$(a))))) 122 | 123 | clean: 124 | rm -rf build 125 | -------------------------------------------------------------------------------- /julia/README.md: -------------------------------------------------------------------------------- 1 | # Julia 2 | 3 | Super-simple docker image that downloads binary distributions of Julia across a wide range of versions and architectures. 
4 | -------------------------------------------------------------------------------- /macos/provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z "${BUILDBOT_PASSWORD}" ]]; then 4 | echo "Must define BUILDBOT_PASSWORD" >&2 5 | exit 1 6 | fi 7 | 8 | brew install tmux bash ccache reattach-to-user-namespace gcc@7 9 | 10 | # We want `gfortran` to mean `gfortran-7` 11 | ln -s $(which gfortran-7) /usr/local/bin/gfortran 12 | 13 | # Install buildbot-worker 14 | pip3 install --user buildbot-worker 15 | export PATH=$PATH:$(echo ~/Library/Python/*/bin) 16 | mkdir ~/buildbot 17 | 18 | # Install buildbot worker directories 19 | ARCH="x86_64" 20 | if [[ $(uname -m) == "arm64" ]]; then 21 | ARCH="aarch64" 22 | fi 23 | 24 | buildbot-worker create-worker --keepalive=100 --umask 0o022 worker build.julialang.org:9989 macos-${ARCH}-$(hostname -s) ${BUILDBOT_PASSWORD} 25 | buildbot-worker create-worker --keepalive=100 --umask 0o022 worker-tabularasa build.julialang.org:9989 tabularasa_macos-${ARCH}-$(hostname -s) ${BUILDBOT_PASSWORD} 26 | echo "Elliot Saba " > worker/info/admin 27 | echo "Elliot Saba " > worker-tabularasa/info/admin 28 | echo "Julia $(hostname -s) buildworker" > worker/info/host 29 | echo "Julia tabularasa $(hostname -s) buildworker" > worker-tabularasa/info/host 30 | 31 | # Add startup scripts for them all 32 | startup_script --name buildbot --exe $(which buildbot-worker) --chdir ~/buildbot --args "restart --nodaemon worker" --env "HOME=${HOME}" 33 | startup_script --name buildbot-tabularasa --exe $(which buildbot-worker) --chdir ~/buildbot --args "restart --nodaemon worker-tabularasa" --env "HOME=${HOME}" 34 | 35 | # Start the services right meow :3 36 | sudo launchctl load -w /Library/LaunchDaemons/buildbot.plist 37 | sudo launchctl load -w /Library/LaunchDaemons/buildbot-tabularasa.plist 38 | 39 | echo "Okay done! 
Next steps:" 40 | echo " * Turn on auto-login, to avoid pbcopy/pbpaste errors" 41 | echo " * Copy over xcode.keychain and unlock_keychain.sh" 42 | echo " * Add SessionCreate to plist files" 43 | -------------------------------------------------------------------------------- /tabularasa/Makefile: -------------------------------------------------------------------------------- 1 | include ../common.mk 2 | 3 | # Only include HFS entries that exist within this directory as well 4 | HFS:=$(filter $(subst .Dockerfile,,$(wildcard *.Dockerfile)),$(HFS)) 5 | BUILD_HFS=$(call arch_filt,$(BUILD_ARCHS),$(HFS)) 6 | 7 | # Build "dockerfiles" target that assembles all Dockerfiles 8 | $(foreach f,$(HFS),$(eval $(call add_dep,dockerfiles,build/$(f)/Dockerfile))) 9 | 10 | # Build "buildall" target that attempts to build every Dockerfile in the room, 11 | # but only from the ones that our build architecture can manage. 12 | $(foreach f,$(BUILD_HFS),$(eval $(call add_dep,buildall,build-$(f)))) 13 | 14 | # Build "pushall" target that pushes up the result of "buildall" 15 | $(foreach f,$(BUILD_HFS),$(eval $(call add_dep,pushall,push-$(f)))) 16 | 17 | # This is where we put our derived Dockerfiles 18 | lib: 19 | ln -sf ../workerbase/lib lib 20 | 21 | 22 | define build_dockerfile 23 | # Running just `make build-ubuntu16_04-x86` will build that image 24 | build-$(1): build/$(1)/Dockerfile 25 | $(DOCKER_BUILD) --pull -t $(call tabularasa_tag_name,$(1)) "build/$(1)" 26 | 27 | shell-$(1): 28 | docker run -ti $(call tabularasa_tag_name,$(1)) 29 | 30 | # Running `make push-ubuntu16_04-x86` will upload that image to 31 | push-$(1): 32 | docker push $(call tabularasa_tag_name,$(1)) 33 | 34 | # This is how we build the actual Dockerfile 35 | build/$(1)/Dockerfile: $(1).Dockerfile lib Makefile 36 | @if [ ! 
-f "$(1).Dockerfile" ]; then \ 37 | echo "Target \"$(1)\" is invalid, recheck your spelling good sir."; \ 38 | exit 1; \ 39 | fi 40 | @mkdir -p "build/$(1)" 41 | @rm -f "build/$(1)/Dockerfile.tmp" 42 | 43 | @# Build the altered Dockerfile 44 | @echo "$(1).Dockerfile" 45 | @../dockerchain "./$(1).Dockerfile" > "build/$(1)/Dockerfile.tmp" 46 | @echo "## This file was autogenerated" > "build/$(1)/Dockerfile" 47 | @echo "# Do not edit directly; edit the .Dockerfile files" >> "build/$(1)/Dockerfile" 48 | @echo "#" >> "build/$(1)/Dockerfile" 49 | @echo "# To build this docker image via \`make\`, run \`make build-$(1)\` in the \`tabularasa\` directory" >> "build/$(1)/Dockerfile" 50 | @echo "# To build this docker image manually, run \`docker build --pull -t $(call tabularasa_tag_name,$(1)) .\`" >> "build/$(1)/Dockerfile" 51 | @echo >> "build/$(1)/Dockerfile" 52 | @cat "build/$(1)/Dockerfile.tmp" >> "build/$(1)/Dockerfile" 53 | @rm -f "build/$(1)/Dockerfile.tmp" 54 | @cp lib/*.sh "build/$(1)/" 55 | endef 56 | 57 | $(foreach f,$(HFS),$(eval $(call build_dockerfile,$(f)))) 58 | 59 | 60 | clean: 61 | rm -rf build lib 62 | -------------------------------------------------------------------------------- /tabularasa/alpine3_8-x86_64.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8 2 | 3 | 4 | 5 | 6 | 7 | INCLUDE lib/alpha 8 | RUN apk add gcc g++ make libressl-dev zlib-dev bzip2-dev curl tar xz libffi-dev 9 | INCLUDE lib/python_install 10 | INCLUDE lib/omega 11 | 12 | -------------------------------------------------------------------------------- /tabularasa/centos6_9-x86_64.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:6.9 2 | 3 | 4 | 5 | 6 | INCLUDE lib/alpha 7 | RUN yum update -y && yum install -y gcc gcc-c++ make openssl-devel zlib-devel bzip2-devel curl tar xz 8 | INCLUDE lib/python_install 9 | 10 | 11 | INCLUDE lib/omega 12 | 
-------------------------------------------------------------------------------- /tabularasa/debian8-aarch64.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM arm64v8/debian:8 2 | 3 | # Eliminate troublesome debian-security repos, as they dropped support for Jessie 4 | RUN sed -i '/debian-security/d' /etc/apt/sources.list 5 | 6 | INCLUDE lib/alpha 7 | RUN apt update -y && apt install -y python python-dev curl build-essential 8 | RUN curl -L 'https://bootstrap.pypa.io/get-pip.py' | python 9 | 10 | # This enables qemu-*-static emulation on x86_64 11 | ARG QEMU_ARCH=aarch64 12 | INCLUDE lib/multiarch 13 | 14 | INCLUDE lib/omega 15 | -------------------------------------------------------------------------------- /tabularasa/debian8-armv7l.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM arm32v7/debian:8 2 | 3 | # Eliminate troublesome debian-security repos, as they dropped support for Jessie 4 | #RUN sed -i '/debian-security/d' /etc/apt/sources.list 5 | 6 | # This enables putting `linux32` before commands like `./configure` and `make` 7 | ARG L32=linux32 8 | 9 | INCLUDE lib/alpha 10 | RUN apt-get update && apt-get install -y python python-dev curl build-essential 11 | RUN curl -L 'https://bootstrap.pypa.io/get-pip.py' | python 12 | 13 | # This enables qemu-*-static emulation on x86_64 14 | ARG QEMU_ARCH=arm 15 | INCLUDE lib/multiarch 16 | 17 | INCLUDE lib/omega 18 | -------------------------------------------------------------------------------- /tabularasa/debian8_9-i686.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM i386/debian:8.9 2 | 3 | # This enables putting `linux32` before commands like `./configure` and `make` 4 | ARG L32=linux32 5 | 6 | INCLUDE lib/alpha 7 | RUN apt-get update && apt-get install -y python python-dev curl build-essential 8 | RUN curl -L 'https://bootstrap.pypa.io/get-pip.py' | python 9 
| 10 | INCLUDE lib/omega 11 | -------------------------------------------------------------------------------- /tabularasa/debian9-ppc64le.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ppc64le/debian:9 2 | 3 | # Eliminate troublesome debian-security repos, as they dropped support for Jessie 4 | RUN sed -i '/debian-security/d' /etc/apt/sources.list 5 | 6 | INCLUDE lib/alpha 7 | RUN apt update -y && apt install -y python python-dev curl build-essential 8 | RUN curl -L 'https://bootstrap.pypa.io/get-pip.py' | python 9 | 10 | # This enables qemu-*-static emulation on x86_64 11 | ARG QEMU_ARCH=ppc64le 12 | INCLUDE lib/multiarch 13 | 14 | INCLUDE lib/omega 15 | -------------------------------------------------------------------------------- /telegraf/.env: -------------------------------------------------------------------------------- 1 | PROJECT=julia 2 | -------------------------------------------------------------------------------- /telegraf/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM telegraf 2 | 3 | RUN apt update && apt install -y lm-sensors 4 | COPY telegraf.conf /etc/telegraf/telegraf.conf 5 | -------------------------------------------------------------------------------- /telegraf/Makefile: -------------------------------------------------------------------------------- 1 | up: 2 | docker-compose build --pull 3 | docker-compose up --remove-orphans -d 4 | 5 | down: 6 | docker-compose down 7 | 8 | destroy: 9 | docker-compose down -v --remove-orphans 10 | 11 | shell: 12 | docker-compose exec telegraf /bin/bash 13 | 14 | logs: 15 | docker-compose logs -f --tail=100 16 | -------------------------------------------------------------------------------- /telegraf/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.3' 2 | services: 3 | telegraf: 4 | build: . 
5 | volumes: 6 | - /proc:/proc2 7 | - /var/run/docker.sock:/var/run/docker.sock 8 | environment: 9 | - HOSTNAME=${HOSTNAME} 10 | - PROJECT=${PROJECT} 11 | - HOST_PROC=/proc2 12 | network_mode: "host" 13 | restart: unless-stopped 14 | -------------------------------------------------------------------------------- /windows/bootstrap.ps1: -------------------------------------------------------------------------------- 1 | 2 | Set-ExecutionPolicy Unrestricted 3 | 4 | # GitHub became TLS 1.2 only on Feb 22, 2018 5 | [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12; 6 | $url = "https://raw.githubusercontent.com/staticfloat/julia-docker/master/windows/provision.ps1" 7 | Invoke-WebRequest -Uri $url -OutFile "provision.ps1" -ErrorAction Stop 8 | 9 | # Run the provisioning script 10 | ./provision.ps1 11 | 12 | true -------------------------------------------------------------------------------- /windows/provision.ps1: -------------------------------------------------------------------------------- 1 | Set-ExecutionPolicy Unrestricted 2 | 3 | # GitHub became TLS 1.2 only on Feb 22, 2018 4 | [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12; 5 | 6 | Add-Type -AssemblyName System.IO.Compression.FileSystem 7 | function Unzip($ZipFile, $OutPath) { 8 | [System.IO.Compression.ZipFile]::ExtractToDirectory($ZipFile, $OutPath) 9 | } 10 | 11 | 12 | # Install NSSM 13 | function Install-NSSM { 14 | $url = "http://nssm.cc/ci/nssm-2.24-101-g897c7ad.zip" 15 | $installer = Join-Path $env:TEMP 'nssm.zip' 16 | 17 | Write-Output "Installing NSSM..." 
18 | Invoke-WebRequest -Uri $url -OutFile $installer -ErrorAction Stop 19 | Unzip -ZipFile $installer -OutPath "$env:TEMP" -ErrorAction Stop 20 | Move-Item -Path (Join-Path $env:TEMP 'nssm-2.24-101-g897c7ad\win64\nssm.exe') ` 21 | -Destination "C:\Windows\nssm.exe" ` 22 | -ErrorAction Stop 23 | } 24 | Install-NSSM 25 | 26 | 27 | # Install Sysinternals 28 | function Install-Sysinternals { 29 | Write-Output "Installing Sysinternals suite..." 30 | $url = "https://download.sysinternals.com/files/SysinternalsSuite.zip" 31 | $installer = Join-Path $env:TEMP 'sysinternals.zip' 32 | Invoke-WebRequest -Uri "$url" -OutFile "$installer" -ErrorAction Stop 33 | # Unzip directly into C:\Windows 34 | Unzip -ZipFile $installer -OutPath "C:\Windows" -ErrorAction Stop 35 | } 36 | Install-Sysinternals 37 | 38 | 39 | # Install OpenSSH (parameterized on version and listen port, because we need to 40 | # run two different versions on two different ports, for now) 41 | function Install-OpenSSH($version, $listenPort) { 42 | $url = "https://github.com/PowerShell/Win32-OpenSSH/releases/download/$version/OpenSSH-Win64.zip" 43 | 44 | Write-Output "Installing OpenSSH..." 
45 | $installer = Join-Path $env:TEMP 'OpenSSH-$version.zip' 46 | $sshdir = Join-Path $env:ProgramFiles "OpenSSH-$version" 47 | 48 | Invoke-WebRequest -Uri $url -OutFile $installer -ErrorAction Stop 49 | Unzip -ZipFile $installer -OutPath "$env:TEMP" -ErrorAction Stop 50 | 51 | Move-Item -Path (Join-Path $env:TEMP 'OpenSSH-Win64') ` 52 | -Destination "$sshdir" ` 53 | -ErrorAction Stop 54 | 55 | # Create sshd service using NSSM 56 | $sshdpath = Join-Path "$sshdir" "sshd.exe" 57 | nssm install "sshd-$version" "$sshdpath" -p $listenPort 58 | sc.exe privs "sshd-$version" SeAssignPrimaryTokenPrivilege/SeTcbPrivilege/SeBackupPrivilege/SeRestorePrivilege/SeImpersonatePrivilege 59 | 60 | New-NetFirewallRule -Name "sshd-$version" ` 61 | -DisplayName "OpenSSH Server (sshd)" ` 62 | -Group "Remote Access" ` 63 | -Description "Allow access via TCP port 22 to the OpenSSH Daemon" ` 64 | -Enabled True ` 65 | -Direction Inbound ` 66 | -Protocol TCP ` 67 | -LocalPort $listenPort ` 68 | -Program (Join-Path $sshdir 'sshd.exe') ` 69 | -Action Allow ` 70 | -ErrorAction Stop 71 | 72 | Write-Output "Configuring SSH..." 
73 | $bashLaunchScript = "C:\autoexec.cmd" 74 | $cmd = @" 75 | @echo off 76 | 77 | if defined SSH_CLIENT ( 78 | :: check if we've got a terminal hooked up; if not, don't run bash.exe 79 | C:\cygwin\bin\bash.exe -c "if [ -t 1 ]; then exit 1; fi" 80 | if errorlevel 1 ( 81 | set SSH_CLIENT= 82 | C:\cygwin\bin\bash.exe --login 83 | exit 84 | ) 85 | ) 86 | "@ 87 | $cmd | Out-File -Encoding ASCII $bashLaunchScript 88 | $acl = Get-ACL -Path $bashLaunchScript 89 | $newRule = New-Object System.Security.AccessControl.FileSystemAccessRule("Administrator", "ReadAndExecute", "Allow") 90 | $acl.AddAccessRule($newRule) 91 | Set-Acl -Path $bashLaunchScript -AclObject $acl 92 | 93 | $psProfileScript = "C:\Windows\system32\WindowsPowerShell\v1.0\profile.ps1" 94 | $ps = @" 95 | Remove-Item env:SSH_CLIENT 96 | "@ 97 | $ps | Out-File -Encoding ASCII $psProfileScript 98 | 99 | New-ItemProperty -Path "HKLM:Software\Microsoft\Command Processor" -Name AutoRun -ErrorAction Stop ` 100 | -Value "$bashLaunchScript" -PropertyType STRING -Force 101 | } 102 | 103 | # Install an older version that works with VS Code on port 2222 104 | # See https://github.com/microsoft/vscode-remote-release/issues/25 for context. 105 | Install-OpenSSH -version "v7.7.2.0p1-Beta" -listenPort 2222 106 | 107 | # Install a newer version (that doesn't work with VS Code, but works better with shells) on port 22 108 | Install-OpenSSH -version "v8.1.0.0p1-Beta" -listenPort 22 109 | 110 | 111 | # Set hostname, this is used to determine which versions of cygwin to install, etc... 112 | $instanceId = (Invoke-RestMethod -Method Get -Uri http://169.254.169.254/latest/meta-data/instance-id) 113 | $instanceTags = Get-EC2Tag -Filter @{ Name="resource-id"; Values=$instanceId } 114 | $hostname = $instanceTags.Where({$_.Key -eq "Name"}).Value 115 | Rename-Computer -NewName $hostname 116 | 117 | # Install Cygwin (and add itself to PATH) 118 | function Install-Cygwin { 119 | Write-Output "Installing Cygwin..." 
120 | $CygDir="c:\cygwin" 121 | $pkg_list = "git,make,curl,patch,python3,gcc-g++,binutils,gdb,m4,cmake,p7zip,nano,tmux,procps,ccache,time" 122 | 123 | if($hostname.StartsWith("win32")) { 124 | $arch="x86" 125 | $pkg_list += ",mingw64-i686-gcc-g++,mingw64-i686-gcc-fortran" 126 | } else { 127 | $arch="x86_64" 128 | $pkg_list += ",mingw64-x86_64-gcc-g++,mingw64-x86_64-gcc-fortran" 129 | } 130 | 131 | if(!(Test-Path -Path $CygDir -PathType Container)) { 132 | New-Item -Type Directory -Path $CygDir -Force 133 | } 134 | 135 | Invoke-WebRequest -Uri "http://cygwin.com/setup-$arch.exe" -OutFile "$CygDir\setup-$arch.exe" -ErrorAction Stop 136 | Start-Process -wait -FilePath "$CygDir\setup-$arch.exe" -ArgumentList "-q -g -l $CygDir -s http://mirror.cs.vt.edu/pub/cygwin/cygwin/ -R $CygDir -P $pkg_list" 137 | 138 | Invoke-WebRequest -Uri "https://go.microsoft.com/fwlink/p/?LinkID=822845" -OutFile "$CygDir\win10sdksetup.exe" -ErrorAction Stop 139 | Start-Process -Wait -FilePath "$CygDir\win10sdksetup.exe" -ArgumentList "/quiet" 140 | 141 | [Environment]::SetEnvironmentVariable("Path", "C:\cygwin\bin;" + [Environment]::GetEnvironmentVariable("Path", [EnvironmentVariableTarget]::Machine), [EnvironmentVariableTarget]::Machine) 142 | } 143 | Install-Cygwin 144 | 145 | 146 | # Install Python (and add itself to PATH) 147 | function Install-Python { 148 | param ( $version="3.8.2" ) 149 | $url="https://www.python.org/ftp/python/$version/python-$version-amd64.exe" 150 | 151 | Write-Output "Installing Python $version..." 
152 | $installer = Join-Path $env:TEMP "python-$version-amd64.exe" 153 | 154 | Invoke-WebRequest -Uri $url -OutFile $installer -ErrorAction Stop 155 | Start-Process -Wait -FilePath "$installer" -ArgumentList "/quiet InstallAllUsers=1 PrependPath=1 Include_test=0" 156 | 157 | # Gotta explicitly update our path 158 | $env:Path = [System.Environment]::GetEnvironmentVariable("Path","Machine") + ";" + [System.Environment]::GetEnvironmentVariable("Path","User") 159 | } 160 | Install-Python 161 | 162 | 163 | # Install Julia 164 | function Install-Julia { 165 | param ( $version="1.4.2" ) 166 | Write-Output "Installing Julia..." 167 | 168 | if($hostname.StartsWith("win32")) { 169 | $arch="x86" 170 | $bits = "32" 171 | } else { 172 | $arch="x64" 173 | $bits = "64" 174 | } 175 | 176 | $majmin = $version.SubString(0, 3) 177 | $juliaUrl = "https://julialang-s3.julialang.org/bin/winnt/$arch/$majmin/julia-$version-win$bits.exe" 178 | $installer = Join-Path $env:TEMP "julia-installer.exe" 179 | $installdir = Join-Path $env:ProgramFiles 'Julia' 180 | Invoke-WebRequest -Uri "$juliaUrl" -OutFile "$installer" -ErrorAction Stop 181 | 182 | # Run the actual install 183 | Start-Process -Wait -FilePath "$installer" -ArgumentList "/VERYSILENT /DIR=`"$installdir`"" 184 | 185 | # Create shortcut for Julia 186 | Start-Process -Wait -FilePath "C:\cygwin\bin\bash.exe" -ArgumentList "-c 'ln -s `"$(cygpath `"$installdir`")/bin/julia.exe`" /usr/bin/julia'" 187 | } 188 | Install-Julia 189 | 190 | 191 | # Download WireGuard, first, obtaining the wireguard keys from S3 192 | $wgKeysPath = Join-Path $env:TEMP "wireguard.json" 193 | Read-S3Object -BucketName julialangsecure -Key SSH/wireguard.json -File "$wgKeysPath" 194 | $wgKeys = Get-Content -Path "$wgKeysPath" -Encoding ASCII | ConvertFrom-Json 195 | function Install-Wireguard { 196 | Write-Output "Installing WireGuard..." 
197 | $wgUrl = "https://download.wireguard.com/windows-client/wireguard-amd64-0.0.38.msi" 198 | $wgInstallFile = Join-Path $env:TEMP "wireguard.msi" 199 | $wgInstallDir = Join-Path $env:ProgramFiles 'WireGuard' 200 | Invoke-WebRequest -Uri "$wgUrl" -OutFile "$wgInstallFile" -ErrorAction Stop 201 | Start-Process -Wait -FilePath "$wgInstallFile" -ArgumentList "/quiet" 202 | 203 | # Only install tunnel service if we find our hostname 204 | if($wgKeys.PSObject.Properties.Name -contains $hostname) { 205 | $wgaddr, $wgseckey = $wgKeys.$hostname 206 | 207 | $wgConfigFile = @" 208 | [Interface] 209 | Address = $wgaddr 210 | PrivateKey = $wgseckey 211 | 212 | [Peer] 213 | PublicKey = pZq1HmTtHyYP5bToj+hrpVIITbe2oeRlyP19O1D6/QU= 214 | Endpoint = mieli.e.ip.saba.us:37 215 | AllowedIPs = fd37:5040::/64 216 | PersistentKeepalive = 45 217 | "@ 218 | 219 | Set-Content -Path $wgInstallDir\wg0.conf -NoNewline -Encoding ASCII -Value $wgConfigFile 220 | & "$wgInstallDir\wireguard.exe" /installtunnelservice "$wgInstallDir\wg0.conf" 221 | 222 | $rewgScriptFile = @' 223 | #!/bin/bash 224 | 225 | net stop WireGuardTunnel\$wg0 226 | net start WireGuardTunnel\$wg0 227 | '@ 228 | Set-Content -Path $wgInstallDir\rewg.sh -NoNewline -Encoding ASCII -Value $rewgScriptFile 229 | 230 | # Auto-restart wireguard every 8 hours, to help with DNS changes: 231 | $A = New-ScheduledTaskAction -Execute "C:\cygwin\bin\bash.exe" -Argument "$(cygpath $wgInstallDir\rewg.sh)" 232 | $T = New-ScheduledTaskTrigger -Once -At (Get-Date) -RepetitionInterval (New-TimeSpan -Hours 8) 233 | Register-ScheduledTask -Action $A -Trigger $T -TaskName "rewg" -User "$hostname\Administrator" -Description "Wireguard tunnel auto-restarter" 234 | } 235 | } 236 | Install-Wireguard 237 | 238 | 239 | # Install Telegraf 240 | function Install-Telegraf { 241 | Write-Output "Installing Telegraf..." 
242 | $telegrafUrl = "https://dl.influxdata.com/telegraf/releases/telegraf-1.12.4_windows_amd64.zip" 243 | $telegrafZip = Join-Path $env:TEMP 'telegraf.zip' 244 | Invoke-WebRequest -Uri $telegrafUrl -OutFile $telegrafZip -ErrorAction Stop 245 | 246 | $telegrafInstallDir = Join-Path $env:ProgramFiles 'Telegraf' 247 | Unzip -ZipFile $telegrafZip -OutPath "$env:TEMP" -ErrorAction Stop 248 | Move-Item -Path (Join-Path $env:TEMP 'telegraf') ` 249 | -Destination $telegrafInstallDir -ErrorAction Stop 250 | 251 | # Spit out telegraf's configuration file 252 | $telegrafConf = @" 253 | [global_tags] 254 | project= "julia" 255 | [agent] 256 | interval = "10s" 257 | round_interval = true 258 | metric_batch_size = 1000 259 | metric_buffer_limit = 10000 260 | collection_jitter = "0s" 261 | flush_interval = "60s" 262 | flush_jitter = "10s" 263 | precision = "" 264 | hostname = "$hostname" 265 | omit_hostname = false 266 | [[outputs.influxdb]] 267 | urls = ["http://[fd37:5040::dc82:d3f5:c8b7:c381]:8086"] 268 | content_encoding = "gzip" 269 | [[inputs.cpu]] 270 | percpu = true 271 | totalcpu = true 272 | collect_cpu_time = true 273 | report_active = true 274 | [[inputs.disk]] 275 | ignore_fs = ["tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs"] 276 | [[inputs.diskio]] 277 | [[inputs.kernel]] 278 | [[inputs.mem]] 279 | [[inputs.swap]] 280 | [[inputs.system]] 281 | fielddrop = ["uptime_format"] 282 | [[inputs.net]] 283 | "@ 284 | Set-Content -Path $telegrafInstallDir\telegraf.conf -NoNewline -Encoding ASCII -Value $telegrafConf 285 | 286 | # Load telegraf config if we actually have a route using wireguard 287 | if($wgKeys.PSObject.Properties.Name -contains $hostname) { 288 | & "$telegrafInstallDir\telegraf.exe" --service install 289 | Start-Service telegraf 290 | } 291 | } 292 | Install-Telegraf 293 | 294 | # Disk initialization 295 | Write-Output "Formatting DATA drive..." 
296 | Get-Disk | Where-Object PartitionStyle -eq "RAW" | ` 297 | Initialize-Disk -PartitionStyle GPT -PassThru | ` 298 | New-Volume -FileSystem NTFS -DriveLetter D -FriendlyName 'DATA' 299 | 300 | 301 | # Install buildbot 302 | function Install-Buildbot { 303 | Write-Output "Installing Buildbot..." 304 | if($hostname.StartsWith("win32")) { 305 | $longArch = "i686" 306 | } else { 307 | $longArch = "x86_64" 308 | } 309 | $workerIdx = $hostname.SubString($hostname.Length - 1) 310 | 311 | # Install special version of Twisted that is precompiled for windows and python 3.8 312 | &python -m pip install https://download.lfd.uci.edu/pythonlibs/w3jqiv8s/Twisted-20.3.0-cp38-cp38-win_amd64.whl 313 | # Now that we've got Twisted, the rest of buildbot should be a cinch 314 | &python -m pip install pywin32 buildbot-worker 315 | 316 | # Install buildbot workers 317 | mkdir D:\buildbot 318 | cd D:\buildbot 319 | $worker_exe=(Get-Command "buildbot-worker.exe" | Select-Object -ExpandProperty Definition) 320 | &$worker_exe create-worker --keepalive=100 worker build.julialang.org:9989 win-$longArch-aws_$workerIdx julialang42 321 | &$worker_exe create-worker --keepalive=100 worker-tabularasa build.julialang.org:9989 tabularasa_win-$longArch-aws_$workerIdx julialang42 322 | 323 | # Initialize services 324 | nssm install "workerbuild" "$worker_exe" "start --nodaemon ." 325 | nssm install "workertest" "$worker_exe" "start --nodaemon ." 326 | nssm set "workerbuild" AppDirectory "D:\buildbot\worker" 327 | nssm set "workertest" AppDirectory "D:\buildbot\worker-tabularasa" 328 | nssm set "workerbuild" AppPriority BELOW_NORMAL_PRIORITY_CLASS 329 | nssm set "workertest" AppPriority BELOW_NORMAL_PRIORITY_CLASS 330 | } 331 | Install-Buildbot 332 | 333 | 334 | # Install Mono for its codesigning utilities 335 | function Install-Mono() { 336 | Write-Output "Installing Mono..." 
337 | $url = "https://download.mono-project.com/archive/6.4.0/windows-installer/mono-6.4.0.198-x64-0.msi" 338 | $installer = Join-Path $env:TEMP 'mono.msi' 339 | Invoke-WebRequest -Uri "$url" -OutFile "$installer" -ErrorAction Stop 340 | Start-Process -Wait -FilePath "$installer" -ArgumentList "/quiet" 341 | } 342 | Install-Mono 343 | 344 | 345 | # Install Firefox, because builtin IE is such a pain 346 | function Install-Firefox { 347 | Write-Output "Installing Firefox..." 348 | $url = "https://download.mozilla.org/?product=firefox-latest&os=win64&lang=en-US" 349 | $installer = Join-Path $env:TEMP 'FirefoxSetup.exe' 350 | Invoke-WebRequest -Uri "$url" -OutFile "$installer" -ErrorAction Stop 351 | Start-Process -Wait -FilePath "$installer" -ArgumentList "/S" 352 | } 353 | Install-Firefox 354 | 355 | 356 | # Set password to the contents of a file we get off of a private S3 bucket 357 | $passwordFile = Join-Path $env:TEMP 'password.txt' 358 | Read-S3Object -BucketName julialangsecure -Key SSH/julia-windows-password.txt -File "$passwordFile" 359 | $password = Get-Content -Path "$passwordFile" -Encoding ASCII 360 | Set-LocalUser -Name "Administrator" -Password (ConvertTo-SecureString -AsPlainText "$password" -Force) 361 | 362 | 363 | # Download `autodump.jl` to D: 364 | $url = "https://raw.githubusercontent.com/JuliaCI/julia-buildbot/master/commands/autodump.jl" 365 | Invoke-WebRequest -Uri "$url" -OutFile "D:\autodump.jl" -ErrorAction Stop 366 | 367 | 368 | # Disable windows defender 369 | Set-MpPreference -DisableRealtimeMonitoring $true 370 | New-ItemProperty -Path "HKLM:\SOFTWARE\Policies\Microsoft\Windows Defender" -Name DisableAntiSpyware -Value 1 -PropertyType DWORD -Force 371 | 372 | 373 | # Startup OpenSSH/codesigning as SYSTEM 374 | $SYSTEMScript = @' 375 | $sshdir = 'c:\ProgramData\ssh' 376 | $auth_keys = Join-Path $sshdir 'authorized_keys' 377 | 378 | If (-Not (Test-Path $sshdir)) { 379 | New-Item -Path $sshdir -Type Directory 380 | } 381 | 382 | & 
"C:\Program Files\OpenSSH-v8.1.0.0p1-Beta\ssh-keygen.exe" -A 383 | 384 | # Download the OpenSSH key associated with this instance 385 | $keyUrl = "http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key" 386 | $keyMaterial = (New-Object System.IO.StreamReader ([System.Net.WebRequest]::Create($keyUrl)).GetResponse().GetResponseStream()).ReadToEnd() 387 | 388 | $keyMaterial | Out-File -Append -FilePath $auth_keys -Encoding ASCII 389 | 390 | # Ensure access control on authorized_keys meets the requirements 391 | $acl = Get-ACL -Path $auth_keys 392 | $acl.SetAccessRuleProtection($True, $True) 393 | Set-Acl -Path $auth_keys -AclObject $acl 394 | 395 | $acl = Get-ACL -Path $auth_keys 396 | $ar = New-Object System.Security.AccessControl.FileSystemAccessRule( ` 397 | "NT Authority\Authenticated Users", "ReadAndExecute", "Allow") 398 | $acl.RemoveAccessRule($ar) 399 | $ar = New-Object System.Security.AccessControl.FileSystemAccessRule( ` 400 | "BUILTIN\Administrators", "FullControl", "Allow") 401 | $acl.RemoveAccessRule($ar) 402 | $ar = New-Object System.Security.AccessControl.FileSystemAccessRule( ` 403 | "BUILTIN\Users", "FullControl", "Allow") 404 | $acl.RemoveAccessRule($ar) 405 | Set-Acl -Path $auth_keys -AclObject $acl 406 | 407 | $sshdConfigContent = @" 408 | PasswordAuthentication yes 409 | PubKeyAuthentication yes 410 | PidFile __PROGRAMDATA__/ssh/logs/sshd.pid 411 | AuthorizedKeysFile __PROGRAMDATA__/ssh/authorized_keys 412 | AllowUsers Administrator 413 | 414 | Subsystem sftp sftp-server.exe 415 | "@ 416 | 417 | Set-Content -NoNewline -Path C:\ProgramData\ssh\sshd_config -Value $sshdConfigContent 418 | 419 | # Download codesigning keys and generate codesigning script 420 | $signHome = "C:\cygwin\home\SYSTEM\" 421 | $sdkHome = Resolve-Path "C:\Program Files (x86)/Windows Kits/10/bin/10.*/x64/" | Select -ExpandProperty Path 422 | & mkdir $signHome 423 | 424 | # Download codesigning tools 425 | Read-S3Object -BucketName julialangsecure -Key 
CodeSigning/windows/julia-win-key.pvk -File "$signHome\julia-win-key.pvk" 426 | Read-S3Object -BucketName julialangsecure -Key CodeSigning/windows/julia-win-cert.spc -File "$signHome\julia-win-cert.spc" 427 | Read-S3Object -BucketName julialangsecure -Key CodeSigning/windows/julia-win.pfx -File "$signHome\julia-win.pfx" 428 | Read-S3Object -BucketName julialangsecure -Key CodeSigning/windows/wsdk-sign.sh -File "$signHome/sign.sh" 429 | Read-S3Object -BucketName julialangsecure -Key CodeSigning/windows/wsdk-sign.ps1 -File "$signHome/sign.ps1" 430 | '@ 431 | $SYSTEMScriptPath = Join-Path $env:TEMP 'setup_SYSTEM.ps1' 432 | $SYSTEMScript | Out-File $SYSTEMScriptPath 433 | 434 | # Run the SYSTEM script as, well, SYSTEM. 435 | & psexec /accepteula -i -s Powershell.exe -ExecutionPolicy Bypass -File $SYSTEMScriptPath 436 | if ($LASTEXITCODE -ne 0) { 437 | throw("Failed to run SYSTEM setup") 438 | } 439 | 440 | # Restart to ensure public key authentication works and SSH comes up 441 | Restart-Computer -Force 442 | -------------------------------------------------------------------------------- /workerbase/Makefile: -------------------------------------------------------------------------------- 1 | include ../common.mk 2 | 3 | # Build "dockerfiles" target that assembles all Dockerfiles 4 | $(foreach f,$(HFS),$(eval $(call add_dep,dockerfiles,build/$(f)/Dockerfile))) 5 | 6 | # Build "buildall" target that attempts to build every Dockerfile in the room, 7 | # but only from the ones that our build architecture can manage. 8 | BUILD_HFS=$(call arch_filt,$(BUILD_ARCH),$(HFS)) 9 | $(foreach f,$(BUILD_HFS),$(eval $(call add_dep,buildall,build-$(f)))) 10 | 11 | # Build "pushall" target that pushes up the result of "buildall" 12 | $(foreach f,$(BUILD_HFS),$(eval $(call add_dep,pushall,push-$(f)))) 13 | 14 | # Search Dockerfiles for INCLUDE statements and add them to the make dependency 15 | # tree, so that changing a dependency will rebuild the Dockerfile. 
16 | $(foreach f,$(HFS),$(eval $(call add_dep,build/$(f)/Dockerfile,$(shell ../dockerdeps ./$(f).Dockerfile)))) 17 | 18 | define build_dockerfile 19 | # Running just `make build-ubuntu16_04-x86` will build that image 20 | build-$(1): build/$(1)/Dockerfile 21 | $(DOCKER_BUILD) --pull -t $(call worker_tag_name,$(1)) "build/$(1)" 22 | 23 | shell-$(1): 24 | docker run -ti $(call worker_tag_name,$(1)) 25 | 26 | # Running `make push-ubuntu16_04-x86` will upload that image to docker hub 27 | push-$(1): 28 | docker push $(call worker_tag_name,$(1)) 29 | 30 | # This is how we build the actual Dockerfile 31 | build/$(1)/Dockerfile: $(1).Dockerfile Makefile 32 | @if [ ! -f "$(1).Dockerfile" ]; then \ 33 | echo "Target \"$(1)\" is invalid, recheck your spelling good sir."; \ 34 | exit 1; \ 35 | fi 36 | @mkdir -p "build/$(1)" 37 | @rm -f "build/$(1)/Dockerfile.tmp" 38 | @echo "$(1).Dockerfile" 39 | @../dockerchain ./$(1).Dockerfile > "build/$(1)/Dockerfile.tmp" 40 | @echo "## This file was autogenerated" > "build/$(1)/Dockerfile" 41 | @echo "# Do not edit directly; edit the .Dockerfile files" >> "build/$(1)/Dockerfile" 42 | @echo "#" >> "build/$(1)/Dockerfile" 43 | @echo "# To build this docker image via \`make\`, run \`make build-$(1)\` in the \`workerbase\` directory" >> "build/$(1)/Dockerfile" 44 | @echo "# To build this docker image manually, run \`docker build --pull -t $(call worker_tag_name,$(1)) .\`" >> "build/$(1)/Dockerfile" 45 | @echo >> "build/$(1)/Dockerfile" 46 | @cat "build/$(1)/Dockerfile.tmp" >> "build/$(1)/Dockerfile" 47 | @rm -f "build/$(1)/Dockerfile.tmp" 48 | @# Copy in any auxilliary scripts so they can be installed 49 | @cp lib/*.sh "build/$(1)/" 50 | @cp -R patches/ "build/$(1)/" 51 | @# On Windows, copy in the powershell scripts too 52 | @if [ -n "$(filter win%,$(1))" ]; then \ 53 | cp lib/*.ps1 "build/$(1)/"; \ 54 | fi 55 | endef 56 | 57 | $(foreach f,$(HFS),$(eval $(call build_dockerfile,$(f)))) 58 | 59 | 60 | clean: 61 | rm -rf build 62 | 
-------------------------------------------------------------------------------- /workerbase/README.md: -------------------------------------------------------------------------------- 1 | worker base docker images 2 | ========================= 3 | 4 | To build Julia worker base docker images, run `make` in this directory to generate `Dockerfile` directories within `build`, then run `make `. The result will be docker images named things like `julia_workerbase_centos5_11:x64` and `julia_workerbase_ubuntu16_04:x86`. These images are what are deployed onto in order to create buildworkers, as these images contain things like very recent versions of `gcc`, `python`, `cmake`, etc... 5 | 6 | To generate all the `Dockerfile`s without actually building the docker images, run `make`, or `make dockerfiles`, if you like to be verbose. This is the default behavior. 7 | 8 | To generate a `Dockerfile` and then build the result, run something like `make centos5_11-x86`, or `make ubuntu16_04-x64`. 9 | 10 | To generate all `Dockerfile`s and then build all results, run `make buildall`. Note that this will automatically skip buildworkers that are on an incompatible architecture (e.g. an `x64` machine will only ever build `x86` and `x64` images, not `ppc64le` images) 11 | 12 | To push the resulting docker images to the Docker Hub, run `make pushall`, or `make push-centos5_11-x86` to push only a single image. You must have logged into an authorized Docker Hub account on your local docker daemon for this to work, of course. 13 | 14 | **NOTE:** This repository uses the new experimental `--squash` command introduced in Docker 1.13 in order to cut down on image sizes (Without `--squash` support, the `centos5_11-x86` image is ~6.5 GB, while with it it is a mere ~1.3 GB). While the actual `Dockerfile`s generated by this repository do not have anything experimental within them, the `Makefile` within this repository will attempt to pass `--squash` to `docker build` if it can (e.g. 
if `--squash` is listed withing `docker build --help`). If you are not running a `dockerd` new enough, or with the `experimental` setting set to false, then `--squash` will not be used and your images will be significantly larger. 15 | 16 | ## Multiarch support 17 | Experimental multiarch support is being added for all non-`x86` architectures. All non-`x86` images have a `qemu-*-static` binary in `/usr/bin` that can be used as the entrypoint on an `x86_64` machine. This allows normal usage of the docker image as if it were being used on a native hardware stack of whatever exotic architecture you desire. 18 | 19 | To run e.g. the `armv7l` buildbot image on your `x86_64` system using QEMU for emulation, first run the `qemu_register.sh` script with `sudo` to register the `qemu` paths within docker containers as a valid interpreter for foreign `armv7l`, `aarch64` and `ppc64le` binaries. You can then run the docker image directly, e.g. `docker run -ti staticfloat/julia_workerbase:debian7_11-armv7l` 20 | -------------------------------------------------------------------------------- /workerbase/alpine3_8-x86_64.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8 2 | 3 | 4 | 5 | 6 | 7 | INCLUDE lib/alpha 8 | INCLUDE lib/builddeps_apk 9 | 10 | # musl does not support mudflap, or libsanitizer 11 | # libmpx uses secure_getenv and struct _libc_fpstate not present in musl 12 | # alpine musl provides libssp_nonshared.a, so we don't need libssp either 13 | ARG gcc_configure_flags="--disable-libcilkrts --disable-libssp --disable-libmpx --disable-libmudflap --disable-libsanitizer --disable-multilib --build=x86_64-linux-musl --host=x86_64-linux-musl --target=x86_64-linux-musl" 14 | 15 | INCLUDE lib/build_tools 16 | INCLUDE lib/omega 17 | 18 | -------------------------------------------------------------------------------- /workerbase/centos6_9-x86_64.Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM centos:6.9 2 | 3 | 4 | 5 | 6 | 7 | INCLUDE lib/alpha 8 | INCLUDE lib/builddeps_yum 9 | INCLUDE lib/build_tools 10 | INCLUDE lib/omega 11 | 12 | -------------------------------------------------------------------------------- /workerbase/debian8-aarch64.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM arm64v8/debian:8 2 | 3 | # Eliminate troublesome debian-security repos, as they dropped support for Jessie 4 | RUN sed -i '/debian-security/d' /etc/apt/sources.list 5 | 6 | 7 | INCLUDE lib/alpha 8 | INCLUDE lib/builddeps_apt 9 | INCLUDE lib/build_tools 10 | 11 | # This enables qemu-*-static emulation on x86_64 12 | ARG qemu_arch=aarch64 13 | INCLUDE lib/multiarch 14 | 15 | 16 | INCLUDE lib/omega 17 | -------------------------------------------------------------------------------- /workerbase/debian8-armv7l.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM arm32v7/debian:8 2 | 3 | # This enables putting `linux32` before commands like `./configure` and `make` 4 | ARG L32=linux32 5 | 6 | # We need to manually override binutils/gcc's host triplets 7 | ARG TRIPLET="arm-linux-gnueabihf" 8 | ARG binutils_configure_flags="--host=${TRIPLET} --build=${TRIPLET} --target=${TRIPLET} --enable-lto --enable-plugins" 9 | ARG gcc_configure_flags="--host=${TRIPLET} --build=${TRIPLET} --target=${TRIPLET} --with-arch=armv7-a --with-float=hard --with-fpu=vfpv3-d16 --enable-lto --enable-plugin" 10 | 11 | INCLUDE lib/alpha 12 | INCLUDE lib/builddeps_apt 13 | INCLUDE lib/build_tools 14 | 15 | # This enables qemu-*-static emulation on x86_64 16 | ARG qemu_arch=arm 17 | INCLUDE lib/multiarch 18 | 19 | INCLUDE lib/omega 20 | 21 | -------------------------------------------------------------------------------- /workerbase/debian8_9-i686.Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM i386/debian:8.9 2 | 3 | # This enables putting `linux32` before commands like `./configure` and `make` 4 | ARG L32=linux32 5 | 6 | INCLUDE lib/alpha 7 | INCLUDE lib/builddeps_apt 8 | INCLUDE lib/build_tools 9 | INCLUDE lib/omega 10 | 11 | -------------------------------------------------------------------------------- /workerbase/debian9-ppc64le.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ppc64le/debian:9 2 | 3 | # Eliminate troublesome debian-security repos, as they dropped support for Jessie 4 | RUN sed -i '/debian-security/d' /etc/apt/sources.list 5 | 6 | INCLUDE lib/alpha 7 | INCLUDE lib/builddeps_apt 8 | INCLUDE lib/build_tools 9 | 10 | # This enables qemu-*-static emulation on x86_64 11 | ARG qemu_arch=ppc64le 12 | INCLUDE lib/multiarch 13 | 14 | INCLUDE lib/omega 15 | -------------------------------------------------------------------------------- /workerbase/lib/alpha.Dockerfile: -------------------------------------------------------------------------------- 1 | MAINTAINER Elliot Saba 2 | USER root 3 | 4 | # We create a `buildworker` user so that we don't have to run everything as root 5 | RUN useradd -u 1337 -m -s /bin/bash buildworker || true 6 | RUN adduser -u 1337 -s /bin/bash buildworker || true # <-- for alpine 7 | 8 | # These are where we'll do all our work, so make them now 9 | RUN mkdir -p /src /downloads 10 | RUN chown buildworker:buildworker /src /downloads 11 | 12 | # We use the "download_unpack.sh" command a lot, throw it into /usr/bin 13 | COPY download_unpack.sh /usr/bin 14 | 15 | # Add ourselves to sudoers 16 | RUN mkdir -p /etc/sudoers.d 17 | RUN echo "buildworker ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/buildworker 18 | 19 | # Vim needs to work with arrow keys 20 | RUN echo "set nocompatible" > /home/buildworker/.vimrc && chown buildworker:buildworker /home/buildworker/.vimrc 21 | ENV 
TERM=screen 22 | 23 | # <--- for alpine 24 | RUN apk add bash || true 25 | 26 | # We want to be able to do things like "source" 27 | SHELL ["/bin/bash", "-c"] 28 | -------------------------------------------------------------------------------- /workerbase/lib/binutils_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install binutils 2 | ARG binutils_version=2.29.1 3 | ARG binutils_url=https://ftp.gnu.org/gnu/binutils/binutils-${binutils_version}.tar.xz 4 | 5 | # Use download_unpack to download and unpack binutils and gcc 6 | WORKDIR /src 7 | RUN download_unpack.sh "${binutils_url}" 8 | 9 | # Build binutils! Because it's cheap and easy, we enable essentially every 10 | # target under the sun for binutils 11 | WORKDIR /src/binutils-${binutils_version} 12 | RUN ${L32} ./configure --prefix=/usr/local \ 13 | ${binutils_configure_flags} 14 | RUN ${L32} make -j4 15 | 16 | # Install binutils 17 | USER root 18 | RUN ${L32} make install 19 | 20 | # Cleanup 21 | WORKDIR /src 22 | RUN rm -rf binutils-${binutils_version} 23 | -------------------------------------------------------------------------------- /workerbase/lib/build_tools.Dockerfile: -------------------------------------------------------------------------------- 1 | # Download and install `tar` because some machines have it too old and don't 2 | # know what `.xz` files are. Build it first so that we can use it to extract 3 | # our other tools. Yes, this means this is the only tool that's not built with 4 | # our new GCC, but that's okay. 
5 | USER buildworker 6 | INCLUDE tar_install 7 | 8 | # Download and install `gcc` because we want only the latest in cutting-edge 9 | # compiler technology, and also because LLVM is a needy little piece of software 10 | USER buildworker 11 | INCLUDE binutils_install 12 | USER buildworker 13 | INCLUDE gcc_install 14 | 15 | # Download and install `libtool` based off of our GCC version 16 | USER buildworker 17 | INCLUDE libtool_install 18 | 19 | # Download and install `patchelf` because he's a really standup guy 20 | USER buildworker 21 | INCLUDE patchelf_install 22 | 23 | # Download and install `git` because some of the distributions we build on are 24 | # old enough that `git` isn't even installable from the default distributions 25 | USER buildworker 26 | INCLUDE git_install 27 | 28 | # Download and install `cmake` because LLVM again. Whiner. 29 | USER buildworker 30 | INCLUDE cmake_install 31 | 32 | # Download and install `python` because buildbot doesn't like ancient versions 33 | USER buildworker 34 | INCLUDE python_install 35 | 36 | # Download and install `ccache` to speed up compilation 37 | USER buildworker 38 | INCLUDE ccache_install 39 | 40 | # Download and install `docker`, because we often want to build our own images again 41 | USER buildworker 42 | INCLUDE docker_install 43 | -------------------------------------------------------------------------------- /workerbase/lib/builddeps_apk.Dockerfile: -------------------------------------------------------------------------------- 1 | # Tools that make it easy to get stuff done within the docker image 2 | ARG NICE_TOOLS="vim curl gdb procps sudo" 3 | 4 | # Tools to bootstrap our compiler chain that we will remove afterward 5 | ARG TEMPORARY_DEPS="gcc g++" 6 | 7 | # Tools that we need to build Julia and other deps that we are not going to 8 | # build ourselves 9 | ARG BUILD_DEPS="make musl-dev dpkg-dev m4 libressl-dev patch pkgconfig xz \ 10 | zlib-dev curl-dev expat-dev gettext-dev wget zlib bzip2-dev 
autoconf \ 11 | automake linux-headers libffi-dev" 12 | 13 | # Install all these packages 14 | RUN apk add ${NICE_TOOLS} ${TEMPORARY_DEPS} ${BUILD_DEPS} 15 | 16 | # Create sha512sum wrapper 17 | RUN rm /usr/bin/sha512sum 18 | COPY fake_sha512sum.sh /usr/bin/sha512sum 19 | -------------------------------------------------------------------------------- /workerbase/lib/builddeps_apt.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Download and install needed build dependencies for x86_64 apt-based systems 2 | USER root 3 | RUN ${L32} apt-get update 4 | 5 | # Tools that make it easy to get stuff done within the docker image 6 | ARG NICE_TOOLS="vim curl gdb procps sudo time openssh-client" 7 | 8 | # Tools to bootstrap our compiler chain that we will remove afterward 9 | ARG TEMPORARY_DEPS="gcc g++" 10 | 11 | # Tools that we need to build Julia and other deps that we are not going to 12 | # build ourselves 13 | ARG BUILD_DEPS="make libc6-dev dpkg-dev m4 libssl-dev patch pkg-config \ 14 | libcurl4-openssl-dev libexpat1-dev gettext wget zlib1g-dev \ 15 | libbz2-dev autoconf automake" 16 | 17 | # Install all these packages 18 | RUN ${L32} apt-get install -y ${NICE_TOOLS} ${TEMPORARY_DEPS} ${BUILD_DEPS} 19 | 20 | # I hate that on 32-bit ubuntu, libc6-dev is i386 and libc6-dev-i386 doesn't 21 | # exist. Consistency Conshmistency, amirite? It's too much work to properly 22 | # identify which version we're running on, so instead just allow this to fail. 23 | RUN ${L32} apt-get install -y libc6-dev-i386 || echo "Stupid Ubuntu". 
24 | -------------------------------------------------------------------------------- /workerbase/lib/builddeps_yum.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Download and install needed build dependencies for x86_64 yum-based systems 2 | USER root 3 | RUN ${L32} yum update -y 4 | 5 | # Tools that make it easy to get stuff done within the docker image 6 | ARG NICE_TOOLS="vim curl gdb net-tools which sudo time openssh-client" 7 | 8 | # Tools to bootstrap our compiler chain that we will remove afterward 9 | ARG TEMPORARY_DEPS="gcc gcc-c++" 10 | 11 | # Tools that we need to build Julia and other deps that we are not going to 12 | # build ourselves 13 | ARG BUILD_DEPS="make m4 openssl openssl-devel patch pkg-config curl-devel \ 14 | expat-devel gettext-devel perl-devel wget bzip2 tar \ 15 | zlib-devel bzip2-devel xz rpmdevtools autoconf automake \ 16 | glibc-devel.i686 glibc-devel" 17 | 18 | # Install all these packages 19 | RUN ${L32} yum install -y ${NICE_TOOLS} ${TEMPORARY_DEPS} ${BUILD_DEPS} 20 | 21 | # Fixup sudo problems 22 | RUN ${L32} sed -i.bak -e 's/Defaults[[:space:]]*env_reset//g' /etc/sudoers 23 | RUN ${L32} sed -i.bak -e 's/Defaults[[:space:]]*secure_path[[:space:]]*=.*//g' /etc/sudoers 24 | -------------------------------------------------------------------------------- /workerbase/lib/ccache_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install ccache 2 | ARG ccache_version=3.3.4 3 | ARG ccache_url=https://www.samba.org/ftp/ccache/ccache-${ccache_version}.tar.xz 4 | 5 | WORKDIR /src 6 | 7 | # Use download_unpack to download and unpack ccache 8 | RUN download_unpack.sh "${ccache_url}" 9 | 10 | WORKDIR /src/ccache-${ccache_version} 11 | # We need to patch ccache's configure system since it's ancient 12 | RUN ${L32} curl -L 'https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess' -o config.guess 13 | RUN ${L32} curl 
-L 'https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub' -o config.sub 14 | RUN ${L32} ./configure --prefix=/usr/local 15 | RUN ${L32} make all -j4 16 | 17 | # Install ccache 18 | USER root 19 | RUN ${L32} make install 20 | 21 | # cleanup /src 22 | WORKDIR /src 23 | RUN rm -rf ccache-${ccache_version} 24 | -------------------------------------------------------------------------------- /workerbase/lib/cmake_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install cmake into /usr/local 2 | # Note that when you change `cmake_version`, you need to change the major version 3 | # in the URL below, I would use ${cmake_version%.*} except that those fancy 4 | # substitutions don't work in docker ARG rules. :( 5 | ARG cmake_version=3.16.1 6 | ARG cmake_url=https://cmake.org/files/v3.16/cmake-${cmake_version}.tar.gz 7 | 8 | WORKDIR /src 9 | 10 | # Unfortunately, we have to pass `-k` to `curl` because cmake.org has weird SSL 11 | # certificates, and old versions of `curl` can't deal with it. :( 12 | RUN EXTRA_CURL_FLAGS="-k" download_unpack.sh "${cmake_url}" 13 | 14 | # Build the cmake sources! 
15 | WORKDIR /src/cmake-${cmake_version} 16 | RUN ${L32} ./configure --prefix=/usr/local 17 | RUN ${L32} make -j4 18 | 19 | # Install as root 20 | USER root 21 | RUN ${L32} make install 22 | 23 | # Patch cmake defaults 24 | WORKDIR / 25 | COPY patches/cmake_install.patch /tmp/ 26 | RUN patch -p0 < /tmp/cmake_install.patch; \ 27 | rm -f /tmp/cmake_install.patch 28 | 29 | # Now cleanup /src 30 | WORKDIR /src 31 | RUN rm -rf cmake-${cmake_version} 32 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/binutils_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG binutils_url=https://ftp.gnu.org/gnu/binutils/binutils-${binutils_version}.tar.bz2 2 | 3 | WORKDIR /src 4 | RUN download_unpack.sh "${binutils_url}" 5 | 6 | # Build binutils! 7 | WORKDIR /src/binutils-${binutils_version} 8 | RUN source /build.sh; \ 9 | ./configure \ 10 | --prefix=/opt/${compiler_target} \ 11 | --target=${compiler_target} \ 12 | --with-sysroot="$(get_sysroot)" \ 13 | --enable-multilib \ 14 | --disable-werror 15 | RUN make -j$(nproc) 16 | 17 | # Install binutils 18 | RUN make install 19 | 20 | # Cleanup 21 | WORKDIR /src 22 | RUN rm -rf binutils-${binutils_version} 23 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Generally useful routines for building crosscompiler stuff 4 | 5 | ## Function to take in a target such as `aarch64-linux-gnu`` and spit out a 6 | ## linux kernel arch like "arm64". 
7 | target_to_linux_arch() 8 | { 9 | case "$1" in 10 | arm*) 11 | echo "arm" 12 | ;; 13 | aarch64*) 14 | echo "arm64" 15 | ;; 16 | powerpc*) 17 | echo "powerpc" 18 | ;; 19 | i686*) 20 | echo "x86" 21 | ;; 22 | x86*) 23 | echo "x86" 24 | ;; 25 | esac 26 | } 27 | 28 | ## Function to take in a target such as `x86_64-apple-darwin14` and spit out 29 | ## an SDK version such as "10.10" 30 | target_to_darwin_sdk() 31 | { 32 | case "$1" in 33 | *darwin14*) 34 | echo "10.10" 35 | ;; 36 | *darwin15*) 37 | echo "10.11" 38 | ;; 39 | *darwin16*) 40 | echo "10.12" 41 | ;; 42 | *darwin17*) 43 | echo "10.13" 44 | ;; 45 | esac 46 | } 47 | 48 | target_to_clang_target() 49 | { 50 | case "$1" in 51 | x86_64-apple-darwin14) 52 | echo "x86_64-apple-macosx10.10" 53 | ;; 54 | x86_64-apple-darwin15) 55 | echo "x86_64-apple-macosx10.11" 56 | ;; 57 | x86_64-apple-darwin16) 58 | echo "x86_64-apple-macosx10.12" 59 | ;; 60 | x86_64-apple-darwin17) 61 | echo "x86_64-apple-macosx10.13" 62 | ;; 63 | x86_64-unknown-freebsd*) 64 | echo "x86_64-unknown-freebsd11.1" 65 | ;; 66 | esac 67 | } 68 | 69 | get_sysroot() 70 | { 71 | if [[ "${compiler_target}" == *apple* ]]; then 72 | sdk_version="$(target_to_darwin_sdk ${compiler_target})" 73 | echo "/opt/${compiler_target}/MacOSX${sdk_version}.sdk" 74 | else 75 | echo "/opt/${compiler_target}/${compiler_target}/sys-root" 76 | fi 77 | } 78 | 79 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/cctools_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG cctools_url=https://github.com/tpoechtrager/cctools-port/archive/${cctools_version}.tar.gz 2 | WORKDIR /src 3 | RUN download_unpack.sh "${cctools_url}" 4 | 5 | WORKDIR /src/cctools-port-${cctools_version} 6 | # Fix build on musl (https://github.com/tpoechtrager/cctools-port/pull/36) 7 | COPY patches/cctools_musl.patch /tmp/ 8 | RUN patch -p1 < /tmp/cctools_musl.patch; \ 9 | rm -f 
/tmp/cctools_musl.patch 10 | 11 | # Install cctools 12 | WORKDIR /src/cctools-port-${cctools_version}/cctools 13 | RUN rm -f aclocal.m4 14 | RUN aclocal 15 | RUN libtoolize --force 16 | RUN automake --add-missing --force 17 | RUN autoreconf 18 | RUN ./autogen.sh 19 | 20 | RUN ./configure \ 21 | --target=${compiler_target} \ 22 | --prefix=/opt/${compiler_target} \ 23 | --disable-clang-as \ 24 | --with-libtapi=/opt/${compiler_target} 25 | RUN make -j$(nproc) 26 | RUN make install 27 | 28 | # Cleanup 29 | WORKDIR /src 30 | RUN rm -rf cctools-port-${cctools_version} 31 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/dsymutil_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG dsymutil_url=https://github.com/tpoechtrager/llvm-dsymutil/archive/${dsymutil_version}.tar.gz 2 | 3 | WORKDIR /src 4 | RUN download_unpack.sh "${dsymutil_url}" 5 | 6 | WORKDIR /src/llvm-dsymutil-${dsymutil_version} 7 | 8 | # Backport of https://reviews.llvm.org/D39297 to fix build on musl 9 | COPY patches/dsymutil_llvm_dynlib.patch /tmp/ 10 | RUN patch -p1 < /tmp/dsymutil_llvm_dynlib.patch; \ 11 | rm -f /tmp/dsymutil_llvm_dynlib.patch 12 | 13 | # Make this `ar` able to use `-rcu` 14 | COPY patches/llvm_ar_options.patch /tmp/ 15 | RUN patch -p1 < /tmp/llvm_ar_options.patch; \ 16 | rm -f /tmp/llvm_ar_options.patch 17 | 18 | # Install dsymutil 19 | WORKDIR /src/llvm-dsymutil-${dsymutil_version}/build 20 | RUN cmake .. 
\ 21 | -DCMAKE_BUILD_TYPE=Release \ 22 | -DLLVM_TARGETS_TO_BUILD="X86" \ 23 | -DLLVM_ENABLE_ASSERTIONS=Off 24 | RUN make -f tools/dsymutil/Makefile -j$(nproc) 25 | RUN cp bin/llvm-dsymutil /opt/${compiler_target}/bin/dsymutil 26 | RUN make -f tools/llvm-ar/Makefile -j$(nproc) 27 | RUN cp bin/llvm-ar /opt/${compiler_target}/bin/${compiler_target}-ar 28 | RUN cp bin/llvm-ranlib /opt/${compiler_target}/bin/${compiler_target}-ranlib 29 | 30 | # Cleanup 31 | WORKDIR /src 32 | RUN rm -rf llvm-dsymutil-${dsymutil_version} 33 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/freebsd_components_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG freebsd_url="https://download.freebsd.org/ftp/releases/amd64/${freebsd_version}-RELEASE/base.txz" 2 | 3 | WORKDIR /src/freebsd-${freebsd_version} 4 | RUN download_unpack.sh "${freebsd_url}" 5 | 6 | # Copy over the things we need for our bootstrapping 7 | RUN source /build.sh; \ 8 | sysroot="$(get_sysroot)"; \ 9 | mkdir -p "${sysroot}"; \ 10 | mv usr/include "${sysroot}"; \ 11 | mv usr/lib "${sysroot}"; \ 12 | mv lib/* "${sysroot}/lib"; \ 13 | mkdir -p "${sysroot}/usr"; \ 14 | ln -sf "${sysroot}/include" "${sysroot}/usr/"; \ 15 | ln -sf "${sysroot}/lib" "${sysroot}/usr/"; \ 16 | ln -sf "libgcc_s.so.1" "${sysroot}/lib/libgcc_s.so"; \ 17 | ln -sf "libcxxrt.so.1" "${sysroot}/lib/libcxxrt.so" 18 | 19 | # Many symlinks exist that point to `../../lib/libfoo.so`. 20 | # We need them to point to just `libfoo.so`. 
:P 21 | RUN for f in $(find "/opt/${target}" -xtype l); do \ 22 | link_target="$(readlink "$f")"; \ 23 | if [[ -n $(echo "${link_target}" | grep "^../../lib") ]]; then \ 24 | ln -vsf "${link_target#../../lib/}" "${f}"; \ 25 | fi; \ 26 | done 27 | 28 | # Cleanup 29 | WORKDIR /src 30 | RUN rm -rf freebsd-${freebsd_version}* 31 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/gcc_bootstrap.Dockerfile: -------------------------------------------------------------------------------- 1 | WORKDIR /src/gcc-${gcc_version}_bootstrap_build 2 | RUN source /build.sh; \ 3 | GCC_CONF_ARGS=""; \ 4 | if [[ "${compiler_target}" == arm*hf ]]; then \ 5 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-float=hard"; \ 6 | fi; \ 7 | if [[ "${compiler_target}" == *-gnu* ]]; then \ 8 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-glibc-version=$(echo $glibc_version | cut -d '.' -f 1-2)"; \ 9 | fi; \ 10 | /src/gcc-${gcc_version}/configure \ 11 | --prefix=/opt/${compiler_target} \ 12 | --target=${compiler_target} \ 13 | --host=${MACHTYPE} \ 14 | --build=${MACHTYPE} \ 15 | --disable-multilib \ 16 | --disable-werror \ 17 | --disable-shared \ 18 | --disable-threads \ 19 | --disable-libatomic \ 20 | --disable-decimal-float \ 21 | --disable-libffi \ 22 | --disable-libgomp \ 23 | --disable-libitm \ 24 | --disable-libmpx \ 25 | --disable-libquadmath \ 26 | --disable-libssp \ 27 | --disable-libsanitizer \ 28 | --without-headers \ 29 | --with-newlib \ 30 | --disable-bootstrap \ 31 | --enable-languages=c \ 32 | --with-sysroot="$(get_sysroot)" \ 33 | ${GCC_CONF_ARGS} 34 | 35 | RUN make -j$(nproc) 36 | RUN make install 37 | 38 | # This is needed for any glibc older than 2.14, which includes the following commit 39 | # https://sourceware.org/git/?p=glibc.git;a=commit;h=95f5a9a866695da4e038aa4e6ccbbfd5d9cf63b7 40 | RUN ln -vs libgcc.a $(${compiler_target}-gcc -print-libgcc-file-name | sed 's/libgcc/&_eh/') 41 | 42 | 
-------------------------------------------------------------------------------- /workerbase/lib/crossbuild/gcc_download.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG gcc_url=https://mirrors.kernel.org/gnu/gcc/gcc-${gcc_version}/gcc-${gcc_version}.tar.xz 2 | 3 | # Download and unpack gcc and prereqs 4 | WORKDIR /src 5 | RUN download_unpack.sh "${gcc_url}" 6 | WORKDIR /src/gcc-${gcc_version} 7 | 8 | # Download prerequisites, then update config.{guess,sub} for all subprojects 9 | RUN contrib/download_prerequisites 10 | RUN update_configure_scripts 11 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/gcc_install.Dockerfile: -------------------------------------------------------------------------------- 1 | WORKDIR /src/gcc-${gcc_version}_build 2 | 3 | # target-specific GCC configuration flags. For example, 4 | # musl does not support mudflap, or libsanitizer 5 | # libmpx uses secure_getenv and struct _libc_fpstate not present in musl 6 | # alpine musl provides libssp_nonshared.a, so we don't need libssp either 7 | RUN source /build.sh; \ 8 | GCC_CONF_ARGS=""; \ 9 | if [[ "${compiler_target}" == *apple* ]]; then \ 10 | sdk_version="$(target_to_darwin_sdk ${compiler_target})"; \ 11 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-ld=/opt/${compiler_target}/bin/${compiler_target}-ld"; \ 12 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-as=/opt/${compiler_target}/bin/${compiler_target}-as"; \ 13 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --enable-languages=c,c++,fortran,objc,obj-c++"; \ 14 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-sysroot=$(get_sysroot)"; \ 15 | elif [[ "${compiler_target}" == *linux* ]]; then \ 16 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --enable-languages=c,c++,fortran"; \ 17 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-sysroot=$(get_sysroot)"; \ 18 | elif [[ "${compiler_target}" == *freebsd* ]]; then \ 19 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --enable-languages=fortran"; \ 20 
| GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-sysroot=$(get_sysroot)"; \ 21 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --disable-default-pie"; \ 22 | fi; \ 23 | if [[ "${compiler_target}" == arm*hf ]]; then \ 24 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --with-float=hard --with-arch=armv7-a --with-fpu=vfpv3-d16"; \ 25 | fi; \ 26 | if [[ "${compiler_target}" == *musl* ]]; then \ 27 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --disable-libssp --disable-libmpx"; \ 28 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --disable-libmudflap --disable-libsanitizer"; \ 29 | GCC_CONF_ARGS="${GCC_CONF_ARGS} --disable-symvers"; \ 30 | export libat_cv_have_ifunc=no; \ 31 | fi; \ 32 | /src/gcc-${gcc_version}/configure \ 33 | --prefix=/opt/${compiler_target} \ 34 | --target=${compiler_target} \ 35 | --host=${MACHTYPE} \ 36 | --build=${MACHTYPE} \ 37 | --disable-multilib \ 38 | --disable-werror \ 39 | --enable-host-shared \ 40 | --enable-threads=posix \ 41 | ${GCC_CONF_ARGS} 42 | 43 | RUN if [[ "${compiler_target}" == *freebsd* ]]; then \ 44 | export ac_cv_have_decl___builtin_ffs=yes; \ 45 | fi; \ 46 | make -j$(nproc) 47 | RUN make install 48 | 49 | # Because this always writes out .texi files, we have to chown them back. >:( 50 | RUN chown $(id -u):$(id -g) -R . 
51 | 52 | WORKDIR /src 53 | RUN rm -rf gcc-${gcc_version}* 54 | 55 | # Finally, create a bunch of symlinks stripping out the target so that 56 | # things like `gcc` "just work", as long as we've got our path set properly 57 | # We don't worry about failure to create these symlinks, as sometimes there are files 58 | # named ridiculous things like ${compiler_target}-${compiler_target}-foo, which screws this up 59 | RUN source /build.sh; \ 60 | for f in /opt/${compiler_target}/bin/${compiler_target}-*; do \ 61 | fbase=$(basename $f); \ 62 | ln -s $f /opt/${compiler_target}/bin/${fbase#${compiler_target}-} || true; \ 63 | done 64 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/glibc_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG glibc_url="http://mirrors.peers.community/mirrors/gnu/glibc/glibc-${glibc_version}.tar.xz" 2 | 3 | WORKDIR /src 4 | RUN download_unpack.sh "${glibc_url}" 5 | 6 | # patch glibc for ARM 7 | WORKDIR /src/glibc-${glibc_version} 8 | 9 | # patch glibc to keep around libgcc_s_resume on arm 10 | # ref: https://sourceware.org/ml/libc-alpha/2014-05/msg00573.html 11 | COPY patches/glibc_arm_gcc_fix.patch /tmp/ 12 | RUN if [[ "${compiler_target}" == arm* ]] || [[ "${compiler_target}" == aarch* ]]; then \ 13 | patch -p1 < /tmp/glibc_arm_gcc_fix.patch; \ 14 | fi; \ 15 | rm -f /tmp/glibc_arm_gcc_fix.patch 16 | 17 | # patch glibc's stupid gcc version check (we don't require this one, as if 18 | # it doesn't apply cleanly, it's probably fine) 19 | COPY patches/glibc_gcc_version.patch /tmp/ 20 | RUN patch -p0 < /tmp/glibc_gcc_version.patch || true; \ 21 | rm -f /tmp/glibc_gcc_version.patch 22 | 23 | # patch glibc's 32-bit assembly to withstand __i686 definition of newer GCC's 24 | # ref: http://comments.gmane.org/gmane.comp.lib.glibc.user/758 25 | COPY patches/glibc_i686_asm.patch /tmp/ 26 | RUN if [[ "${compiler_target}" == i686* ]]; then 
\ 27 | patch -p1 < /tmp/glibc_i686_asm.patch; \ 28 | fi; \ 29 | rm -f /tmp/glibc_i686_asm.patch 30 | 31 | # Patch glibc's sunrpc cross generator to work with musl 32 | # See https://sourceware.org/bugzilla/show_bug.cgi?id=21604 33 | COPY patches/glibc-sunrpc.patch /tmp/ 34 | RUN patch -p0 < /tmp/glibc-sunrpc.patch; \ 35 | rm -f /tmp/glibc-sunrpc.patch 36 | 37 | # patch for building old glibc on newer binutils 38 | # These patches don't apply on those versions of glibc where they 39 | # are not needed, but that's ok. 40 | COPY patches/glibc_nocommon.patch /tmp/ 41 | RUN patch -p0 < /tmp/glibc_nocommon.patch || true; \ 42 | rm -f /tmp/glibc_nocommon.patch 43 | COPY patches/glibc_regexp_nocommon.patch /tmp/ 44 | RUN patch -p0 < /tmp/glibc_regexp_nocommon.patch || true; \ 45 | rm -f /tmp/glibc_regexp_nocommon.patch 46 | 47 | # build glibc 48 | WORKDIR /src/glibc-${glibc_version}_build 49 | RUN source /build.sh; \ 50 | /src/glibc-${glibc_version}/configure \ 51 | --prefix=/usr \ 52 | --host=${compiler_target} \ 53 | --with-headers="$(get_sysroot)/usr/include" \ 54 | --with-binutils=/opt/${compiler_target}/bin \ 55 | --disable-multilib \ 56 | --disable-werror \ 57 | libc_cv_forced_unwind=yes \ 58 | libc_cv_c_cleanup=yes 59 | RUN chown $(id -u):$(id -g) -R /src/glibc-${glibc_version}_build 60 | RUN make -j$(nproc) 61 | RUN source /build.sh; make install install_root="$(get_sysroot)" 62 | 63 | # GCC won't build (crti.o: no such file or directory) unless these directories exist. 64 | # They can be empty though. 
65 | RUN source /build.sh; mkdir -p $(get_sysroot)/{lib,usr/lib} 66 | 67 | # Cleanup 68 | WORKDIR /src 69 | RUN rm -rf glibc-${glibc_version}* 70 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/kernel_headers_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG linux_url=http://www.kernel.org/pub/linux/kernel/v4.x/linux-${linux_version}.tar.xz 2 | 3 | WORKDIR /src 4 | RUN download_unpack.sh "${linux_url}" 5 | WORKDIR /src/linux-${linux_version} 6 | RUN source /build.sh && \ 7 | ARCH="$(target_to_linux_arch ${compiler_target})" && \ 8 | make ARCH=${ARCH} mrproper && \ 9 | make ARCH=${ARCH} headers_check && \ 10 | make INSTALL_HDR_PATH=$(get_sysroot)/usr ARCH=${ARCH} V=0 headers_install 11 | 12 | # Cleanup 13 | WORKDIR /src 14 | RUN rm -rf linux-${linux_version} 15 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/libtapi_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG libtapi_url=https://github.com/tpoechtrager/apple-libtapi/archive/${libtapi_version}.tar.gz 2 | 3 | WORKDIR /src 4 | RUN download_unpack.sh "${libtapi_url}" 5 | WORKDIR /src/apple-libtapi-${libtapi_version} 6 | 7 | # Backport of https://reviews.llvm.org/D39297 to fix build on musl 8 | COPY patches/libtapi_llvm_dynlib.patch /tmp/ 9 | RUN patch -p1 < /tmp/libtapi_llvm_dynlib.patch; \ 10 | rm -f /tmp/libtapi_llvm_dynlib.patch 11 | 12 | # Build and install libtapi (We have to tell it to explicitly use clang) 13 | RUN export MACOSX_DEPLOYMENT_TARGET=10.10; \ 14 | export CC="/usr/bin/clang"; \ 15 | export CXX="/usr/bin/clang++"; \ 16 | INSTALLPREFIX=/opt/${compiler_target} ./build.sh; \ 17 | INSTALLPREFIX=/opt/${compiler_target} ./install.sh 18 | 19 | # Cleanup 20 | WORKDIR /src 21 | RUN rm -rf apple-libtapi-${libtapi_version} 22 | 
-------------------------------------------------------------------------------- /workerbase/lib/crossbuild/llvm_clang_install.Dockerfile: -------------------------------------------------------------------------------- 1 | WORKDIR /src/llvm-${llvm_ver}_build 2 | 3 | RUN source /build.sh; \ 4 | cmake -G "Unix Makefiles" \ 5 | -DLLVM_TARGETS_TO_BUILD:STRING=host \ 6 | -DLLVM_PARALLEL_COMPILE_JOBS=$(($(nproc)+1)) \ 7 | -DLLVM_PARALLEL_LINK_JOBS=$(($(nproc)+1)) \ 8 | -DLLVM_BINDINGS_LIST="" \ 9 | -DLLVM_DEFAULT_TARGET_TRIPLE=$(target_to_clang_target ${compiler_target}) \ 10 | -DDEFAULT_SYSROOT="$(get_sysroot)" \ 11 | -DGCC_INSTALL_PREFIX="/opt/${compiler_target}" \ 12 | -DCMAKE_BUILD_TYPE=Release \ 13 | -DLLVM_ENABLE_ASSERTIONS=Off \ 14 | -DCMAKE_INSTALL_PREFIX="/opt/${compiler_target}" \ 15 | -DLIBCXX_HAS_MUSL_LIBC=On \ 16 | -DCLANG_DEFAULT_CXX_STDLIB=libc++ \ 17 | -DLLVM_TARGET_TRIPLE_ENV=LLVM_TARGET \ 18 | -DCOMPILER_RT_BUILD_SANITIZERS=Off \ 19 | -DCOMPILER_RT_BUILD_PROFILE=Off \ 20 | -DCOMPILER_RT_BUILD_LIBFUZZER=Off \ 21 | -DCOMPILER_RT_BUILD_XRAY=Off \ 22 | "/src/llvm-${llvm_ver}" 23 | 24 | RUN make -j$(($(nproc)+1)) 25 | RUN make install 26 | 27 | # Cleanup 28 | WORKDIR /src 29 | RUN rm -rf /src/llvm-${llvm_ver}* 30 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/llvm_download.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG llvm_ver=6.0.0 2 | 3 | WORKDIR /src 4 | 5 | # Download LLVM 6 | ARG llvm_url=http://releases.llvm.org/${llvm_ver}/llvm-${llvm_ver}.src.tar.xz 7 | RUN download_unpack.sh "${llvm_url}" && mv llvm-${llvm_ver}.src llvm-${llvm_ver} 8 | 9 | # Download clang. 
It's a special snowflake and gets put into tools/clang 10 | WORKDIR /src/llvm-${llvm_ver}/tools 11 | ARG clang_url=http://releases.llvm.org/${llvm_ver}/cfe-${llvm_ver}.src.tar.xz 12 | RUN download_unpack.sh "${clang_url}" && mv cfe-${llvm_ver}.src clang 13 | 14 | # Download libcxx, libcxxabi and compiler_rt 15 | WORKDIR /src/llvm-${llvm_ver}/projects 16 | ARG libcxx_url=http://releases.llvm.org/${llvm_ver}/libcxx-${llvm_ver}.src.tar.xz 17 | RUN download_unpack.sh "${libcxx_url}" && mv libcxx-${llvm_ver}.src libcxx 18 | ARG libcxxabi_url=http://releases.llvm.org/${llvm_ver}/libcxxabi-${llvm_ver}.src.tar.xz 19 | RUN download_unpack.sh "${libcxxabi_url}" && mv libcxxabi-${llvm_ver}.src libcxxabi 20 | ARG compiler_rt_url=http://releases.llvm.org/${llvm_ver}/compiler-rt-${llvm_ver}.src.tar.xz 21 | RUN download_unpack.sh "${compiler_rt_url}" && mv compiler-rt-${llvm_ver}.src compiler-rt 22 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/mingw_stage1.Dockerfile: -------------------------------------------------------------------------------- 1 | ARG mingw_url=https://sourceforge.net/projects/mingw-w64/files/mingw-w64/mingw-w64-release/mingw-w64-v${mingw_version}.tar.bz2 2 | 3 | WORKDIR /src 4 | RUN download_unpack.sh "${mingw_url}" 5 | 6 | # Patch mingw to build 32-bit cross compiler with GCC 7.1+ 7 | WORKDIR /src/mingw-w64-v${mingw_version} 8 | COPY patches/mingw_gcc710_i686.patch /tmp/ 9 | RUN patch -p1 < /tmp/mingw_gcc710_i686.patch; \ 10 | rm -f /tmp/mingw_gcc710_i686.patch 11 | 12 | # Install mingw headers 13 | WORKDIR /src/mingw-w64-v${mingw_version}/mingw-w64-headers 14 | RUN ./configure \ 15 | --prefix=/opt/${compiler_target}/${compiler_target} \ 16 | --enable-sdk=all \ 17 | --enable-secure-api \ 18 | --host=${compiler_target} 19 | RUN make install 20 | -------------------------------------------------------------------------------- /workerbase/lib/crossbuild/mingw_stage2.Dockerfile: 
# Stage 2 of the mingw-w64 cross toolchain: build and install the CRT and
# winpthreads against the headers installed in stage 1.

# If we're building a 32-bit build of mingw, add `--disable-lib64`
# (and conversely, disable the 32-bit libraries for 64-bit targets).
WORKDIR /src/mingw-w64-v${mingw_version}-crt_build
RUN MINGW_CONF_ARGS=""; \
    if [[ "${compiler_target}" == i686-* ]]; then \
        MINGW_CONF_ARGS="${MINGW_CONF_ARGS} --disable-lib64"; \
    else \
        MINGW_CONF_ARGS="${MINGW_CONF_ARGS} --disable-lib32"; \
    fi; \
    /src/mingw-w64-v${mingw_version}/mingw-w64-crt/configure \
        --prefix=/opt/${compiler_target}/${compiler_target} \
        --host=${compiler_target} \
        ${MINGW_CONF_ARGS}

# Install crt (WORKDIR persists across RUN steps, so these build in the
# crt_build directory configured above)
RUN make -j$(nproc)
RUN make install

# Install winpthreads, both static and shared
WORKDIR /src/mingw-w64-v${mingw_version}-winpthreads_build
RUN /src/mingw-w64-v${mingw_version}/mingw-w64-libraries/winpthreads/configure \
    --prefix=/opt/${compiler_target}/${compiler_target} \
    --host=${compiler_target} \
    --enable-static \
    --enable-shared
RUN make -j$(nproc)
RUN make install

# Cleanup
WORKDIR /src
RUN rm -rf mingw-w64-v${mingw_version}*
# Download the macOS SDK matching this compiler target and unpack it
# directly into the cross-toolchain prefix.
WORKDIR /opt/${compiler_target}
# /build.sh provides target_to_darwin_sdk, which maps the target triple to
# the SDK version string used in the download URL.
RUN source /build.sh; \
    sdk_version="$(target_to_darwin_sdk ${compiler_target})"; \
    sdk_url="https://davinci.cs.washington.edu/MacOSX${sdk_version}.sdk.tar.xz"; \
    download_unpack.sh "${sdk_url}"

# Fix weird permissions on the SDK folder so non-root build users can
# traverse it
RUN chmod 755 .
RUN chmod 755 MacOSX*.sdk
#!/bin/bash

# Download a tarball (caching it under /downloads) and unpack it into the
# current working directory.
#
# Usage: download_unpack.sh <url> [tarball_path]
#   $1 - URL to download
#   $2 - optional explicit path for the cached tarball; defaults to
#        /downloads/<basename of URL>

# Auto-calculate stored name unless it's given
URL="$1"
if [[ -z "$2" ]]; then
    TARBALL="/downloads/$(basename "$1")"
else
    TARBALL="$2"
fi

# Download the file if it does not already exist.  `-f` makes curl fail on
# HTTP errors (e.g. a 404 page) instead of caching the error body as the
# tarball; on failure, remove any partial file so a retry starts clean.
# ${EXTRA_CURL_FLAGS} is intentionally unquoted so it can expand to
# multiple flags.
if [[ ! -f "${TARBALL}" ]]; then
    if ! curl -q -f -# -L ${EXTRA_CURL_FLAGS} "${URL}" -o "${TARBALL}"; then
        rm -f "${TARBALL}"
        echo "Download of ${URL} failed" >&2
        exit 1
    fi
fi

# Extract it into the current directory, dispatching on file extension
if [[ "${TARBALL}" == *.tar.gz ]] || [[ "${TARBALL}" == *.tgz ]]; then
    tar zxf "${TARBALL}"
elif [[ "${TARBALL}" == *.tar.bz2 ]]; then
    tar jxf "${TARBALL}"
elif [[ "${TARBALL}" == *.tar.xz ]] || [[ "${TARBALL}" == *.txz ]]; then
    tar Jxf "${TARBALL}"
else
    # Fail loudly: previously this only warned and exited 0, so callers
    # carried on with nothing unpacked.
    echo "Unknown tarball type ${TARBALL#*.}" >&2
    exit 1
fi

# Tar sometimes keeps around user IDs and stuff that I don't like, fix that:
chown "$(id -u):$(id -g)" -R .
#!/bin/sh

# Thin shim over busybox's sha512sum: route `--check` invocations through
# `-c`, pass everything else straight through unchanged.
case "$1" in
    --check)
        shift
        exec /bin/busybox sha512sum -c "$@"
        ;;
    *)
        exec /bin/busybox sha512sum "$@"
        ;;
esac
38 | /bin/uname -r ;; 39 | esac 40 | } 41 | 42 | v_flag() 43 | { 44 | # Easter egg 45 | julia_tag_time=$(date -u -d 2013.02.13-00:49:00) 46 | case "${target}" in 47 | *-darwin*) 48 | echo "Darwin Kernel Version $(r_flag): ${julia_tag_time}; root:xnu-9000/RELEASE_X86_64" ;; 49 | *-linux*) 50 | echo "#1 JuliaOS SMP PREEMPT ${julia_tag_time}" ;; 51 | *-mingw*) 52 | echo "${julia_tag_time}" ;; 53 | *-freebsd*) 54 | echo "FreeBSD $(r_flag) #0: ${julia_tag_time} root@build.julialang.org:/julia" ;; 55 | *) 56 | /bin/uname -v ;; 57 | esac 58 | } 59 | 60 | m_flag() 61 | { 62 | case "${target}" in 63 | arm*) 64 | echo "armv7l" ;; 65 | powerpc64le*) 66 | echo "ppc64le" ;; 67 | x86_64*) 68 | case "${target}" in 69 | *-freebsd*) 70 | # FreeBSD calls x86_64 amd64 instead. 71 | echo "amd64" ;; 72 | *) 73 | echo "x86_64" ;; 74 | esac ;; 75 | i686*) 76 | echo "i686" ;; 77 | aarch64*) 78 | echo "aarch64" ;; 79 | *) 80 | # If we don't know, just pass through the native machine type. 81 | /bin/uname -m ;; 82 | esac 83 | } 84 | 85 | o_flag() 86 | { 87 | case "${target}" in 88 | # Darwin doesn't have an -o flag! 89 | *-darwin*) 90 | echo "" ;; 91 | *-linux*) 92 | echo "GNU/Linux" ;; 93 | *-mingw*) 94 | echo "Cygwin" ;; 95 | *) 96 | /bin/uname -o ;; 97 | esac 98 | } 99 | 100 | # uname -a is not exactly the same across all platforms; we're mimicking Arch Linux here. 
101 | a_flag() 102 | { 103 | echo $(s_flag) $(/bin/uname -n) $(r_flag) $(v_flag) $(m_flag) $(o_flag) 104 | } 105 | 106 | 107 | 108 | if [[ -z "$@" ]]; then 109 | s_flag 110 | else 111 | for flag in $@; do 112 | case "${flag}" in 113 | -a) 114 | a_flag ;; 115 | -s) 116 | s_flag ;; 117 | -r) 118 | r_flag ;; 119 | -v) 120 | v_flag ;; 121 | -m) 122 | m_flag ;; 123 | -p) 124 | m_flag ;; 125 | -i) 126 | m_flag ;; 127 | -o) 128 | o_flag ;; 129 | *) 130 | /bin/uname ${flag} ;; 131 | esac 132 | done 133 | fi 134 | 135 | -------------------------------------------------------------------------------- /workerbase/lib/freebsd_crosscompiler_install.Dockerfile: -------------------------------------------------------------------------------- 1 | INCLUDE crossbuild/freebsd_components_install 2 | 3 | # This doesn't work yet, isl fails to build with: 4 | # configure: error: No ffs implementation found 5 | INCLUDE crossbuild/binutils_install 6 | INCLUDE crossbuild/gcc_download 7 | INCLUDE crossbuild/gcc_install 8 | 9 | INCLUDE crossbuild/llvm_download 10 | INCLUDE crossbuild/llvm_clang_install 11 | 12 | # Install cmake toolchain 13 | COPY cmake_toolchains/${compiler_target}.toolchain /opt/${compiler_target}/ 14 | -------------------------------------------------------------------------------- /workerbase/lib/gcc_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install GCC 2 | ARG gcc_version=7.3.0 3 | ARG gcc_url=https://mirrors.kernel.org/gnu/gcc/gcc-${gcc_version}/gcc-${gcc_version}.tar.xz 4 | 5 | # Download and unpack gcc 6 | WORKDIR /src 7 | RUN download_unpack.sh "${gcc_url}" 8 | 9 | # Build gcc! 
10 | WORKDIR /src/gcc-${gcc_version} 11 | RUN ${L32} contrib/download_prerequisites 12 | RUN mkdir -p /src/gcc-${gcc_version}_build 13 | WORKDIR /src/gcc-${gcc_version}_build 14 | RUN ${L32} /src/gcc-${gcc_version}/configure \ 15 | --prefix=/usr/local --enable-host-shared --enable-threads=posix \ 16 | --with-system-zlib --enable-multilib \ 17 | --enable-languages=c,c++,fortran,objc,obj-c++ ${gcc_configure_flags} 18 | RUN ${L32} make -j4 19 | 20 | # Install gcc 21 | USER root 22 | RUN ${L32} make install 23 | 24 | # Symlink LTO plugin into binutils directory 25 | RUN mkdir -p /usr/local/lib/bfd-plugins 26 | RUN ln -sf $(find /usr/local/libexec/gcc/ -name liblto_plugin.so) /usr/local/lib/bfd-plugins/ 27 | 28 | # Setup environment variables so that GCC takes precedence from this point on 29 | ENV PATH "/usr/local/bin:$PATH" 30 | 31 | # Put our /lib and /lib64 directories into /etc/ld.so.conf.d so that they take precedence 32 | RUN mkdir -p /etc/ld.so.conf.d 33 | RUN echo "/usr/local/lib" > /etc/ld.so.conf.d/0_new_gcc.conf 34 | RUN echo "/usr/local/lib64" >> /etc/ld.so.conf.d/0_new_gcc.conf 35 | RUN ldconfig || true # <--- for alpine 36 | 37 | # Add a `cc` symlink to gcc: 38 | RUN ln -sf /usr/local/bin/gcc /usr/local/bin/cc 39 | 40 | # Now cleanup /src 41 | WORKDIR /src 42 | RUN rm -rf gcc-${gcc_version}* 43 | -------------------------------------------------------------------------------- /workerbase/lib/git_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install git 2 | ARG git_version=2.24.1 3 | ARG git_url=https://github.com/git/git/archive/v${git_version}.tar.gz 4 | WORKDIR /src 5 | 6 | # Use download_unpack to download and unpack git 7 | RUN download_unpack.sh "${git_url}" /downloads/git-${git_version}.tar.gz 8 | WORKDIR /src/git-${git_version} 9 | RUN ${L32} make NO_GETTEXT=1 NO_REGEX=YesPlease prefix=/usr/local all -j4 10 | 11 | # Install git 12 | USER root 13 | RUN ${L32} make NO_GETTEXT=1 
# Download and install Cygwin (with a cross-toolchain package set) and the
# Windows 10 SDK.
function Install-Cygwin {
    param ( $CygDir="c:\cygwin", $arch="x86")

    Write-Verbose "Installing Cygwin and Windows 10 SDK for $arch"
    if(!(Test-Path -Path $CygDir -PathType Container)) {
        Write-Verbose "Creating directory $CygDir"
        New-Item -Type Directory -Path $CygDir -Force
    }

    # Fetch the installer over HTTPS: the previous plain-http URL allowed a
    # man-in-the-middle to substitute an arbitrary executable that we then
    # run with full privileges.
    Write-Verbose "Downloading https://cygwin.com/setup-$arch.exe"
    $client = new-object System.Net.WebClient
    $client.DownloadFile("https://cygwin.com/setup-$arch.exe", "$CygDir\setup-$arch.exe" )

    # Base toolchain, plus the arch-appropriate mingw cross compilers
    $pkg_list = "git,make,curl,patch,python,gcc-g++,m4,cmake,p7zip,nano,tmux,procps"
    if( $arch -eq "x86" ) {
        $pkg_list += ",mingw64-i686-gcc-g++,mingw64-i686-gcc-fortran"
    } else {
        $pkg_list += ",mingw64-x86_64-gcc-g++,mingw64-x86_64-gcc-fortran"
    }

    # Package payloads are integrity-checked by setup.exe against the signed
    # setup.ini, so an http mirror is acceptable for the package downloads.
    Write-Verbose "Installing Cygwin and $pkg_list"
    Start-Process -wait -FilePath "$CygDir\setup-$arch.exe" -ArgumentList "-q -g -l $CygDir -s http://mirror.mit.edu/cygwin/ -R c:\cygwin -P $pkg_list"

    # NOTE(review): the SDK installer is launched without -Wait, so
    # provisioning may continue while it is still running — presumably
    # intentional, confirm.
    Write-Verbose "Downloading and running Windows 10 SDK"
    $client.DownloadFile( "https://go.microsoft.com/fwlink/p/?LinkID=822845", "$CygDir\sdksetup.exe" )
    Start-Process -FilePath "$CygDir\sdksetup.exe"
}

$VerbosePreference = "Continue"
Install-Cygwin -arch "$env:CYGWIN_ARCH"
} else { 8 | $bits = "32" 9 | } 10 | 11 | # change the date in the following for future msys2 releases 12 | $msys2tarball = "msys2-base-$arch-20161025.tar" 13 | $msyspath = "C:\msys$bits" 14 | 15 | # install chocolatey and cmake 16 | Write-Verbose "Installing Chocolatey from https://chocolatey.org/install.ps1" 17 | iex ((new-object net.webclient).DownloadString("https://chocolatey.org/install.ps1")) 18 | choco install -y cmake.portable 19 | 20 | # pacman is picky, reinstall msys2 from scratch 21 | foreach ($dir in @("etc", "usr", "var", "mingw32")) { 22 | if (Test-Path "$msyspath\$dir") { 23 | rm -Recurse -Force $msyspath\$dir 24 | } 25 | } 26 | mkdir -Force $msyspath | Out-Null 27 | 28 | Write-Verbose "Installing 7za from https://chocolatey.org/7za.exe" 29 | (new-object net.webclient).DownloadFile( 30 | "https://chocolatey.org/7za.exe", 31 | "$msyspath\7za.exe") 32 | 33 | Write-Verbose "Installing msys2 from http://sourceforge.net/projects/msys2/files/Base/$arch/$msys2tarball.xz" 34 | (new-object net.webclient).DownloadFile( 35 | "http://sourceforge.net/projects/msys2/files/Base/$arch/$msys2tarball.xz", 36 | "$msyspath\$msys2tarball.xz") 37 | 38 | cd C:\ 39 | &"$msyspath\7za.exe" x -y "$msyspath\$msys2tarball.xz" 40 | &"$msyspath\7za.exe" x -y "$msys2tarball" | Out-Null 41 | 42 | Write-Verbose "Installing bash, pacman, pacman-mirrors and msys2-runtime" 43 | &$msyspath\usr\bin\sh -lc "pacman --noconfirm --force --needed -Sy bash pacman pacman-mirrors msys2-runtime" 44 | 45 | $pkg_list = "diffutils git curl vim m4 make patch tar p7zip openssh cygrunsrv mingw-w64-$arch-editrights procps" 46 | Write-Verbose "Installing $pkg_list" 47 | &$msyspath\usr\bin\sh -lc "pacman --noconfirm -Syu && pacman --noconfirm -S $pkg_list" 48 | 49 | Write-Verbose "Rebasing MSYS2" 50 | &$msyspath\autorebase.bat 51 | 52 | # Let's install python 53 | Write-Verbose "Installing python from chocolatey" 54 | choco install -y python2 55 | } 56 | 57 | $VerbosePreference = "Continue" 58 | 59 | # 
Then, install Msys2 as either 64-bit or 32-bit 60 | Install-Msys2 -arch "$env:WIN_ARCH" 61 | -------------------------------------------------------------------------------- /workerbase/lib/libtool_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install libtool 2 | ARG libtool_version=2.4.6 3 | ARG libtool_url=http://ftpmirror.gnu.org/libtool/libtool-${libtool_version}.tar.gz 4 | WORKDIR /src 5 | 6 | # Use download_unpack to download and unpack libtool 7 | RUN download_unpack.sh "${libtool_url}" 8 | WORKDIR /src/libtool-${libtool_version} 9 | RUN ${L32} ./configure --prefix=/usr/local 10 | RUN ${L32} make all -j4 11 | 12 | # Install libtool 13 | USER root 14 | RUN ${L32} make install 15 | 16 | # cleanup /src 17 | WORKDIR /src 18 | RUN rm -rf libtool-${libtool_version} 19 | -------------------------------------------------------------------------------- /workerbase/lib/linux_glibc_crosscompiler_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ENV PATH="/opt/${compiler_target}/bin:$PATH" 2 | INCLUDE crossbuild/kernel_headers_install 3 | INCLUDE crossbuild/binutils_install 4 | INCLUDE crossbuild/gcc_download 5 | INCLUDE crossbuild/gcc_bootstrap 6 | INCLUDE crossbuild/glibc_install 7 | INCLUDE crossbuild/gcc_install 8 | 9 | # Install cmake toolchain 10 | COPY cmake_toolchains/${compiler_target}.toolchain /opt/${compiler_target}/ 11 | -------------------------------------------------------------------------------- /workerbase/lib/linux_musl_crosscompiler_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ENV PATH="/opt/${compiler_target}/bin:$PATH" 2 | INCLUDE crossbuild/kernel_headers_install 3 | INCLUDE crossbuild/binutils_install 4 | INCLUDE crossbuild/gcc_download 5 | INCLUDE crossbuild/gcc_bootstrap 6 | INCLUDE crossbuild/musl_install 7 | INCLUDE crossbuild/gcc_install 8 | 9 | # Install cmake 
USER root
# Install binfmt-support on apt-based images only (quoted so the test is
# well-formed when `which` prints nothing)
RUN [ -z "$(which apt-get 2>/dev/null)" ] || apt-get install -y binfmt-support

# Download latest qemu-user-static releases.  `-f` makes curl fail on HTTP
# errors so a bad release URL aborts the build instead of installing an
# HTML error page as the emulator binary.
ARG qemu_version=2.11.0
RUN curl -f -L https://github.com/multiarch/qemu-user-static/releases/download/v${qemu_version}/qemu-${qemu_arch}-static -o /usr/bin/qemu-${qemu_arch}-static
RUN chmod +x /usr/bin/qemu-${qemu_arch}-static
11 | WORKDIR /src/objconv-${objconv_version} 12 | RUN ${L32} make 13 | 14 | # Install objconv 15 | USER root 16 | RUN mv objconv /usr/local/bin/ 17 | 18 | # Now cleanup /src 19 | WORKDIR /src 20 | RUN rm -rf objconv-${objconv_version}* 21 | -------------------------------------------------------------------------------- /workerbase/lib/omega.Dockerfile: -------------------------------------------------------------------------------- 1 | USER root 2 | 3 | # We need to override ld.so.conf to search /usr/local before /usr 4 | RUN echo "/usr/local/lib64" > /etc/ld.so.conf.new; \ 5 | echo "/usr/local/lib" >> /etc/ld.so.conf.new; \ 6 | cat /etc/ld.so.conf >> /etc/ld.so.conf.new; \ 7 | mv /etc/ld.so.conf.new /etc/ld.so.conf; \ 8 | ldconfig || true # <--- for alpine 9 | 10 | # Cleanup downloads and build.sh 11 | RUN rm -rf /downloads /build.sh 12 | 13 | # Remove bootstrapping compiler toolchain if we need to 14 | RUN if [[ -n "${TEMPORARY_DEPS}" ]]; then \ 15 | if [[ -n "$(which yum 2>/dev/null)" ]]; then \ 16 | yum remove -y ${TEMPORARY_DEPS}; \ 17 | yum clean all; \ 18 | elif [[ -n "$(which apt-get 2>/dev/null)" ]]; then \ 19 | apt-get remove -y ${TEMPORARY_DEPS}; \ 20 | apt-get autoremove -y; \ 21 | apt-get clean -y; \ 22 | fi; \ 23 | fi 24 | 25 | # Clean up /tmp, some things leave droppings in there. 26 | RUN rm -rf /tmp/* 27 | 28 | # Set a default working directory that we know is good 29 | WORKDIR / 30 | 31 | # Use /entrypoint.sh to conditionally apply ${L32} since we cna't use ARG 32 | # values within an actual ENTRYPOINT command. 
:( 33 | RUN echo "#!/bin/bash" > /entrypoint.sh; \ 34 | echo "${L32} \"\$@\"" >> /entrypoint.sh; \ 35 | chmod +x /entrypoint.sh 36 | ENTRYPOINT ["/entrypoint.sh"] 37 | CMD ["/bin/bash"] 38 | USER buildworker 39 | -------------------------------------------------------------------------------- /workerbase/lib/osx_crosscompiler_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ENV PATH="/opt/${compiler_target}/bin:$PATH" 2 | INCLUDE crossbuild/osx_sdk_install 3 | INCLUDE crossbuild/libtapi_install 4 | INCLUDE crossbuild/cctools_install 5 | INCLUDE crossbuild/dsymutil_install 6 | INCLUDE crossbuild/llvm_download 7 | INCLUDE crossbuild/llvm_clang_install 8 | INCLUDE crossbuild/gcc_download 9 | INCLUDE crossbuild/gcc_install 10 | 11 | # Install cmake toolchain 12 | COPY cmake_toolchains/${compiler_target}.toolchain /opt/${compiler_target}/ 13 | -------------------------------------------------------------------------------- /workerbase/lib/patchelf_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install tar 2 | ARG patchelf_version=0.9 3 | ARG patchelf_url=https://github.com/NixOS/patchelf/archive/${patchelf_version}.tar.gz 4 | 5 | WORKDIR /src 6 | 7 | # Use download_unpack to download and unpack patchelf 8 | RUN download_unpack.sh "${patchelf_url}" 9 | 10 | # Build the patchelf sources! 
11 | WORKDIR /src/patchelf-${patchelf_version} 12 | RUN $L32 ./bootstrap.sh 13 | RUN $L32 ./configure --prefix=/usr/local 14 | RUN $L32 make -j4 15 | 16 | # Install patchelf 17 | USER root 18 | RUN $L32 make install 19 | 20 | # Now cleanup /src 21 | WORKDIR /src 22 | RUN rm -rf patchelf-${patchelf_version}* 23 | -------------------------------------------------------------------------------- /workerbase/lib/python_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install python 2 | ARG python_version=3.8.1 3 | ARG python_url=https://www.python.org/ftp/python/${python_version}/Python-${python_version}.tar.xz 4 | ARG pip_url=https://bootstrap.pypa.io/get-pip.py 5 | WORKDIR /src 6 | 7 | # Use download_unpack to download and unpack python 8 | RUN download_unpack.sh "${python_url}" 9 | 10 | # Build the python sources! 11 | WORKDIR /src/Python-${python_version} 12 | RUN ${L32} ./configure --prefix=/usr/local 13 | RUN ${L32} make -j4 14 | 15 | # Install python 16 | USER root 17 | RUN ${L32} make install 18 | 19 | # We also want this usable as `python` 20 | RUN ln -s python3 /usr/local/bin/python 21 | 22 | # Install pip and install virtualenv (all as root, of course) 23 | RUN curl -q -# -L "${pip_url}" -o get-pip.py 24 | RUN python3 ./get-pip.py 25 | RUN pip3 install virtualenv 26 | 27 | # Now cleanup /src 28 | WORKDIR /src 29 | RUN rm -rf Python-${python_version} 30 | RUN rm -f get-pip.py 31 | -------------------------------------------------------------------------------- /workerbase/lib/rr_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install rr 2 | ARG rr_version=4.5.0 3 | ARG rr_url=https://github.com/mozilla/rr/archive/${rr_version}.tar.gz 4 | 5 | USER root 6 | # We need to install python's pexpect module for rr. sigh. 
7 | RUN ${L32} pip install pexpect 8 | 9 | USER buildworker 10 | WORKDIR /src 11 | 12 | # Use download_unpack to download and unpack rr 13 | RUN download_unpack.sh "${rr_url}" /downloads/rr-${rr_version}.tar.gz 14 | 15 | RUN mkdir -p /src/rr-${rr_version}/build/Release 16 | WORKDIR /src/rr-${rr_version}/build/Release 17 | RUN ${L32} cmake -DCMAKE_INSTALL_PREFIX=/usr/local -Ddisable32bit=TRUE ../.. 18 | RUN ${L32} make all -j4 19 | 20 | # Install rr 21 | USER root 22 | RUN ${L32} make install 23 | 24 | # cleanup /src 25 | WORKDIR /src 26 | RUN rm -rf rr-${rr_version}* 27 | -------------------------------------------------------------------------------- /workerbase/lib/super_binutils_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install binutils 2 | ARG binutils_version=2.29.1 3 | ARG binutils_url=https://ftp.gnu.org/gnu/binutils/binutils-${binutils_version}.tar.xz 4 | 5 | # All the targets EXCEPT darwin 6 | ARG binutils_targets=x86_64-linux-gnu,i686-linux-gnu,aarch64-linux-gnu,arm-linux-gnueabihf,powerpc64le-linux-gnu,x86_64-w64-mingw32,i686-w64-mingw32,x86_64-unknown-freebsd 7 | 8 | # Use download_unpack to download and unpack binutils and gcc 9 | WORKDIR /src 10 | RUN download_unpack.sh "${binutils_url}" 11 | 12 | # Build binutils! Because we're building for platforms including darwin, we need to 13 | # first compile everything except ld for everything, then compile everything including 14 | # ld for everything except darwin. This is because binutils breaks when compiling for 15 | # everything when that everything includes darwin because ld doesn't work on OSX. 
16 | WORKDIR /src/binutils-${binutils_version} 17 | RUN ${L32} ./configure --prefix=/opt/super_binutils --enable-targets=${binutils_targets},x86_64-apple-darwin --disable-ld 18 | RUN ${L32} make -j4 19 | 20 | # Install binutils 21 | RUN ${L32} make install 22 | 23 | # Cleanup 24 | WORKDIR /src 25 | RUN rm -rf binutils-${binutils_version} 26 | 27 | 28 | ## Now do it again 29 | WORKDIR /src 30 | RUN download_unpack.sh "${binutils_url}" 31 | 32 | # Install `ld` for everything except Darwin 33 | WORKDIR /src/binutils-${binutils_version} 34 | RUN ${L32} ./configure --prefix=/opt/super_binutils --enable-targets=${binutils_targets} --enable-ld 35 | RUN ${L32} make -j4 36 | RUN ${L32} make install-ld 37 | WORKDIR /src 38 | RUN rm -rf binutils-${binutils_version} 39 | 40 | # Add this guy onto our PATH immediately 41 | #ENV PATH=/opt/super_binutils/bin:$PATH 42 | -------------------------------------------------------------------------------- /workerbase/lib/tar_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ## Install tar 2 | ARG tar_version=1.29 3 | ARG tar_url=https://ftp.gnu.org/gnu/tar/tar-${tar_version}.tar.gz 4 | WORKDIR /src 5 | 6 | # Use download_unpack to download and unpack tar 7 | RUN download_unpack.sh "${tar_url}" 8 | 9 | # Build the tar sources! 
#!/bin/sh

# Forcibly insert --no-same-owner into every tar invocation so extracted
# files belong to the invoking user rather than the archived owner.
# Use "$@" instead of $*: $* word-splits (and glob-expands) arguments, so
# any path containing whitespace was previously passed to tar broken.
/usr/bin/tar "$@" --no-same-owner
-type f -name $1) 6 | if [[ -n "${FILES}" ]]; then 7 | curl -L "$2" -o /tmp/$1 8 | for f in ${FILES}; do 9 | cp -vf /tmp/$1 ${f} 10 | done 11 | fi 12 | } 13 | 14 | replace_files config.guess 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' 15 | replace_files config.sub 'http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' 16 | -------------------------------------------------------------------------------- /workerbase/lib/win_crosscompiler_install.Dockerfile: -------------------------------------------------------------------------------- 1 | ENV PATH="/opt/${compiler_target}/bin:$PATH" 2 | INCLUDE crossbuild/binutils_install 3 | INCLUDE crossbuild/mingw_stage1 4 | INCLUDE crossbuild/gcc_download 5 | INCLUDE crossbuild/gcc_bootstrap 6 | INCLUDE crossbuild/mingw_stage2 7 | INCLUDE crossbuild/gcc_install 8 | 9 | # Install cmake toolchain 10 | COPY cmake_toolchains/${compiler_target}.toolchain /opt/${compiler_target}/ 11 | -------------------------------------------------------------------------------- /workerbase/lib/wine_install.Dockerfile: -------------------------------------------------------------------------------- 1 | USER root 2 | WORKDIR /src 3 | 4 | ARG wine_version=3.1 5 | 6 | RUN git clone https://github.com/wine-mirror/wine.git -b wine-${wine_version} 7 | WORKDIR /src/wine 8 | 9 | # Install some dependencies 10 | RUN [[ -n "$(which yum 2>/dev/null)" ]] && yum install -y libpng-devel libjpeg-dev libxslt-dev libgnutls-dev || true 11 | RUN [[ -n "$(which apt-get 2>/dev/null)" ]] && apt-get install -y libpng-dev libjpeg-dev libxslt-dev libgnutls-dev || true 12 | RUN [[ -n "$(which apk 2>/dev/null)" ]] && apk add libpng-dev libjpeg-turbo-dev libxslt-dev gnutls-dev || true 13 | 14 | # Patch -no-pie into LDFLAGS 15 | COPY patches/wine_nopie.patch /tmp/ 16 | RUN patch -p1 < /tmp/wine_nopie.patch; \ 17 | rm -f /tmp/wine_nopie.patch 18 | 19 | # First, build wine64 20 | RUN mkdir /src/wine64_build 
21 | WORKDIR /src/wine64_build 22 | RUN ${L32} /src/wine/configure --without-x --without-freetype --enable-win64 23 | RUN ${L32} make -j3 24 | 25 | # Next, build wine32 26 | RUN mkdir /src/wine32_build 27 | WORKDIR /src/wine32_build 28 | RUN ${L32} /src/wine/configure --without-x --without-freetype --with-wine64=/src/wine64_build 29 | RUN ${L32} make -j3 30 | 31 | # Now install wine32, and THEN wine64... le sigh... 32 | USER root 33 | WORKDIR /src/wine32_build 34 | RUN ${L32} make install 35 | WORKDIR /src/wine64_build 36 | RUN ${L32} make install 37 | 38 | # cleanup 39 | WORKDIR /src 40 | RUN rm -rf wine* 41 | 42 | -------------------------------------------------------------------------------- /workerbase/patches/cctools_musl.patch: -------------------------------------------------------------------------------- 1 | diff --git a/cctools/ar/ar.c b/cctools/ar/ar.c 2 | index b95e9ec..d8ec180 100644 3 | --- a/cctools/ar/ar.c 4 | +++ b/cctools/ar/ar.c 5 | @@ -88,8 +88,8 @@ static char rcsid[] = "$OpenBSD: ar.c,v 1.3 1997/01/15 23:42:11 millert Exp $"; 6 | CHDR chdr; 7 | u_int options; 8 | char *archive, *envtmp, *posarg, *posname; 9 | -static void badoptions __P((char *)); 10 | -static void usage __P((void)); 11 | +static void badoptions(char *); 12 | +static void usage(void); 13 | char *progname; 14 | 15 | /* 16 | @@ -105,7 +105,7 @@ main(argc, argv) 17 | { 18 | int c, retval, verbose, run_ranlib, toc64; 19 | char *p; 20 | - int (*fcall) __P((char **)); 21 | + int (*fcall)(char **); 22 | 23 | fcall = 0; 24 | verbose = 0; 25 | diff --git a/cctools/ar/archive.c b/cctools/ar/archive.c 26 | index 0f41fe9..00573a0 100644 27 | --- a/cctools/ar/archive.c 28 | +++ b/cctools/ar/archive.c 29 | @@ -100,7 +100,7 @@ open_archive(mode) 30 | created = 0; 31 | if (mode & O_CREAT) { 32 | mode |= O_EXCL; 33 | - if ((fd = open(archive, mode, DEFFILEMODE)) >= 0) { 34 | + if ((fd = open(archive, mode, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)) >= 0) { 35 | /* POSIX.2 puts create 
message on stderr. */ 36 | if (!(options & AR_C)) 37 | warnx("creating archive %s", archive); 38 | @@ -111,7 +111,7 @@ open_archive(mode) 39 | error(archive); 40 | mode &= ~O_EXCL; 41 | } 42 | - if ((fd = open(archive, mode, DEFFILEMODE)) < 0) 43 | + if ((fd = open(archive, mode, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)) < 0) 44 | error(archive); 45 | 46 | if((mode & O_ACCMODE) == O_RDONLY) 47 | diff --git a/cctools/ar/archive.h b/cctools/ar/archive.h 48 | index 4b83a11..d46dd73 100644 49 | --- a/cctools/ar/archive.h 50 | +++ b/cctools/ar/archive.h 51 | @@ -61,6 +61,8 @@ 52 | * @(#)archive.h 8.3 (Berkeley) 4/2/94 53 | */ 54 | 55 | +#include 56 | + 57 | /* Ar(1) options. */ 58 | #define AR_A 0x0001 59 | #define AR_B 0x0002 60 | @@ -123,11 +125,11 @@ typedef struct { 61 | 62 | struct stat; 63 | 64 | -void close_archive __P((int)); 65 | -void copy_ar __P((CF *, off_t)); 66 | -int get_arobj __P((int)); 67 | -int open_archive __P((int)); 68 | -void put_arobj __P((CF *, struct stat *)); 69 | -void skip_arobj __P((int)); 70 | +void close_archive(int); 71 | +void copy_ar(CF *, off_t); 72 | +int get_arobj(int); 73 | +int open_archive(int); 74 | +void put_arobj(CF *, struct stat *); 75 | +void skip_arobj(int); 76 | 77 | extern int archive_opened_for_writing; 78 | diff --git a/cctools/ar/extern.h b/cctools/ar/extern.h 79 | index 036647b..d30abfe 100644 80 | --- a/cctools/ar/extern.h 81 | +++ b/cctools/ar/extern.h 82 | @@ -58,20 +58,20 @@ 83 | * @(#)extern.h 8.3 (Berkeley) 4/2/94 84 | */ 85 | 86 | -int append __P((char **)); 87 | -void badfmt __P((void)); 88 | -int compare __P((char *)); 89 | -int contents __P((char **)); 90 | -int delete __P((char **)); 91 | -void error __P((char *)); 92 | -int extract __P((char **)); 93 | -char *files __P((char **argv)); 94 | -int move __P((char **)); 95 | -void orphans __P((char **argv)); 96 | -int print __P((char **)); 97 | -int replace __P((char **)); 98 | -char *rname __P((char *)); 99 | -int tmp __P((void)); 100 | +int 
append(char **); 101 | +void badfmt(void); 102 | +int compare(char *); 103 | +int contents(char **); 104 | +int delete(char **); 105 | +void error(char *); 106 | +int extract(char **); 107 | +char *files(char **argv); 108 | +int move(char **); 109 | +void orphans(char **argv); 110 | +int print(char **); 111 | +int replace(char **); 112 | +char *rname(char *); 113 | +int tmp(void); 114 | 115 | extern char *archive; 116 | extern char *posarg, *posname; /* positioning file name */ 117 | diff --git a/cctools/include/foreign/sys/sysctl.h b/cctools/include/foreign/sys/sysctl.h 118 | index 30749d1..829aae3 100644 119 | --- a/cctools/include/foreign/sys/sysctl.h 120 | +++ b/cctools/include/foreign/sys/sysctl.h 121 | @@ -1,4 +1,4 @@ 122 | -#ifndef __CYGWIN__ 123 | +#if defined(__APPLE__) || defined(__GLIBC__) 124 | #include_next 125 | #else 126 | #ifndef __SYSCTL_H__ 127 | diff --git a/cctools/include/sys/cdefs.h b/cctools/include/sys/cdefs.h 128 | index ca78da5..082e829 100644 129 | --- a/cctools/include/sys/cdefs.h 130 | +++ b/cctools/include/sys/cdefs.h 131 | @@ -1,3 +1,5 @@ 132 | +#ifdef __GLIBC__ 133 | + 134 | /* 135 | * Workaround for a GLIBC bug. 
136 | * https://sourceware.org/bugzilla/show_bug.cgi?id=14952 137 | @@ -5,8 +7,6 @@ 138 | 139 | #include_next 140 | 141 | -#ifdef __GLIBC__ 142 | - 143 | #ifndef __extern_inline 144 | # define __extern_inline \ 145 | extern __inline __attribute__ ((__gnu_inline__)) 146 | @@ -17,4 +17,14 @@ 147 | extern __always_inline __attribute__ ((__gnu_inline__)) 148 | #endif 149 | 150 | +#else 151 | + 152 | +#ifdef __cplusplus 153 | +#define __BEGIN_DECLS extern "C" { 154 | +#define __END_DECLS } 155 | +#else 156 | +#define __BEGIN_DECLS 157 | +#define __END_DECLS 158 | +#endif 159 | + 160 | #endif /* __GLIBC__ */ 161 | diff --git a/cctools/ld64/src/3rd/helper.c b/cctools/ld64/src/3rd/helper.c 162 | index 14bc11d..f08770f 100644 163 | --- a/cctools/ld64/src/3rd/helper.c 164 | +++ b/cctools/ld64/src/3rd/helper.c 165 | @@ -36,8 +36,11 @@ void __assert_rtn(const char *func, const char *file, int line, const char *msg) 166 | __assert(msg, file, line, func); 167 | #elif defined(__NetBSD__) || defined(__OpenBSD__) || defined(__CYGWIN__) 168 | __assert(msg, line, file); 169 | -#else 170 | +#elif defined(__GLIBC__) || defined(__MINGW32__) 171 | __assert(msg, file, line); 172 | +#else 173 | + /* musl libc */ 174 | + __assert_fail(msg, file, line, func); 175 | #endif /* __FreeBSD__ */ 176 | } 177 | 178 | diff --git a/cctools/ld64/src/ld/parsers/textstub_dylib_file.cpp b/cctools/ld64/src/ld/parsers/textstub_dylib_file.cpp 179 | index b08a9d7..3da2596 100644 180 | --- a/cctools/ld64/src/ld/parsers/textstub_dylib_file.cpp 181 | +++ b/cctools/ld64/src/ld/parsers/textstub_dylib_file.cpp 182 | @@ -124,7 +124,7 @@ template 183 | throw strdup(errorMessage.c_str()); 184 | 185 | // unmap file - it is no longer needed. 
186 | - munmap((caddr_t)fileContent, fileLength); 187 | + munmap((void *)fileContent, fileLength); 188 | 189 | // write out path for -t option 190 | if ( logAllFiles ) 191 | diff --git a/cctools/libstuff/dylib_roots.c b/cctools/libstuff/dylib_roots.c 192 | index 5f1f106..f483efd 100644 193 | --- a/cctools/libstuff/dylib_roots.c 194 | +++ b/cctools/libstuff/dylib_roots.c 195 | @@ -28,7 +28,8 @@ 196 | #include 197 | #include 198 | #include 199 | -#ifndef __OPENSTEP__ 200 | +#if defined(__APPLE__) || defined(__GLIBC__) || defined(__MINGW32__) 201 | +#define HAVE_FTS 202 | #include 203 | #endif 204 | #include 205 | @@ -116,7 +117,7 @@ find_dylib_in_root( 206 | char *install_name, 207 | const char *root) 208 | { 209 | -#ifndef __OPENSTEP__ 210 | +#ifdef HAVE_FTS 211 | char *base_name, start[MAXPATHLEN + 1], *image_file_name; 212 | char const *paths[2]; 213 | FTS *fts; 214 | -------------------------------------------------------------------------------- /workerbase/patches/cmake_install.patch: -------------------------------------------------------------------------------- 1 | --- ./usr/local/share/cmake-3.6/Modules/GNUInstallDirs.cmake.orig 2017-11-08 21:58:03.829928387 -0500 2 | +++ ./usr/local/share/cmake-3.6/Modules/GNUInstallDirs.cmake 2017-11-08 21:59:13.097569355 -0500 3 | @@ -328,9 +328,6 @@ 4 | if("${dir}" STREQUAL "SYSCONFDIR" OR "${dir}" STREQUAL "LOCALSTATEDIR") 5 | set(CMAKE_INSTALL_FULL_${dir} "/${CMAKE_INSTALL_${dir}}") 6 | else() 7 | - if (NOT "${CMAKE_INSTALL_${dir}}" MATCHES "^usr/") 8 | - set(CMAKE_INSTALL_${dir} "usr/${CMAKE_INSTALL_${dir}}") 9 | - endif() 10 | set(CMAKE_INSTALL_FULL_${dir} "/${CMAKE_INSTALL_${dir}}") 11 | endif() 12 | elseif("${CMAKE_INSTALL_PREFIX}" MATCHES "^/usr/?$") 13 | -------------------------------------------------------------------------------- /workerbase/patches/dsymutil_llvm_dynlib.patch: -------------------------------------------------------------------------------- 1 | diff --git a/lib/Support/DynamicLibrary.cpp 
b/lib/Support/DynamicLibrary.cpp 2 | index 9a7aeb5..3e4e8dd 100644 3 | --- a/lib/Support/DynamicLibrary.cpp 4 | +++ b/lib/Support/DynamicLibrary.cpp 5 | @@ -138,7 +138,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char *symbolName) { 6 | 7 | // This macro returns the address of a well-known, explicit symbol 8 | #define EXPLICIT_SYMBOL(SYM) \ 9 | - if (!strcmp(symbolName, #SYM)) return &SYM 10 | + if (!strcmp(symbolName, #SYM)) return (void*)&SYM 11 | 12 | // On linux we have a weird situation. The stderr/out/in symbols are both 13 | // macros and global variables because of standards requirements. So, we 14 | -------------------------------------------------------------------------------- /workerbase/patches/gcc_libmpx_limits.patch: -------------------------------------------------------------------------------- 1 | diff --git a/libmpx/mpxrt/mpxrt-utils.c b/libmpx/mpxrt/mpxrt-utils.c 2 | index 63ee7c6..9ed991c 100644 3 | --- a/libmpx/mpxrt/mpxrt-utils.c 4 | +++ b/libmpx/mpxrt/mpxrt-utils.c 5 | @@ -45,7 +45,11 @@ 6 | #include 7 | #include 8 | #include 9 | +#ifdef __linux__ 10 | +#include 11 | +#else 12 | #include 13 | +#endif 14 | #include 15 | #include "mpxrt-utils.h" 16 | 17 | -------------------------------------------------------------------------------- /workerbase/patches/glibc-sunrpc.patch: -------------------------------------------------------------------------------- 1 | --- sunrpc/rpc/types.h.old 2 | +++ sunrpc/rpc/types.h 3 | @@ -69,7 +69,7 @@ 4 | #include 5 | #endif 6 | 7 | -#ifndef __u_char_defined 8 | +#if 0 9 | typedef __u_char u_char; 10 | typedef __u_short u_short; 11 | typedef __u_int u_int; 12 | @@ -79,7 +79,7 @@ 13 | typedef __fsid_t fsid_t; 14 | # define __u_char_defined 15 | #endif 16 | -#ifndef __daddr_t_defined 17 | +#if 0 18 | typedef __daddr_t daddr_t; 19 | typedef __caddr_t caddr_t; 20 | # define __daddr_t_defined 21 | -------------------------------------------------------------------------------- 
/workerbase/patches/glibc_arm_gcc_fix.patch: -------------------------------------------------------------------------------- 1 | From 175cef4163dd60f95106cfd5f593b8a4e09d02c9 Mon Sep 17 00:00:00 2001 2 | From: Joseph Myers 3 | Date: Tue, 20 May 2014 21:27:13 +0000 4 | Subject: [PATCH] Fix ARM build with GCC trunk. 5 | 6 | sysdeps/unix/sysv/linux/arm/unwind-resume.c and 7 | sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c have static 8 | variables that are written in C code but only read from toplevel asms. 9 | Current GCC trunk now optimizes away such apparently write-only static 10 | variables, so causing a build failure. This patch marks those 11 | variables with __attribute_used__ to avoid that optimization. 12 | 13 | Tested that this fixes the build for ARM. 14 | 15 | * sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c 16 | (libgcc_s_resume): Use __attribute_used__. 17 | * sysdeps/unix/sysv/linux/arm/unwind-resume.c (libgcc_s_resume): 18 | Likewise. 19 | --- 20 | sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c | 3 ++- 21 | sysdeps/unix/sysv/linux/arm/unwind-resume.c | 3 ++- 22 | 3 files changed, 11 insertions(+), 2 deletions(-) 23 | 24 | diff --git a/nptl/sysdeps/pthread/unwind-forcedunwind.c b/nptl/sysdeps/pthread/unwind-forcedunwind.c 25 | index 6ccd9b4..660d148 100644 26 | --- a/nptl/sysdeps/pthread/unwind-forcedunwind.c 27 | +++ b/nptl/sysdeps/pthread/unwind-forcedunwind.c 28 | @@ -22,7 +22,8 @@ 29 | #include 30 | 31 | static void *libgcc_s_handle; 32 | -static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); 33 | +static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) 34 | + __attribute_used__; 35 | static _Unwind_Reason_Code (*libgcc_s_personality) 36 | (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); 37 | static _Unwind_Reason_Code (*libgcc_s_forcedunwind) 38 | diff --git a/sysdeps/gnu/unwind-resume.c b/sysdeps/gnu/unwind-resume.c 39 | index bff3e2b..1f1eb71 100644 40 | --- a/sysdeps/gnu/unwind-resume.c 41 | +++ 
b/sysdeps/gnu/unwind-resume.c 42 | @@ -20,7 +20,8 @@ 43 | #include 44 | #include 45 | 46 | -static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); 47 | +static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) 48 | + __attribute_used__; 49 | static _Unwind_Reason_Code (*libgcc_s_personality) 50 | (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); 51 | 52 | diff --git a/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c b/sysdeps/unix/sysv/linux/arm/unwind-forcedunwind.c 53 | index 6ccd9b4..660d148 100644 54 | --- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c 55 | +++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-forcedunwind.c 56 | @@ -22,7 +22,8 @@ 57 | #include 58 | 59 | static void *libgcc_s_handle; 60 | -static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); 61 | +static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) 62 | + __attribute_used__; 63 | static _Unwind_Reason_Code (*libgcc_s_personality) 64 | (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); 65 | static _Unwind_Reason_Code (*libgcc_s_forcedunwind) 66 | diff --git a/sysdeps/unix/sysv/linux/arm/unwind-resume.c b/sysdeps/unix/sysv/linux/arm/unwind-resume.c 67 | index bff3e2b..1f1eb71 100644 68 | --- a/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c 69 | +++ b/ports/sysdeps/unix/sysv/linux/arm/nptl/unwind-resume.c 70 | @@ -20,7 +20,8 @@ 71 | #include 72 | #include 73 | 74 | -static void (*libgcc_s_resume) (struct _Unwind_Exception *exc); 75 | +static void (*libgcc_s_resume) (struct _Unwind_Exception *exc) 76 | + __attribute_used__; 77 | static _Unwind_Reason_Code (*libgcc_s_personality) 78 | (_Unwind_State, struct _Unwind_Exception *, struct _Unwind_Context *); 79 | 80 | -- 81 | 1.9.4 82 | 83 | -------------------------------------------------------------------------------- /workerbase/patches/glibc_gcc_version.patch: -------------------------------------------------------------------------------- 1 | --- 
configure.old 2017-07-27 20:01:59.997750069 +0000 2 | +++ configure 2017-07-27 20:02:54.029053809 +0000 3 | @@ -5189,7 +5189,7 @@ 4 | ac_prog_version=`$CC -v 2>&1 | sed -n 's/^.*version \([egcygnustpi-]*[0-9.]*\).*$/\1/p'` 5 | case $ac_prog_version in 6 | '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; 7 | - 3.4* | 4.[0-9]* ) 8 | + 3.4* | 4.[0-9]* | 6.[0-9]* | 7.[0-9]* ) 9 | ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; 10 | *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; 11 | 12 | @@ -5255,7 +5255,7 @@ 13 | ac_prog_version=`$MAKE --version 2>&1 | sed -n 's/^.*GNU Make[^0-9]*\([0-9][0-9.]*\).*$/\1/p'` 14 | case $ac_prog_version in 15 | '') ac_prog_version="v. ?.??, bad"; ac_verc_fail=yes;; 16 | - 3.79* | 3.[89]*) 17 | + 3.79* | 3.[89]* | 4.[0-9]* ) 18 | ac_prog_version="$ac_prog_version, ok"; ac_verc_fail=no;; 19 | *) ac_prog_version="$ac_prog_version, bad"; ac_verc_fail=yes;; 20 | 21 | -------------------------------------------------------------------------------- /workerbase/patches/glibc_i686_asm.patch: -------------------------------------------------------------------------------- 1 | Submitted By: Matt Burgess 2 | Date: 2010-04-18 3 | Initial Package Version: 2.11.1 4 | Upstream Status: Not Submitted 5 | Origin: http://www.eglibc.org/archives/patches/msg00073.html 6 | Description: Fixes the following build problem with GCC-4.5.0: 7 | 8 | /mnt/lfs/sources/libc-build/math/s_frexp.os.dt -MT /mnt/lfs/sources/libc-build/math/s_frexp.os 9 | ./sysdeps/i386/fpu/s_frexp.S: Assembler messages: 10 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: invalid identifier for ".ifdef" 11 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: junk at end of line, first unrecognized character is `1' 12 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: junk at end of line, first unrecognized character is `1' 13 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: junk at end of line, first unrecognized character is `1' 14 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: junk at end of line, 
first unrecognized character is `.' 15 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: junk at end of line, first unrecognized character is `1' 16 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: expected comma after name `' in .size directive 17 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: ".endif" without ".if" 18 | ./sysdeps/i386/fpu/s_frexp.S:66: Error: junk `.get_pc_thunk.dx' after expression 19 | make[2]: *** [/mnt/lfs/sources/libc-build/math/s_frexp.os] Error 1 20 | 21 | diff -Naur glibc-2.11.1.orig/nptl/sysdeps/pthread/pt-initfini.c glibc-2.11.1/nptl/sysdeps/pthread/pt-initfini.c 22 | --- glibc-2.11.1.orig/nptl/sysdeps/pthread/pt-initfini.c 2009-12-08 20:10:20.000000000 +0000 23 | +++ glibc-2.11.1/nptl/sysdeps/pthread/pt-initfini.c 2010-04-17 11:34:06.882681001 +0000 24 | @@ -45,6 +45,11 @@ 25 | /* Embed an #include to pull in the alignment and .end directives. */ 26 | asm ("\n#include \"defs.h\""); 27 | 28 | +asm ("\n#if defined __i686 && defined __ASSEMBLER__"); 29 | +asm ("\n#undef __i686"); 30 | +asm ("\n#define __i686 __i686"); 31 | +asm ("\n#endif"); 32 | + 33 | /* The initial common code ends here. 
*/ 34 | asm ("\n/*@HEADER_ENDS*/"); 35 | 36 | diff -Naur glibc-2.11.1.orig/sysdeps/unix/sysv/linux/i386/sysdep.h glibc-2.11.1/sysdeps/unix/sysv/linux/i386/sysdep.h 37 | --- glibc-2.11.1.orig/sysdeps/unix/sysv/linux/i386/sysdep.h 2009-12-08 20:10:20.000000000 +0000 38 | +++ glibc-2.11.1/sysdeps/unix/sysv/linux/i386/sysdep.h 2010-04-17 11:34:06.882681001 +0000 39 | @@ -29,6 +29,10 @@ 40 | #include 41 | #include 42 | 43 | +#if defined __i686 && defined __ASSEMBLER__ 44 | +#undef __i686 45 | +#define __i686 __i686 46 | +#endif 47 | 48 | /* For Linux we can use the system call table in the header file 49 | /usr/include/asm/unistd.h 50 | -------------------------------------------------------------------------------- /workerbase/patches/glibc_nocommon.patch: -------------------------------------------------------------------------------- 1 | --- ./malloc/obstack.c.old 2017-10-27 17:08:52.000000000 -0400 2 | +++ ./malloc/obstack.c 2017-10-27 17:09:54.000000000 -0400 3 | @@ -117,7 +117,7 @@ 4 | /* A looong time ago (before 1994, anyway; we're not sure) this global variable 5 | was used by non-GNU-C macros to avoid multiple evaluation. The GNU C 6 | library still exports it because somebody might use it. 
*/ 7 | -struct obstack *_obstack_compat; 8 | +struct obstack *_obstack_compat __attribute__((nocommon)); 9 | compat_symbol (libc, _obstack_compat, _obstack, GLIBC_2_0); 10 | # endif 11 | # endif 12 | -------------------------------------------------------------------------------- /workerbase/patches/glibc_powerpc64le_gcc_fix.patch: -------------------------------------------------------------------------------- 1 | From db8fed87d9741b6b3da6c2257f01d63ef2fe407c Mon Sep 17 00:00:00 2001 2 | From: Martin Sebor 3 | Date: Mon, 1 Jun 2015 14:12:09 -0300 4 | Subject: [PATCH] powerpc: setcontext.S uses power6 mtfsf when not supported 5 | [BZ #18116] 6 | 7 | The attached patch fixes a glibc build failure with gcc 5 on powerpc64le 8 | caused by a recent change in gcc where the compiler defines the 9 | _ARCH_PWR6 macro when processing assembly files but doesn't invoke the 10 | assembler in the corresponding machine mode (unless it has been 11 | explicitly configured to target POWER 6 or later). A bug had been filed 12 | with gcc for this (65341) but was closed as won't fix. Glibc relies on 13 | the _ARCH_PWR6 macro in a few .S files to make use of Power ISA 2.5 14 | instructions (specifically, the four-argument form of the mtfsf insn). 15 | A similar problem had occurred in the past (bug 10118) but the fix that 16 | was committed for it didn't anticipate this new problem. 17 | --- 18 | ChangeLog | 9 ++++++ 19 | .../unix/sysv/linux/powerpc/powerpc64/setcontext.S | 30 ++++++++++++++---- 20 | .../sysv/linux/powerpc/powerpc64/swapcontext.S | 36 +++++++++++++++++----- 21 | 3 files changed, 61 insertions(+), 14 deletions(-) 22 | 23 | diff --git a/ChangeLog b/ChangeLog 24 | index 6d295e7..239a0e6 100644 25 | --- a/ChangeLog 26 | +++ b/ChangeLog 27 | @@ -1,3 +1,12 @@ 28 | +2015-03-11 Martin Sebor 29 | + 30 | + [BZ #18116] 31 | + * sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S 32 | + (__setcontext): Use extended four-operand version of mtsf whenever 33 | + possible. 
34 | + * sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S 35 | + (__novec_swapcontext): Likewise. 36 | + 37 | 2015-06-01 Siddhesh Poyarekar 38 | 39 | * benchtests/scripts/compare_bench.py: New file. 40 | diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S 41 | index e47a57a..8a08dc4 100644 42 | --- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S 43 | +++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/setcontext.S 44 | @@ -81,22 +81,31 @@ ENTRY(__novec_setcontext) 45 | 46 | # ifdef _ARCH_PWR6 47 | /* Use the extended four-operand version of the mtfsf insn. */ 48 | - mtfsf 0xff,fp0,1,0 49 | -# else 50 | .machine push 51 | .machine "power6" 52 | + 53 | + mtfsf 0xff,fp0,1,0 54 | + 55 | + .machine pop 56 | +# else 57 | /* Availability of DFP indicates a 64-bit FPSCR. */ 58 | andi. r6,r5,PPC_FEATURE_HAS_DFP 59 | beq 5f 60 | /* Use the extended four-operand version of the mtfsf insn. */ 61 | + .machine push 62 | + .machine "power6" 63 | + 64 | mtfsf 0xff,fp0,1,0 65 | + 66 | + .machine pop 67 | + 68 | b 6f 69 | /* Continue to operate on the FPSCR as if it were 32-bits. */ 70 | 5: 71 | mtfsf 0xff,fp0 72 | 6: 73 | - .machine pop 74 | # endif /* _ARCH_PWR6 */ 75 | + 76 | lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31) 77 | lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31) 78 | lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31) 79 | @@ -364,22 +373,31 @@ L(has_no_vec): 80 | 81 | # ifdef _ARCH_PWR6 82 | /* Use the extended four-operand version of the mtfsf insn. */ 83 | - mtfsf 0xff,fp0,1,0 84 | -# else 85 | .machine push 86 | .machine "power6" 87 | + 88 | + mtfsf 0xff,fp0,1,0 89 | + 90 | + .machine pop 91 | +# else 92 | /* Availability of DFP indicates a 64-bit FPSCR. */ 93 | andi. r6,r5,PPC_FEATURE_HAS_DFP 94 | beq 7f 95 | /* Use the extended four-operand version of the mtfsf insn. 
*/ 96 | + .machine push 97 | + .machine "power6" 98 | + 99 | mtfsf 0xff,fp0,1,0 100 | + 101 | + .machine pop 102 | + 103 | b 8f 104 | /* Continue to operate on the FPSCR as if it were 32-bits. */ 105 | 7: 106 | mtfsf 0xff,fp0 107 | 8: 108 | - .machine pop 109 | # endif /* _ARCH_PWR6 */ 110 | + 111 | lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31) 112 | lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31) 113 | lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31) 114 | diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S b/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S 115 | index bc02a21..2421ca4 100644 116 | --- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S 117 | +++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/swapcontext.S 118 | @@ -173,24 +173,34 @@ ENTRY(__novec_swapcontext) 119 | lfd fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31) 120 | lfd fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31) 121 | lfd fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31) 122 | + 123 | # ifdef _ARCH_PWR6 124 | /* Use the extended four-operand version of the mtfsf insn. */ 125 | - mtfsf 0xff,fp0,1,0 126 | -# else 127 | .machine push 128 | .machine "power6" 129 | + 130 | + mtfsf 0xff,fp0,1,0 131 | + 132 | + .machine pop 133 | +# else 134 | /* Availability of DFP indicates a 64-bit FPSCR. */ 135 | andi. r6,r8,PPC_FEATURE_HAS_DFP 136 | beq 5f 137 | - /* Use the extended four-operand version of the mtfsf insn. */ 138 | + 139 | + .machine push 140 | + .machine "power6" 141 | + 142 | mtfsf 0xff,fp0,1,0 143 | + 144 | + .machine pop 145 | + 146 | b 6f 147 | /* Continue to operate on the FPSCR as if it were 32-bits. 
*/ 148 | 5: 149 | mtfsf 0xff,fp0 150 | 6: 151 | - .machine pop 152 | #endif /* _ARCH_PWR6 */ 153 | + 154 | lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31) 155 | lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31) 156 | lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31) 157 | @@ -652,24 +662,34 @@ L(has_no_vec2): 158 | lfd fp0,(SIGCONTEXT_FP_REGS+(32*8))(r31) 159 | lfd fp31,(SIGCONTEXT_FP_REGS+(PT_R31*8))(r31) 160 | lfd fp30,(SIGCONTEXT_FP_REGS+(PT_R30*8))(r31) 161 | + 162 | # ifdef _ARCH_PWR6 163 | /* Use the extended four-operand version of the mtfsf insn. */ 164 | - mtfsf 0xff,fp0,1,0 165 | -# else 166 | .machine push 167 | .machine "power6" 168 | + 169 | + mtfsf 0xff,fp0,1,0 170 | + 171 | + .machine pop 172 | +# else 173 | /* Availability of DFP indicates a 64-bit FPSCR. */ 174 | andi. r6,r8,PPC_FEATURE_HAS_DFP 175 | beq 7f 176 | - /* Use the extended four-operand version of the mtfsf insn. */ 177 | + 178 | + .machine push 179 | + .machine "power6" 180 | + 181 | mtfsf 0xff,fp0,1,0 182 | + 183 | + .machine pop 184 | + 185 | b 8f 186 | /* Continue to operate on the FPSCR as if it were 32-bits. */ 187 | 7: 188 | mtfsf 0xff,fp0 189 | 8: 190 | - .machine pop 191 | #endif /* _ARCH_PWR6 */ 192 | + 193 | lfd fp29,(SIGCONTEXT_FP_REGS+(PT_R29*8))(r31) 194 | lfd fp28,(SIGCONTEXT_FP_REGS+(PT_R28*8))(r31) 195 | lfd fp27,(SIGCONTEXT_FP_REGS+(PT_R27*8))(r31) 196 | -- 197 | 2.9.3 198 | 199 | -------------------------------------------------------------------------------- /workerbase/patches/glibc_regexp_nocommon.patch: -------------------------------------------------------------------------------- 1 | --- misc/regexp.c.old 2017-10-28 12:54:11.000000000 -0400 2 | +++ misc/regexp.c 2017-10-28 12:54:37.000000000 -0400 3 | @@ -30,13 +30,13 @@ 4 | #if SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_23) 5 | 6 | /* Define the variables used for the interface. 
*/ 7 | -char *loc1; 8 | -char *loc2; 9 | +char *loc1 __attribute__((nocommon)); 10 | +char *loc2 __attribute__((nocommon)); 11 | compat_symbol (libc, loc1, loc1, GLIBC_2_0); 12 | compat_symbol (libc, loc2, loc2, GLIBC_2_0); 13 | 14 | /* Although we do not support the use we define this variable as well. */ 15 | -char *locs; 16 | +char *locs __attribute__((nocommon)); 17 | compat_symbol (libc, locs, locs, GLIBC_2_0); 18 | 19 | 20 | -------------------------------------------------------------------------------- /workerbase/patches/libtapi_llvm_dynlib.patch: -------------------------------------------------------------------------------- 1 | diff --git a/src/apple-llvm/src/lib/Support/DynamicLibrary.cpp b/src/apple-llvm/src/lib/Support/DynamicLibrary.cpp 2 | index 9a7aeb5..3e4e8dd 100644 3 | --- a/src/apple-llvm/src/lib/Support/DynamicLibrary.cpp 4 | +++ b/src/apple-llvm/src/lib/Support/DynamicLibrary.cpp 5 | @@ -138,7 +138,7 @@ void* DynamicLibrary::SearchForAddressOfSymbol(const char *symbolName) { 6 | 7 | // This macro returns the address of a well-known, explicit symbol 8 | #define EXPLICIT_SYMBOL(SYM) \ 9 | - if (!strcmp(symbolName, #SYM)) return &SYM 10 | + if (!strcmp(symbolName, #SYM)) return (void*)&SYM 11 | 12 | // On linux we have a weird situation. The stderr/out/in symbols are both 13 | // macros and global variables because of standards requirements. So, we 14 | -------------------------------------------------------------------------------- /workerbase/patches/llvm_ar_options.patch: -------------------------------------------------------------------------------- 1 | From af481e4f940025c84ce601e68fdedbc1bd22cdd2 Mon Sep 17 00:00:00 2001 2 | From: Martin Storsjo 3 | Date: Fri, 3 Nov 2017 20:09:10 +0000 4 | Subject: [PATCH] [llvm-ar] Support an options string that start with a dash 5 | 6 | Some projects call $AR like "$AR -crs output input1 input2". 
7 | 8 | Differential Revision: https://reviews.llvm.org/D39538 9 | 10 | git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317358 91177308-0d34-0410-b5e6-96231b3b80d8 11 | --- 12 | test/tools/llvm-ar/default-add.test | 3 ++- 13 | tools/llvm-ar/llvm-ar.cpp | 20 ++++++++++++++++++++ 14 | 2 files changed, 22 insertions(+), 1 deletion(-) 15 | 16 | diff --git a/test/tools/llvm-ar/default-add.test b/test/tools/llvm-ar/default-add.test 17 | index 88719e4..68e41c2 100644 18 | --- a/test/tools/llvm-ar/default-add.test 19 | +++ b/test/tools/llvm-ar/default-add.test 20 | @@ -4,7 +4,8 @@ RUN: yaml2obj %S/Inputs/coff.yaml -o %t-coff.o 21 | RUN: rm -f %t.ar 22 | RUN: llvm-ar crs %t.ar %t-macho.o 23 | RUN: grep -q __.SYMDEF %t.ar 24 | -RUN: llvm-ar crs %t.ar %t-coff.o 25 | +Test that an option string prefixed by a dash works. 26 | +RUN: llvm-ar -crs %t.ar %t-coff.o 27 | RUN: grep -q __.SYMDEF %t.ar 28 | 29 | RUN: rm -f %t.ar 30 | diff --git a/tools/llvm-ar/llvm-ar.cpp b/tools/llvm-ar/llvm-ar.cpp 31 | index 576265c..8c19f6b 100644 32 | --- a/tools/llvm-ar/llvm-ar.cpp 33 | +++ b/tools/llvm-ar/llvm-ar.cpp 34 | @@ -127,6 +127,8 @@ static cl::extrahelp MoreHelp( 35 | " [v] - be verbose about actions taken\n" 36 | ); 37 | 38 | +static const char OptionChars[] = "dmpqrtxabiosSTucv"; 39 | + 40 | // This enumeration delineates the kinds of operations on an archive 41 | // that are permitted. 42 | enum ArchiveOperation { 43 | @@ -864,6 +866,24 @@ int main(int argc, char **argv) { 44 | Stem.find("lib") != StringRef::npos) 45 | return libDriverMain(makeArrayRef(argv, argc)); 46 | 47 | + for (int i = 1; i < argc; i++) { 48 | + // If an argument starts with a dash and only contains chars 49 | + // that belong to the options chars set, remove the dash. 50 | + // We can't handle it after the command line options parsing 51 | + // is done, since it will error out on an unrecognized string 52 | + // starting with a dash. 
53 | + // Make sure this doesn't match the actual llvm-ar specific options 54 | + // that start with a dash. 55 | + StringRef S = argv[i]; 56 | + if (S.startswith("-") && 57 | + S.find_first_not_of(OptionChars, 1) == StringRef::npos) { 58 | + argv[i]++; 59 | + break; 60 | + } 61 | + if (S == "--") 62 | + break; 63 | + } 64 | + 65 | // Have the command line options parsed and handle things 66 | // like --help and --version. 67 | cl::ParseCommandLineOptions(argc, argv, 68 | -- 69 | 2.7.4 70 | 71 | -------------------------------------------------------------------------------- /workerbase/patches/mingw_gcc710_i686.patch: -------------------------------------------------------------------------------- 1 | From 431ac2a912708546cd7271332e9331399e66bc62 Mon Sep 17 00:00:00 2001 2 | From: Liu Hao 3 | Date: Wed, 3 May 2017 15:52:32 +0800 4 | Subject: [PATCH] winpthreads/src/dll_math.c: Implement `__divmoddi4()' for GCC 5 | 7. 6 | 7 | GCC targeting i686 _may_ generate an external call to the function in 8 | question when divding a 64-bit (DIMode) integer with another one. 9 | Since we are linking against a fake libgcc, we have to implement it too. 
10 | 11 | Signed-off-by: Liu Hao 12 | --- 13 | .../winpthreads/src/libgcc/dll_math.c | 27 ++++++++++++++++++++++ 14 | 1 file changed, 27 insertions(+) 15 | 16 | diff --git a/mingw-w64-libraries/winpthreads/src/libgcc/dll_math.c b/mingw-w64-libraries/winpthreads/src/libgcc/dll_math.c 17 | index e09b481..aeec068 100644 18 | --- a/mingw-w64-libraries/winpthreads/src/libgcc/dll_math.c 19 | +++ b/mingw-w64-libraries/winpthreads/src/libgcc/dll_math.c 20 | @@ -120,6 +120,7 @@ u_quad_t __qdivrem(u_quad_t u, u_quad_t v, u_quad_t *rem); 21 | u_quad_t __udivdi3(u_quad_t a, u_quad_t b); 22 | u_quad_t __umoddi3(u_quad_t a, u_quad_t b); 23 | int __ucmpdi2(u_quad_t a, u_quad_t b); 24 | +quad_t __divmoddi4(quad_t a, quad_t b, quad_t *rem); 25 | 26 | #endif /* !_LIBKERN_QUAD_H_ */ 27 | 28 | @@ -546,6 +547,32 @@ __umoddi3(a, b) 29 | (void)__qdivrem(a, b, &r); 30 | return (r); 31 | } 32 | + 33 | +/* 34 | + * Divide two signed quads. 35 | + * This function is new in GCC 7. 36 | + */ 37 | +quad_t 38 | +__divmoddi4(a, b, rem) 39 | + quad_t a, b, *rem; 40 | +{ 41 | + u_quad_t ua, ub, uq, ur; 42 | + int negq, negr; 43 | + 44 | + if (a < 0) 45 | + ua = -(u_quad_t)a, negq = 1, negr = 1; 46 | + else 47 | + ua = a, negq = 0, negr = 0; 48 | + if (b < 0) 49 | + ub = -(u_quad_t)b, negq ^= 1; 50 | + else 51 | + ub = b; 52 | + uq = __qdivrem(ua, ub, &ur); 53 | + if (rem) 54 | + *rem = (negr ? -ur : ur); 55 | + return (negq ? 
-uq : uq); 56 | +} 57 | + 58 | #else 59 | static int __attribute__((unused)) dummy; 60 | #endif /*deined (_X86_) && !defined (__x86_64__)*/ 61 | -- 62 | 2.7.4 63 | 64 | -------------------------------------------------------------------------------- /workerbase/patches/wine_nopie.patch: -------------------------------------------------------------------------------- 1 | diff --git a/loader/Makefile.in b/loader/Makefile.in 2 | index 8190037..437d4d9 100644 3 | --- a/loader/Makefile.in 4 | +++ b/loader/Makefile.in 5 | @@ -26,7 +26,7 @@ wine64_DEPS = $(WINELOADER_DEPENDS) 6 | wine64_LDFLAGS = $(LDEXECFLAGS) -lwine $(PTHREAD_LIBS) 7 | 8 | wine_preloader_OBJS = preloader.o 9 | -wine_preloader_LDFLAGS = -static -nostartfiles -nodefaultlibs -Wl,-Ttext=0x7c400000 10 | +wine_preloader_LDFLAGS = -static -nostartfiles -nodefaultlibs -Wl,-Ttext=0x7c400000 -no-pie 11 | 12 | wine64_preloader_OBJS = preloader.o 13 | -wine64_preloader_LDFLAGS = -static -nostartfiles -nodefaultlibs -Wl,-Ttext=0x7c400000 14 | +wine64_preloader_LDFLAGS = -static -nostartfiles -nodefaultlibs -Wl,-Ttext=0x7c400000 -no-pie 15 | -------------------------------------------------------------------------------- /workerbase/qemu_register.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -e 3 | 4 | if [ $(id -u) != "0" ]; then 5 | echo "Warning: not running as root, this probably won't work..." 6 | fi 7 | 8 | # Mount if neccessary 9 | if [ ! -d /proc/sys/fs/binfmt_misc ]; then 10 | echo "No binfmt support in the kernel." 11 | echo " Try: '/sbin/modprobe binfmt_misc'" 12 | exit 1 13 | fi 14 | if [ ! -f /proc/sys/fs/binfmt_misc/register ]; then 15 | mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc 16 | fi 17 | 18 | # Reset all pre-registered interpreters, if requested 19 | if [ "$1" = "--reset" ]; then 20 | echo "Resetting interpreters..." 
21 | ( 22 | cd /proc/sys/fs/binfmt_misc 23 | for file in *; do 24 | case "${file}" in 25 | status|register) 26 | ;; 27 | *) 28 | echo -1 > "${file}" 29 | ;; 30 | esac 31 | done 32 | ) 33 | fi 34 | 35 | # Register new interpreters (unless we are ourselves one of these arches) 36 | if [ $(uname -p) != arm* ]; then 37 | echo "Installing interpreter for arm..." 38 | echo ':qemu-arm:M::\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/bin/qemu-arm-static:C' > /proc/sys/fs/binfmt_misc/register 39 | fi 40 | 41 | if [ $(uname -p) != aarch64 ]; then 42 | echo "Intsalling interpreter for aarch64..." 43 | echo ':qemu-aarch64:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff:/usr/bin/qemu-aarch64-static:C' > /proc/sys/fs/binfmt_misc/register 44 | fi 45 | 46 | if [ $(uname -p) != ppc64le ]; then 47 | echo "Installing interpreter for ppc64le..." 48 | echo ':qemu-ppc64le:M::\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x15\x00:\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\x00:/usr/bin/qemu-ppc64le-static:C' > /proc/sys/fs/binfmt_misc/register 49 | fi 50 | --------------------------------------------------------------------------------