├── Dockerfile
├── Dockerfile-emscripten
├── Dockerfile-linux_x64
├── Dockerfile-macosx
├── Dockerfile-temp
├── Dockerfile-tvossimulator
├── Makefile
├── README.md
├── cppdock
├── cppdock-complete.sh
├── example
│   └── cppdock.ini
├── recipes
│   ├── boostorg-config
│   ├── boostorg-mp11
│   ├── chriskohlhoff-asio
│   ├── cisco-openh264
│   ├── cisco-openh264-emscripten
│   ├── default
│   ├── default-emscripten
│   ├── ericniebler-meta
│   ├── howardhinnant-date
│   ├── jedisct1-libsodium
│   ├── jedisct1-libsodium-emscripten
│   ├── kvasir-io-mpl
│   ├── ldionne-dyno
│   ├── open-source-parsers-jsoncpp
│   ├── rbock-sqlpp11
│   ├── rbock-sqlpp11-connector-mysql
│   ├── xiph-opus
│   └── xiph-opus-emscripten
├── recipes_docker
│   ├── Dockerfile-boost
│   └── Dockerfile-libpq-linux_x64
├── test
│   └── json_lock
│       ├── cppdock.json
│       └── run_test.py
└── toolchain
    ├── emscripten.cmake
    ├── linux_x64.cmake
    ├── macosx.cmake
    ├── tvossimulator.cmake
    └── tvossimulator
        ├── Availability.h
        └── AvailabilityInternal.h

/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:22.04

RUN apt-get update \
    && apt-get -yq install \
       python-is-python3 cmake git autoconf bash-completion vim wget xz-utils \
    && echo '. /usr/share/bash-completion/bash_completion && set -o vi' >> /root/.bashrc \
    && echo 'set hlsearch' >> /root/.vimrc

COPY cppdock /usr/local/bin/
COPY cppdock /opt/install/bin/
COPY recipes/ /root/.cppdock_recipes
RUN ln -s /usr/local /opt/sysroot

--------------------------------------------------------------------------------
/Dockerfile-emscripten:
--------------------------------------------------------------------------------
FROM ricejasonf/cppdock as emsdk_unzip
ARG EMSCRIPTEN_TAG=3.1.59
ADD https://github.com/emscripten-core/emsdk/archive/$EMSCRIPTEN_TAG.tar.gz emsdk.tar.gz
RUN tar -xvzf emsdk.tar.gz && mv emsdk-$EMSCRIPTEN_TAG /opt/emsdk
FROM ricejasonf/cppdock:linux_x64
ENV EMSCRIPTEN_TAG=3.1.59
COPY --from=emsdk_unzip /opt/emsdk/ /opt/emsdk
WORKDIR /opt/emsdk
RUN ./emsdk install $EMSCRIPTEN_TAG
RUN ./emsdk activate $EMSCRIPTEN_TAG

# Replace the symlink for sysroot
RUN ln -snf /opt/emsdk/upstream/emscripten/cache/sysroot/ /opt/sysroot

ENV EMSDK=/opt/emsdk \
    PATH="/opt/emsdk:/opt/emsdk/upstream/emscripten:/opt/emsdk/node/16.20.0_64bit/bin:${PATH}"
COPY toolchain/emscripten.cmake /opt/toolchain.cmake

--------------------------------------------------------------------------------
/Dockerfile-linux_x64:
--------------------------------------------------------------------------------
FROM ricejasonf/cppdock:latest
COPY --from=ricejasonf/llvm-project-heavy /opt/install/ /opt/sysroot
ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/opt/sysroot/lib/x86_64-unknown-linux-gnu"
# TODO use -frtlib-add-rpath
COPY toolchain/linux_x64.cmake /opt/toolchain.cmake
RUN ldconfig

--------------------------------------------------------------------------------
/Dockerfile-macosx:
--------------------------------------------------------------------------------
FROM ricejasonf/emscripten:1.37.39 AS cctools

# cctools (linker for darwin targets)
RUN git clone https://github.com/tpoechtrager/cctools-port.git \
    && cd cctools-port/cctools \
    && ./configure --prefix /opt/install \
    && make \
    && make install

FROM ricejasonf/emscripten:1.37.19

RUN apt-get update && apt-get -yq install \
    cmake python bash-completion vim patch clang libxml2-dev \
    && echo '. /usr/share/bash-completion/bash_completion && set -o vi' >> /root/.bashrc \
    && echo 'set hlsearch' >> /root/.vimrc

COPY cppdock /usr/local/bin
COPY recipes/ /root/.cppdock_recipes
COPY ./toolchain/macosx.cmake /opt/toolchain.cmake
COPY --from=cctools /opt/install /usr/local

--------------------------------------------------------------------------------
/Dockerfile-temp:
--------------------------------------------------------------------------------
FROM ricejasonf/emscripten:1.37.39

# Apple Libc (experimental scratch build)
# https://opensource.apple.com/tarballs/Libc/Libc-1244.30.3.tar.gz
RUN git clone https://github.com/unofficial-opensource-apple/Libc \
    && cd Libc \
    && ./configure --prefix=/opt/install \
    && make \
    && make install

--------------------------------------------------------------------------------
/Dockerfile-tvossimulator:
--------------------------------------------------------------------------------
FROM ricejasonf/emscripten:1.37.39 AS cctools

# cctools (linker for darwin targets)
RUN git clone --depth 1 https://github.com/tpoechtrager/cctools-port.git \
    && cd cctools-port/cctools \
    && ./configure --prefix /opt/install \
    && make \
    && make install

FROM ricejasonf/emscripten_fastcomp:1.37.39

RUN apt-get update && apt-get -yq install \
    cmake python bash-completion vim \
    && echo '. /usr/share/bash-completion/bash_completion && set -o vi' >> /root/.bashrc \
    && echo 'set hlsearch' >> /root/.vimrc \
    && mkdir /opt/install \
    && mkdir /opt/build \
    && ln -s /usr/local /opt/sysroot

COPY cppdock /usr/local/bin
COPY recipes/ /root/.cppdock_recipes
COPY ./toolchain/tvossimulator.cmake /opt/toolchain.cmake
COPY ./toolchain/tvossimulator/Availability.h /opt/sysroot/include/
COPY ./toolchain/tvossimulator/AvailabilityInternal.h /opt/sysroot/include/
COPY --from=cctools /opt/install /opt/sysroot

--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
.PHONY: platforms cppdock platform_linux_x64 platform_emscripten \
        platform_emscripten_1_37_19 platform_tvossimulator platform_macosx \
        boost temp install push

cppdock:
	docker build --force-rm=true -t ricejasonf/cppdock .

platforms: platform_linux_x64 platform_emscripten

platform_linux_x64:
	docker build --force-rm=true -f ./Dockerfile-linux_x64 -t ricejasonf/cppdock:linux_x64 .

platform_emscripten:
	docker build --force-rm=true -f ./Dockerfile-emscripten -t ricejasonf/cppdock:emscripten .

platform_emscripten_1_37_19:
	docker build --force-rm=true \
	  --build-arg EMSCRIPTEN_TAG=1.37.19 \
	  -f ./Dockerfile-emscripten \
	  -t ricejasonf/cppdock:emscripten_1_37_19 .

platform_tvossimulator:
	docker build --force-rm=true -f ./Dockerfile-tvossimulator -t ricejasonf/cppdock:tvossimulator .

platform_macosx:
	docker build --force-rm=true -f ./Dockerfile-macosx -t ricejasonf/cppdock:macosx .

temp:
	docker build --force-rm=true -f ./Dockerfile-temp -t ricejasonf/cppdock:temp .
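
# `make install` copies the cppdock script onto the host; `make push`
# publishes the base, platform, and boost images built by the targets above.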
install:
	cp ./cppdock /usr/local/bin/

push: cppdock platforms boost
	docker push ricejasonf/cppdock && \
	docker push ricejasonf/cppdock:linux_x64 && \
	docker push ricejasonf/cppdock:emscripten && \
	docker push ricejasonf/boost:1_85_0 && \
	docker push ricejasonf/boost_header_only:1_85_0

# Prebuild dependencies.
boost:
	docker buildx build . -f ./recipes_docker/Dockerfile-boost --tag ricejasonf/boost:1_85_0 \
	  --target=boost && \
	docker buildx build . -f ./recipes_docker/Dockerfile-boost --tag ricejasonf/boost_header_only:1_85_0 \
	  --target=boost_header_only

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# CppDock

>Manage and build C++ dependencies within a Docker container
>
>This project is a work in progress.

## Overview

CppDock is a small Python script that uses a JSON config file to specify project-specific values and platform-specific dependencies hosted on GitHub.

In the simple `my_app` example, running `cppdock build linux_x64` creates a Docker image called `my_app_deps:linux_x64`.

### Lock Library Dependencies to Specific Revisions Automatically

In a local `cppdock.json` file you specify the repos of your library dependencies, and CppDock automatically records the SHA of the current revision so there are no unexpected changes.

```json
{
  "cppdock": {
    "name": "nbdl"
  },
  "platforms": {
    "develop": {
      "type": "linux_x64",
      "deps": [
        [
          {
            "name": "boostorg/callable_traits",
            "tag": "master"
          }
        ],
```

Run `cppdock init` and `cppdock.json` is updated in place with the current revision for that branch, tag, or `HEAD`.

```json
{
  "cppdock": {
    "name": "nbdl"
  },
  "platforms": {
    "develop": {
      "type": "linux_x64",
      "deps": [
        [
          {
            "name": "boostorg/callable_traits",
            "revision": "684dfbd7dfbdd0438ef3670be10002ca33a71715",
            "tag": "master"
          }
        ],
```

To update a library, simply delete its "revision" property and run `cppdock init` again.

Because Docker caches the build of each library, a library is rebuilt only when its revision changes.

This also creates a more disciplined approach to dependency management that doesn't rely on third parties creating release tags.

### Custom Recipes

Recipes are just bash scripts that are run in a Docker container.

The default recipe assumes a typical CMake build:

```bash
#!/bin/bash

mkdir build && cd build \
  && cmake \
    -DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \
    -DCMAKE_INSTALL_PREFIX=/opt/install \
    -DCMAKE_BUILD_TYPE=Release \
    .. \
  && make install
```

Custom recipes can be placed in your project's `./cppdock_recipes` directory.

Consider the following example describing recipe resolution order:

```
./cppdock_recipes/chriskohlhoff-asio-linux_x64
./cppdock_recipes/chriskohlhoff-asio
./cppdock_recipes/default
{Then resolves using builtin recipes}
```

As you can see, a platform-specific recipe comes first in the resolution order; the platform in this case is `linux_x64`. A minimal custom recipe is sketched below.
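
For a header-only library, a custom recipe can skip CMake entirely. Here is a minimal sketch; the file name and the `include` layout are hypothetical and depend on the library:

```bash
#!/bin/bash
# Hypothetical file: ./cppdock_recipes/boostorg-callable_traits
# Recipes run from the root of the unpacked source tree, so for a
# header-only library just copy the headers into the install prefix.

mkdir -p /opt/install/include \
  && cp -r include/. /opt/install/include/
```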

Note that every build should install to `/opt/install`.

### Custom Compilers and SDKs

CppDock is built specifically for cross-compiling, and it supports custom build environments by letting you specify the Docker image to use for each platform.

Consider the following setup for a hypothetical `mydroid` platform:

```
{
  "cppdock": {
    "name": "nbdl" /* Project name */
  },
  "platforms": {
    "develop": { /* The name of the platform */
      "type": "linux_arm", /* The type of the platform (used in recipe name) */
      "base_image": "my_droid_sdk", /* optionally specify name of docker image */
      "deps": [
        [
          {
            "name": "boostorg/callable_traits",
            "revision": "684dfbd7dfbdd0438ef3670be10002ca33a71715",
            "tag": "master"
          }
        ],
      ]
    },
  }
}
```

The platform base image should include any required build tools, such as CMake or Python, as well as an `/opt/toolchain.cmake` if you rely on any of the default CMake recipes.

--------------------------------------------------------------------------------
/cppdock:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

from collections import OrderedDict
from collections import namedtuple
from uuid import getnode as get_mac
import argparse
import shutil
import glob
import itertools
import json
import os
import re
import string
import subprocess
import sys
import time
from urllib.request import urlopen
import warnings
import zipfile

root_path = os.path.abspath('.')
default_json_file_path = root_path + '/cppdock.json'
default_dep_path = "https://github.com/"
ServiceFn = namedtuple('ServiceFn', ['nginx_proxy', 'postgres'])
noop = lambda *x: None

def remove_none(obj):
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(remove_none(x) for x in obj if x is not None)
    elif isinstance(obj, dict):
        return type(obj)((remove_none(k), remove_none(v))
                         for k, v in obj.items() if k is not None)
    else:
        return obj

def get_commands():
    return {
        'init': (command_init, parse_args_init),
        'build': (command_build, parse_args_build),
        'release': (command_release, parse_args_release),
        'gen_certs': (command_gen_certs, parse_args_gen_certs),
        'dev': (command_dev, parse_args_dev),
        'dev_cluster': (command_dev_cluster, parse_args_dev_cluster),
        'dev_registry': (command_dev_registry, parse_args_dev_registry),
        'install_dep': (command_install_dep, parse_args_install_dep),
        'install_src': (command_install_src, parse_args_install_src),
        'init_dev_service': (command_init_dev_service, parse_args_init_dev_service),
        'cluster_build': (command_cluster_build, parse_args_cluster_build),
        'cluster_sync': (command_cluster_sync, parse_args_cluster_sync),
        'cluster_start': (command_cluster_start, parse_args_cluster_start),
        'cluster_stop': (command_cluster_stop, parse_args_cluster_stop),
        'cluster_rm': (command_cluster_rm, parse_args_cluster_rm),
        'clean': (command_clean, parse_args_clean),
        '_complete': (command_complete, lambda _ : os.sys.argv[2]),
    }

def get_subcommands():
    return filter(lambda x: not x.startswith('_'), get_commands().keys())

def parse_args_command(args):
    parser = argparse.ArgumentParser(add_help=False)
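    # The leading positional token selects the subcommand; get_commands()
    # maps each name to a (handler, argument-parser) pair used by dispatch().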
62 | parser.add_argument('command', choices = get_commands().keys()) 63 | return parser.parse_args(args) 64 | 65 | def create_args_parser(subcommand): 66 | parser = argparse.ArgumentParser(prog='cppdock ' + subcommand) 67 | parser.add_argument('-f', '--file', dest = 'json_file_path', 68 | default = default_json_file_path, 69 | help = 'Specify path to json file') 70 | return parser 71 | 72 | def parse_args_init(args): 73 | parser = argparse.ArgumentParser(prog='cppdock init') 74 | parser.add_argument('-f', '--file', dest = 'json_file_path', 75 | default = default_json_file_path, 76 | help = 'Specify path to json file') 77 | parser.add_argument('--print-config', 78 | dest = 'print_config', 79 | action = 'store_true', 80 | help = 'Print consolidated config') 81 | 82 | return parser.parse_args(args) 83 | 84 | def parse_args_build(args): 85 | parser = argparse.ArgumentParser(prog='cppdock build') 86 | parser.add_argument('-f', '--file', dest = 'json_file_path', 87 | default = default_json_file_path, 88 | help = 'Specify path to json file') 89 | 90 | parser.add_argument('--print-only', dest = 'print_only', 91 | action = 'store_true', 92 | help = 'Print Dockerfile without building') 93 | parser.add_argument('platform', default = None, 94 | nargs = '?', 95 | help = 'Name of target platform') 96 | 97 | return parser.parse_args(args) 98 | 99 | def parse_args_release(args): 100 | parser = argparse.ArgumentParser(prog='cppdock release') 101 | parser.add_argument('-f', '--file', dest = 'json_file_path', 102 | default = default_json_file_path, 103 | help = 'Specify path to json file') 104 | parser.add_argument('--push', dest = 'push', 105 | default = None, 106 | help = 'Tag name to push') 107 | parser.add_argument('platform', default = None, 108 | nargs = '?', 109 | help = 'Name of target platform') 110 | 111 | return parser.parse_args(args) 112 | 113 | def parse_args_gen_certs(args): 114 | parser = argparse.ArgumentParser(prog='cppdock gen_certs') 115 | parser.add_argument('-f', '--file', dest = 'json_file_path', 116 | default = default_json_file_path, 117 | help = 'Specify path to json file') 118 | parser.add_argument('platform', default = None, 119 | nargs = '?', 120 | help = 'Name of platform') 121 | 122 | return parser.parse_args(args) 123 | 124 | def parse_args_dev(args): 125 | parser = argparse.ArgumentParser(prog='cppdock dev') 126 | parser.add_argument('-f', '--file', dest = 'json_file_path', 127 | default = default_json_file_path, 128 | help = 'Specify path to json file') 129 | parser.add_argument('-p', '--port', dest = 'port', 130 | help = 'Specify port to `docker run`') 131 | parser.add_argument('--target', dest = 'target', 132 | default = None, 133 | help = 'Build target instead of shell') 134 | parser.add_argument('--shell', dest = 'shell', 135 | action = 'store_true', 136 | help = 'Forces shell after target builds') 137 | parser.add_argument('--shell-only', dest = 'shell_only', 138 | action = 'store_true', 139 | help = 'Run only as an interactive shell') 140 | parser.add_argument('--privileged', dest = 'privileged', 141 | action = 'store_true', 142 | help = 'Runs container with all host capabilities') 143 | 144 | group = parser.add_argument_group('required arguments') 145 | group.add_argument('platform', help = 'Name of target platform') 146 | 147 | return parser.parse_args(args) 148 | 149 | def parse_args_dev_cluster(args): 150 | parser = argparse.ArgumentParser(prog='cppdock dev_cluster') 151 | parser.add_argument('-f', '--file', dest = 'json_file_path', 152 | default = 
default_json_file_path, 153 | help = 'Specify path to json file') 154 | parser.add_argument('--print-only', dest = 'print_only', 155 | action = 'store_true', 156 | help = 'Print Compose file without deploying') 157 | parser.add_argument('--stop', dest = 'stop', 158 | action = 'store_true', 159 | help = 'Stops the dev cluster services') 160 | parser.add_argument('--clean', dest = 'clean', 161 | action = 'store_true', 162 | help = 'Stops the dev cluster services and deletes associated volumes') 163 | return parser.parse_args(args) 164 | 165 | def parse_args_dev_registry(args): 166 | parser = create_args_parser('dev_registry') 167 | parser.add_argument('--stop', dest = 'stop', 168 | action = 'store_true', 169 | help = 'Stops the dev registry') 170 | return parser.parse_args(args) 171 | 172 | def parse_args_install_dep(args): 173 | parser = argparse.ArgumentParser(prog='cppdock install_dep') 174 | group = parser.add_argument_group('required arguments') 175 | group.add_argument('platform', help = 'Token for target platform') 176 | group.add_argument('branch', help = 'SHA1 of git revision') 177 | group.add_argument('repo', help = 'Name of git repo (e.g. boostorg/hana)') 178 | return parser.parse_args(args) 179 | 180 | def parse_args_install_src(args): 181 | parser = argparse.ArgumentParser(prog='cppdock install_src') 182 | group = parser.add_argument_group('required arguments') 183 | group.add_argument('branch', help = 'SHA1 of git revision') 184 | group.add_argument('repo', help = 'Name of git repo (e.g. llvm-mirror/clang)') 185 | group.add_argument('dest_dir', help = 'Path to copy source files to') 186 | group = parser.add_argument_group('optional arguments') 187 | group.add_argument('src_dir', nargs='?', 188 | default='.', 189 | help = 'Path of subdirectory to copy from repo') 190 | return parser.parse_args(args) 191 | 192 | def parse_args_init_dev_service(args): 193 | parser = argparse.ArgumentParser(prog='cppdock init_dev_service') 194 | parser.add_argument('--shell-only', dest = 'shell_only', 195 | action = 'store_true', 196 | help = 'Run only as an interactive shell') 197 | parser.add_argument('--shell', dest = 'shell', 198 | action = 'store_true', 199 | help = 'Run as an interactive shell') 200 | parser.add_argument('--no-service', dest = 'keep_alive', 201 | default = True, 202 | action = 'store_false', 203 | help = 'Terminate container when process completes') 204 | group = parser.add_argument_group('required arguments') 205 | group.add_argument('build_type', help = 'Build type') 206 | group.add_argument('target', help = 'Build target name') 207 | 208 | return parser.parse_args(args) 209 | 210 | def parse_args_cluster_build(args): 211 | parser = create_args_parser('cluster_build') 212 | return parser.parse_args(args) 213 | 214 | def parse_args_cluster_sync(args): 215 | parser = create_args_parser('cluster_sync') 216 | return parser.parse_args(args) 217 | 218 | def parse_args_cluster_start(args): 219 | parser = create_args_parser('cluster_start') 220 | return parser.parse_args(args) 221 | 222 | def parse_args_cluster_stop(args): 223 | parser = create_args_parser('cluster_stop') 224 | return parser.parse_args(args) 225 | 226 | def parse_args_cluster_rm(args): 227 | parser = create_args_parser('cluster_rm') 228 | return parser.parse_args(args) 229 | 230 | def parse_args_clean(args): 231 | parser = create_args_parser('clean') 232 | group = parser.add_argument_group('required arguments') 233 | group.add_argument('clean_type', help = 'Type to specify subset of files or images to delete', 234 
                        choices = ['build', 'install', 'images', 'all'])
    parser.add_argument('platform', default = None,
                        nargs = '?',
                        help = 'Name of target platform')
    return parser.parse_args(args)

def dispatch(command_args, cli_args):
    command = parse_args_command(command_args).command
    commands = get_commands()
    if command in commands:
        args = commands[command][1](cli_args)
        commands[command][0](args)

def command_complete(args_str):
    subcommands = get_subcommands()

    args = args_str.split()
    has_trailing_space = args_str.endswith(' ')
    # length includes a trailing empty string if there is a trailing space
    if has_trailing_space:
        args.append('')
    # filter out completed options
    args = [x for x in args[:-1] if not x.startswith('-')] + [args[-1]]
    length = len(args)

    if length < 2:
        # should not be possible from the completion script
        return

    candidates = []
    completion = ''

    if length == 2:
        # complete subcommand
        completion = args[1]
        candidates = subcommands
    elif length == 3:
        # complete platform
        subcommand = args[1]
        if subcommand in ['build', 'dev', 'release']:
            completion = args[2]
            config = load_config_file(default_json_file_path)
            candidates = get_all_platforms(config)
        if subcommand == 'clean':
            completion = args[2]
            candidates = ['all', 'build', 'install', 'images']
    elif length == 4:
        subcommand = args[1]
        if subcommand in ['clean']:
            completion = args[3]
            config = load_config_file(default_json_file_path)
            candidates = get_all_platforms(config)

    candidates = filter(lambda x: x.startswith(completion), candidates)
    print(' '.join(candidates))

def command_init(args):
    config = load_config_file_with_args(args)
    if args.print_config:
        print(json.dumps(config, indent=2, separators=(',', ': ')))

def command_build(args):
    config = load_config_file_with_args(args)
    set_master_machine(config)

    platforms = [args.platform] if args.platform else get_all_platforms(config)
    for platform in platforms:
        make_build_image(config, platform, args.print_only)

def command_release(args):
    config = load_config_file_with_args(args)
    config['__default_build_type'] = 'release'
    set_master_machine(config)
    print_only = False
    push_tag_name = args.push

    platforms = [args.platform] if args.platform else get_all_platforms(config)

    if push_tag_name and len(platforms) > 1:
        raise RuntimeError(
            "The `push` option is not allowed with multiple platforms")

    for platform in platforms:
        make_build_image(config, platform, print_only)
        install_dir = create_install_dir(config, platform)
        run_build_image_install(config, platform, install_dir)
        make_release_image(config, platform, install_dir)
        if push_tag_name:
            push_release_image(config, platform, push_tag_name)


def make_build_image(config, platform_name, print_only = False):
    if is_platform_service(config, platform_name):
        fn = make_service_image_fn(config, platform_name)
        fn(config, platform_name, print_only)
        return

    make_base_image(config, platform_name, print_only)
    dockerfile = make_deps_dockerfile(config, platform_name)
    tag_name = get_build_tag_name(config, platform_name)
    make_docker_image(tag_name, dockerfile, print_only)

def make_release_image(config, platform_name, context_dir):
    if is_platform_service(config, platform_name):
        return
    print_only = False
    build_tag_name = get_build_tag_name(config, platform_name)
    release_tag_name = get_release_tag_name(config, platform_name)
    dockerfile = make_release_dockerfile(config, platform_name,
                                         build_tag_name)
    if is_cluster_mode(config):
        machine_name = get_master_machine_name(config)
        make_docker_image_remote(machine_name, release_tag_name, dockerfile,
                                 context_dir)
    else:
        make_docker_image(release_tag_name, dockerfile, print_only, context_dir)
    return release_tag_name

def make_release_dockerfile(config, platform_name, build_image):
    base_image = "ubuntu:bionic" # TODO use config or something

    return """
FROM {base_image}
COPY ./ /opt/install
WORKDIR /opt/install
""".format(base_image = base_image)

def push_release_image(config, platform_name, tag_name):
    image_name = get_release_tag_name(config, platform_name)
    subprocess.check_output(['docker', 'tag', image_name, tag_name])
    subprocess.check_output(['docker', 'push', tag_name])

def make_service_image_fn(config, platform_name):
    service_type = get_service_type(config, platform_name)
    fn = ServiceFn(nginx_proxy = make_nginx_proxy_image,
                   postgres = noop)
    return getattr(fn, service_type)

def get_service_image_tag_name(config, platform_name):
    # this gets the image name to push to registry
    # or None if not needed
    fn = make_service_image_fn(config, platform_name)
    if fn is noop:
        return None
    return get_image_tag_name(config, platform_name, 'service')

def command_gen_certs(args):
    config = load_config_file_with_args(args)
    if args.platform:
        domain = get_domain(config, args.platform)
    else:
        domain = config['cppdock'].get('domain')

    assert domain is not None, "Config file must specify domain name"
    import_ssl_certificate(*generate_self_signed_ssl_certificate(domain))

def make_nginx_proxy_image(config, platform_name, print_only):
    tag_name = get_image_tag_name(config, platform_name, 'service')
    conf_data = get_nginx_proxy_conf(config, platform_name)
    dockerfile = """
FROM nginx:alpine
RUN echo $'{conf_data}' > /etc/nginx/nginx.conf
""".format(conf_data = conf_data.replace('\n', '\\n\\\n'))

    make_docker_image(tag_name, dockerfile, print_only)

def command_dev(args):
    config = load_config_file_with_args(args)
    set_master_machine(config)
    platform_name = args.platform
    ports_arg = '-p=' + args.port if hasattr(args, 'port') and args.port else None
    print_only = False
    make_build_image(config, platform_name, print_only)
    cluster_sync_source(config)
    run_build_image(config, platform_name, args.target, args.shell,
                    args.shell_only, args.privileged, ports_arg)

def command_dev_cluster(args):
    config = load_config_file_with_args(args)
    set_master_machine(config)

    if (args.stop or args.clean):
        dev_cluster_stop(config)
        if (args.clean):
            dev_cluster_clean_containers(config)
            dev_cluster_clean_volumes(config)
    elif (args.print_only):
        print(get_dev_compose_file(config))
    else:
        # make the
build images 424 | [make_build_image(config, p) for p in get_all_platforms(config)] 425 | 426 | if is_cluster_mode(config): 427 | print("Running in CLUSTER mode...") 428 | else: 429 | print("Running in LOCAL mode...") 430 | 431 | # tags, and pushes the dev images which should be to the dev_registry 432 | for platform_name in get_all_platforms(config): 433 | if is_platform_service(config, platform_name): 434 | tag_name = get_service_image_tag_name(config, platform_name) 435 | if tag_name: 436 | push_docker_image(config, tag_name) 437 | else: 438 | tag_name = get_build_tag_name(config, platform_name) 439 | push_docker_image(config, tag_name) 440 | 441 | cluster_sync_source(config) 442 | 443 | stack_name = get_stack_name(config) 444 | run_stack_deploy(config, stack_name) 445 | 446 | print("Finished deploying {stack_name}\n".format(stack_name = stack_name)) 447 | dev_service_name = subprocess.check_output(['docker', 'ps', '-q', '-f name=servicename']) 448 | 449 | def command_dev_registry(args): 450 | config = load_config_file_with_args(args) 451 | set_master_machine(config) 452 | 453 | if args.stop: 454 | dev_registry_rm() 455 | else: 456 | dev_registry_create() 457 | 458 | def command_install_dep(args): 459 | # This called inside a container so config is not allowed 460 | # Even if we add a merged config file to the container we 461 | # do not want it available at this stage or things will 462 | # be rebuilt for every config change 463 | check_sysroot() 464 | url = make_archive_url(args.repo, args.branch) 465 | input = urlopen(url) 466 | output = open('dep.zip', 'wb') 467 | buf = '' 468 | while True: 469 | buf = input.read(800) 470 | output.write(buf) 471 | output.flush() 472 | if (len(buf) == 0): 473 | break 474 | output.close() 475 | 476 | assert os.path.isfile('dep.zip') 477 | assert zipfile.is_zipfile('dep.zip') 478 | 479 | zipfile.ZipFile('dep.zip', 'r').extractall() 480 | recipe = find_recipe(args.platform, args.repo) 481 | cwd_dir = [x for x in os.listdir('.') if x.endswith(args.branch)][0] 482 | p = subprocess.Popen([recipe], cwd = cwd_dir) 483 | p.communicate('') 484 | if p.returncode != 0: 485 | print(""" 486 | 487 | FAILURE: Recipe returned error code {0}. 
488 | """.format(p.returncode)) 489 | sys.exit(1) 490 | 491 | def command_install_src(args): 492 | url = make_archive_url(args.repo, args.branch) 493 | input = urlopen(url) 494 | output = open('dep.zip', 'wb') 495 | buf = '' 496 | while True: 497 | buf = input.read(800) 498 | output.write(buf) 499 | output.flush() 500 | if (len(buf) == 0): 501 | break 502 | output.close() 503 | 504 | assert os.path.isfile('dep.zip') 505 | assert zipfile.is_zipfile('dep.zip') 506 | 507 | zipfile.ZipFile('dep.zip', 'r').extractall() 508 | base_src_dir = [x for x in os.listdir('.') if x.endswith(args.branch)][0] 509 | src_dir = os.path.join(base_src_dir, args.src_dir); 510 | shutil.copytree(src_dir, args.dest_dir) 511 | shutil.rmtree(base_src_dir); 512 | 513 | def command_init_dev_service(args): 514 | # command run in container so config is not allowed 515 | check_sysroot() 516 | p = run_dev_service_target(args.build_type, args.target, 517 | args.shell_only, args.shell, 518 | args.keep_alive) 519 | if p: 520 | sys.exit(p.returncode) 521 | 522 | def command_cluster_build(args): 523 | config = load_config_file_with_args(args) 524 | machine_create_all(config) 525 | cluster_sync_source_init(config) 526 | cluster_sync_source(config) 527 | 528 | def command_cluster_sync(args): 529 | config = load_config_file_with_args(args) 530 | cluster_sync_source(config) 531 | 532 | def command_cluster_start(args): 533 | config = load_config_file_with_args(args) 534 | machine_start_all(config) 535 | 536 | def command_cluster_stop(args): 537 | config = load_config_file_with_args(args) 538 | machine_stop_all(config) 539 | 540 | def command_cluster_rm(args): 541 | config = load_config_file_with_args(args) 542 | machine_rm_all(config) 543 | 544 | def command_clean(args): 545 | # Use `cppdock dev_cluster --stop` for deployed stacks. 
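    # 'all' expands to the install, build, and images clean types handled below.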
546 | config = load_config_file_with_args(args) 547 | set_master_machine(config) 548 | platforms = [args.platform] if args.platform else get_all_platforms(config) 549 | clean_types = [args.clean_type] if args.clean_type != 'all' else ['install', 550 | 'build', 551 | 'images'] 552 | for clean_type in clean_types: 553 | for platform_name in platforms: 554 | if clean_type == 'install': 555 | # remove the host machine install directory 556 | install_dir = get_install_path(config, platform_name) 557 | clean_install_files(config, platform_name, install_dir) 558 | remove_install_dir(config, install_dir) 559 | elif clean_type == 'build': 560 | # remove the build volume 561 | subprocess.check_output(['docker', 'volume', 'rm', '--force', 562 | get_volume_name(config, platform_name, 'release'), 563 | get_volume_name(config, platform_name, 'debug')]) 564 | elif clean_type == 'images': 565 | # remove all build and release images 566 | tag_names = [get_build_tag_name(config, platform_name), 567 | get_base_tag_name(config, platform_name), 568 | get_release_tag_name(config, platform_name)] 569 | subprocess.check_output(['docker', 'image', 'rm', '--force', 570 | get_build_tag_name(config, platform_name), 571 | get_base_tag_name(config, platform_name), 572 | get_release_tag_name(config, platform_name)]) 573 | 574 | else: 575 | raise ValueError('Unsupported clean type') 576 | 577 | def install_deps_platform(platform, items): 578 | for i in range(len(items)): 579 | repo, branch = items[i] 580 | branch = branch[0:40] 581 | install_dep(i, platform, repo, branch) 582 | 583 | def load_config_file_with_args(args): 584 | json_file_path = args.json_file_path 585 | return load_config_file(json_file_path) 586 | 587 | def load_config_file(json_file_path): 588 | config_paths = [] 589 | config_paths.insert(0, json_file_path) 590 | config = load_config_file_json(json_file_path) 591 | 592 | config = lock_config_file_deps(config) 593 | write_config_file(config, json_file_path) 594 | 595 | config = normalize_config_paths(config) 596 | config = merge_base_config(config, config_paths) 597 | config = process_config_replace_deps(config) 598 | 599 | source = config.get('source', {}) 600 | config['source'] = source 601 | source['allow_sync'] = calculate_allow_sync(config) 602 | source["path"] = calculate_source_path(config, config_paths) 603 | 604 | cppdock = config['cppdock'] 605 | cppdock["cppdock_image"] = cppdock.get("cppdock_image", 606 | "ricejasonf/cppdock") 607 | config["machine_list"] = get_machine_list(config) 608 | return config 609 | 610 | def load_config_file_json(json_file_path): 611 | if os.path.isdir(json_file_path): 612 | json_file_path = os.path.join(json_file_path, 'cppdock.json') 613 | 614 | if not os.path.isfile(json_file_path): 615 | raise ValueError('Config file not found: ' + json_file_path) 616 | stream = open(json_file_path, 'r') 617 | config = json.load(stream, object_pairs_hook=OrderedDict) 618 | 619 | # validate 620 | assert "cppdock" in config, "Config must have cppdock section" 621 | assert "name" in config["cppdock"], "Config must have 'name' in cppdock section" 622 | 623 | return config 624 | 625 | def normalize_config_paths(config): 626 | # this should happen after writing revision locks 627 | cppdock = config["cppdock"] 628 | source = config.get("source") 629 | if "base_config" in cppdock: 630 | cppdock["base_config"] = os.path.abspath(cppdock["base_config"]) 631 | if source and "path" in source and source["path"] != None: 632 | source["path"] = os.path.abspath(source["path"]) 633 | 634 | return 
config

def write_config_file(config, json_file_path):
    try:
        with open(json_file_path, 'w') as fh:
            json.dump(config, fh, indent=2, separators=(',', ': '))
            fh.write('\n')
    except:
        warnings.warn('WARNING: Unable to write to json file')

def merge_base_config(config, config_paths):
    if "base_config" not in config["cppdock"]:
        return config
    base_config_path = os.path.abspath(config["cppdock"]["base_config"])
    del config["cppdock"]["base_config"]
    config_paths.insert(0, base_config_path)
    base_config = load_config_file_json(base_config_path)

    # merge cppdock section
    cppdock_section = {}
    cppdock_section.update(base_config["cppdock"])
    cppdock_section.update(config["cppdock"])
    config["cppdock"] = cppdock_section

    # only include platforms specified in current config
    platforms = {}

    for platform_name in config.get("platforms", {}):
        platforms[platform_name] = merge_platform_config(platform_name, base_config, config)

    config["platforms"] = platforms
    return config

def merge_platform_config(platform_name, base_config, config):
    result = {}
    platform = config["platforms"].get(platform_name, {})
    base_platform = base_config.get("platforms", {}).get(platform_name, {})

    result.update(base_platform)
    result.update(platform)
    if config.get('deps') or base_config.get('deps'):
        result["deps"] = []
        result["deps"] += base_platform.get("deps", [])
        result["deps"] += platform.get("deps", [])
    return result

def process_config_replace_deps(config):
    # TODO read "replace_deps" to replace all deps with a
    # specified name or a dep with a specific id
    # not sure if this is desirable yet
    return config

def lock_config_file_deps(config):
    if 'platforms' not in config:
        return config
    for platform, settings in config['platforms'].items():
        check_base_image_conflict(settings)

        for stage in settings.get('deps', []):
            if not isinstance(stage, list):
                raise TypeError("Each stage in 'deps' must be an array.")
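            # Locking mutates each dep entry in place, recording a
            # 'revision' SHA so subsequent builds are reproducible.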
            for item in stage:
                item = lock_item_json(item)

    return config

def lock_item_json(item):
    current_sha = get_current_sha_with_comment(item)
    item.update(revision = current_sha)

    return item

def get_current_sha_with_comment(item):
    repo = item['name']
    tag = item.get('tag')
    source = item.get('source')

    if 'revision' in item:
        return item['revision']
    else:
        if source == 'docker':
            tag = tag or 'latest'
            repo_tag = repo + ":" + tag

            try:
                # pull the image from the registry to get the RepoDigest
                subprocess.check_output(['docker', 'pull', repo_tag])
            except:
                raise RuntimeError("Unable to pull docker image from registry: " + repo_tag)

            try:
                inspect_result = subprocess.check_output(['docker', 'inspect', repo_tag])
                digest = json.loads(inspect_result)[0]['RepoDigests'][0].split('@')[1]
            except:
                raise RuntimeError("Unable to get SHA from docker image")

            return digest
        else:
            tag = normalize_branch_name(tag)
            lines = subprocess.check_output(
                ['git', 'ls-remote', normalize_repo_name(repo)]).decode('utf-8').splitlines()
            for line in lines:
                if line.endswith(tag):
                    return line[0:40]
            raise RuntimeError("Unable to get SHA from remote git repository")

def normalize_repo_name(name):
    return default_dep_path + name

def normalize_branch_name(name):
    if name == None or len(name) == 0 or name == 'HEAD':
        return 'HEAD'
    elif name.startswith('refs/'):
        return name
    else:
        return "refs/heads/" + name

def is_sha(name):
    return len(name) == 40

def check_base_image_conflict(settings):
    if 'base_image' in settings and 'base_image_dockerfile' in settings:
        raise RuntimeError("Conflicting base image settings detected")

def get_max_key_length(config):
    length = 0
    for section in config.sections():
        for item in config.items(section):
            len_ = len(item[0])
            if (len_ > length):
                length = len_
    return length

def make_archive_url(repo, branch):
    return 'https://github.com/{0}/archive/{1}.zip'.format(repo, branch)

def get_base_image_dockerfile(config, platform_name):
    return config['platforms'][platform_name].get('base_image_dockerfile')

def get_user_base_tag_name(config, platform_name):
    if get_base_image_dockerfile(config, platform_name):
        return get_image_tag_name(config, platform_name, 'user_base')

    tag_name = config['platforms'][platform_name].get('base_image')
    if tag_name:
        return tag_name

    platform_type = get_platform_type(config, platform_name)
    return get_config_option(config, 'platform_' + platform_type)

def get_base_image_dockerfile_data(config, platform_name):
    tag_name = get_user_base_tag_name(config, platform_name)
    cppdock_tag_name = config['cppdock']['cppdock_image']

    return """
FROM {tag_name}
# installs cppdock in base image
COPY --from={cppdock_tag_name} /opt/install/ /usr/local
COPY --from={cppdock_tag_name} /root/.cppdock_recipes/ /root/.cppdock_recipes
RUN mkdir -p /opt/install && \
    mkdir -p /opt/build && \
    touch /opt/toolchain.cmake
""".format(tag_name = tag_name,
           cppdock_tag_name = cppdock_tag_name)

def make_base_image(config, platform_name, print_only):
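    # Builds the per-platform base image: the user-supplied Dockerfile
    # (if any) first, then a layer that copies cppdock and its recipes in.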
    make_base_image_dockerfile(config, platform_name, print_only)
    dockerfile = get_base_image_dockerfile_data(config, platform_name)
    tag_name = get_base_tag_name(config, platform_name)
    make_docker_image(tag_name, dockerfile, print_only)

def make_base_image_dockerfile(config, platform_name, print_only):
    base_image_dockerfile = get_base_image_dockerfile(config, platform_name)

    if base_image_dockerfile:
        dockerfile = "./{0}".format(base_image_dockerfile)
        tag_name = get_user_base_tag_name(config, platform_name)

        with open(dockerfile, 'r') as file:
            dockerfile_data = file.read()
            make_docker_image(tag_name, dockerfile_data, print_only)

def find_recipe(platform, repo):
    # make a flat list of recipe file paths
    # to search linearly
    base_paths = ['/opt/cppdock_recipes',
                  '/opt/sysroot/cppdock_recipes',
                  '/root/.cppdock_recipes']

    repo = repo.replace('/', '-')
    repo_with_platform = repo + '-' + platform
    default_with_platform = 'default' + '-' + platform
    file_names = [repo_with_platform,
                  repo,
                  default_with_platform,
                  'default']

    product = itertools.product(base_paths, file_names)
    xs = [os.path.join(a, b) for (a, b) in product]
    for x in xs:
        if is_verbose_mode():
            print("Trying Recipe Location: {0}".format(x))
        if os.path.isfile(x):
            return x
    raise ValueError('Unable to find cppdock recipe: ' + repo_with_platform)

def check_sysroot():
    # This should only be called inside a build container
    if not os.path.exists('/opt/sysroot'):
        # implicitly create /opt/sysroot if none exists
        os.symlink('/usr/local', '/opt/sysroot')
    assert os.path.isdir('/opt/sysroot/')

def get_arg(args, i):
    return next(iter(args[i:i+1]), None)

def get_config_option(config, name):
    defaults = {
        'project': None,
        'platform_linux_x64': 'ricejasonf/cppdock:linux_x64',
        'platform_emscripten': 'ricejasonf/cppdock:emscripten',
        'platform_tvossimulator': 'ricejasonf/cppdock:tvossimulator',
    }
    value = None

    if name in defaults:
        value = defaults[name]
    if value == None:
        raise ValueError('Config option has no default for "{0}"'.format(name))
    return value

def make_docker_image(tag_name, dockerfile_data, print_only = False, context_dir = '.'):
    if print_only:
        print('###### Docker Image: ' + tag_name + ' ######')
        print(dockerfile_data)
        return
    p = subprocess.Popen(['docker', 'buildx', 'build', '--tag=' + tag_name,
                          '--file=-', context_dir], stdin=subprocess.PIPE)
    out, err = p.communicate(bytes(dockerfile_data, 'utf-8'))
    if p.returncode == 0:
        print("Finished building {0}.".format(tag_name))
    else:
        print("""

FAILURE: Build of {0} FAILED.
878 | 879 | """.format(tag_name)) 880 | sys.exit(1) 881 | 882 | return (p.returncode, out, err) 883 | 884 | def make_docker_image_remote(machine_name, tag_name, dockerfile_data, context_dir): 885 | # runs docker build on specifed machine 886 | cmd = ['docker', 'buildx', 'build', '--tag=' + tag_name, '--file=-', 887 | context_dir] 888 | cmd = ['docker-machine', 'ssh', machine_name, 'sudo'] + cmd 889 | p = subprocess.Popen(cmd, stdin=subprocess.PIPE) 890 | out, err = p.communicate(bytes(dockerfile_data, 'utf-8')) 891 | if p.returncode == 0: 892 | print("Finished building {0}.".format(tag_name)) 893 | else: 894 | print(""" 895 | 896 | FAILURE: Build of {0} FAILED. 897 | 898 | """.format(tag_name)) 899 | sys.exit(1) 900 | 901 | return (p.returncode, out, err) 902 | 903 | def push_docker_image(config, tag_name): 904 | # tags with registry and pushes 905 | registry = get_registry(config) 906 | rtag_name = registry + '/' + tag_name 907 | print("Pushing {}".format(rtag_name)) 908 | subprocess.check_output(['docker', 'tag', tag_name, rtag_name]) 909 | subprocess.check_output(['docker', 'push', rtag_name]) 910 | print("Finished pushing {0}".format(rtag_name)) 911 | return rtag_name 912 | 913 | def get_platform_type(config, platform_name): 914 | try: 915 | return config['platforms'][platform_name]['type'] 916 | except: 917 | raise ValueError('Platform name `{0}` not found.'.format(platform_name)) 918 | 919 | # is_platform_service(config, platform_name) 920 | # is_platform_service(platform_type) 921 | def is_platform_service(*argv): 922 | if len(argv) == 1: 923 | [platform_type] = argv 924 | elif len(argv) == 2: 925 | [config, platform_name] = argv 926 | platform_type = get_platform_type(config, platform_name) 927 | return platform_type == 'service' or platform_type.startswith('service:') 928 | 929 | def get_project_name(config): 930 | return config['cppdock']['name'] 931 | 932 | def make_deps_dockerfile(config, platform_name): 933 | platform_type = get_platform_type(config, platform_name) 934 | # move this to be used when building the base image 935 | #base_image = get_base_image(config, platform_name) or get_config_option(config, 'platform_' + platform_type) 936 | base_image = get_base_tag_name(config, platform_name) 937 | deps = get_deps_from_config(config, platform_name) 938 | deps_stages = '' 939 | deps_imports = '' 940 | 941 | build_type = get_build_type(config, platform_name) 942 | target = get_dev_service_target(config, platform_name) 943 | 944 | dev_service_init = get_dev_service_init(config, platform_name) 945 | copy_dev_service_init = '' 946 | if dev_service_init: 947 | copy_dev_service_init = "COPY {} /opt/dev_service_init".format( 948 | dev_service_init) 949 | 950 | for dep in deps: 951 | deps_stages += " ".join([make_deps_stage(base_image, platform_name, dep)]) 952 | deps_imports += make_deps_import(platform_name, dep[-1][2]) 953 | 954 | return """ 955 | {deps_stages} 956 | FROM {base_image} 957 | {deps_imports} 958 | {copy_dev_service_init} 959 | WORKDIR /opt/build 960 | CMD cppdock init_dev_service --shell-only {build_type} {target} 961 | """.format(deps_stages = deps_stages, 962 | base_image = base_image, 963 | deps_imports = deps_imports, 964 | copy_dev_service_init = copy_dev_service_init, 965 | build_type = build_type, 966 | target = target) 967 | return 968 | 969 | def make_deps_stage(base_image, platform_name, deps): 970 | copy_recipes_term = "" 971 | build_image = base_image 972 | 973 | # only one dep that is source: docker does not use make_install_dep 974 | 975 | 
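    # (a docker-sourced dep is imported from its image digest via
    # COPY --from instead of being built from source with a recipe)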
docker_image_digest = get_stage_docker_image_digest(deps) 976 | if docker_image_digest is not None: 977 | build_image = docker_image_digest 978 | install_deps = '' 979 | else: 980 | install_deps = " ".join([make_install_dep(x, y, z, zz) for x, y, z, zz in deps]) 981 | 982 | revision = deps[-1][2].replace('/', '_') 983 | 984 | return """ 985 | FROM {0} as build_{2}_{1} 986 | {4} 987 | WORKDIR /usr/local/src 988 | {3} 989 | """.format(build_image, platform_name, revision, install_deps, copy_recipes_term) 990 | 991 | def get_stage_docker_image_digest(stage_deps): 992 | if len(stage_deps) == 1: 993 | for platform, revision, repo, source in stage_deps: 994 | if source == 'docker': 995 | # repo@revision 996 | return '{0}@{1}'.format(repo, revision) 997 | return None 998 | 999 | 1000 | def make_install_dep(platform, revision, repo, source=''): 1001 | if source == 'docker': 1002 | return """ 1003 | COPY --from={0}@{1} /opt/install /opt/sysroot 1004 | COPY --from={0}@{1} /opt/install /opt/install""".format(repo, revision) 1005 | 1006 | if not is_sha(revision): 1007 | return '' 1008 | repo_recipe_prefix = repo.replace('/', '-') 1009 | copy_local_recipe = '' 1010 | if os.path.isfile(os.path.join(root_path, 'cppdock_recipes', repo_recipe_prefix + '-' + platform)): 1011 | copy_local_recipe = """ 1012 | COPY cppdock_recipes/{0}-{1} /opt/cppdock_recipes/""".format(repo_recipe_prefix, platform) 1013 | elif os.path.isfile(os.path.join(root_path, 'cppdock_recipes', repo_recipe_prefix)): 1014 | copy_local_recipe = """ 1015 | COPY cppdock_recipes/{0} /opt/cppdock_recipes/""".format(repo_recipe_prefix) 1016 | 1017 | return """{3} 1018 | RUN cppdock install_dep {0} {1} {2}""".format(platform, revision, repo, copy_local_recipe) 1019 | 1020 | def make_deps_import(platform, revision): 1021 | return """ 1022 | COPY --from=build_{1}_{0} /opt/install/ /opt/sysroot""".format(platform, revision.replace('/', '_')) 1023 | 1024 | def get_deps_from_config(config, platform_name): 1025 | platform = config['platforms'][platform_name] 1026 | 1027 | return [get_deps_stage_items(stage, platform['type']) \ 1028 | for stage in platform.get('deps', [])] 1029 | 1030 | raise ValueError('Platform is not specified in cppdock json file') 1031 | 1032 | def get_deps_stage_items(deps_stage, platform_type): 1033 | return [(platform_type, item['revision'], item['name'], item.get('source')) for item in deps_stage] 1034 | 1035 | def get_run_options_mount_source(config): 1036 | src_path = get_build_source_path(config) 1037 | return ['--mount', 'type=bind,source=' + src_path + ',target=/opt/src,readonly'] 1038 | 1039 | def get_run_options_mount_build(config, platform_name): 1040 | volume_name = get_volume_name(config, platform_name) 1041 | return ['--volume', '{}:/opt/build'.format(volume_name)] 1042 | 1043 | def get_run_options_mount_install(config, platform_name, install_dir): 1044 | return ['--mount', 'type=bind,source=' + install_dir + ',target=/opt/install'] 1045 | 1046 | def run_build_image(config, platform_name, target, shell, shell_only, privileged, ports_arg): 1047 | tag_name = get_build_tag_name(config, platform_name) 1048 | 1049 | cmd = [] 1050 | shell_op = '--shell' if shell else None; 1051 | shell_only_op = '--shell-only' if shell_only else None; 1052 | privileged_op = '--privileged' if privileged else None; 1053 | if target: 1054 | build_type = get_build_type(config, platform_name) 1055 | cmd = ["cppdock", "init_dev_service", build_type, target, shell_op, 1056 | shell_only_op, '--no-service'] 1057 | mount_build = 
get_run_options_mount_build(config, platform_name) 1058 | mount_src = get_run_options_mount_source(config) 1059 | 1060 | subprocess.call(remove_none(['docker', 'run', '--rm', '-it', ports_arg, privileged_op] 1061 | + mount_src 1062 | + mount_build 1063 | + [ tag_name ] 1064 | + cmd)) 1065 | 1066 | def run_build_image_install(config, platform_name, install_dir): 1067 | tag_name = get_build_tag_name(config, platform_name) 1068 | src_path = get_build_source_path(config) 1069 | target = get_release_target(config, platform_name) 1070 | build_type = get_build_type(config, platform_name) 1071 | 1072 | cmd = ["cppdock", "init_dev_service", build_type, target, '--no-service'] 1073 | 1074 | mount_build = get_run_options_mount_build(config, platform_name) 1075 | mount_src = get_run_options_mount_source(config) 1076 | mount_install = get_run_options_mount_install(config, platform_name, install_dir) 1077 | returncode = subprocess.call(remove_none(['docker', 'run', '--rm'] 1078 | + mount_src 1079 | + mount_build 1080 | + mount_install 1081 | + [ tag_name ] 1082 | + cmd)) 1083 | if returncode != 0: 1084 | sys.exit(1) 1085 | 1086 | def get_install_path(config, platform): 1087 | return '/tmp/{}.install'.format(get_release_tag_name(config, platform)) 1088 | 1089 | def create_install_dir(config, platform): 1090 | path = get_install_path(config, platform) 1091 | cmd = ['mkdir', '-p', path] 1092 | 1093 | if is_cluster_mode(config): 1094 | name = get_master_machine_name(config) 1095 | cmd = ['docker-machine', 'ssh', name] + cmd 1096 | 1097 | p = subprocess.Popen(cmd, stdin = subprocess.PIPE) 1098 | p.communicate() 1099 | 1100 | if p.returncode != 0: 1101 | raise RuntimeError( 1102 | 'Failed to create install directory: {}'.format(path)) 1103 | return path 1104 | 1105 | def remove_install_dir(config, path): 1106 | assert path.startswith('/tmp/') and path.endswith('.install'), "Must only delete valid install path" 1107 | if not os.path.isdir(path): 1108 | return 1109 | cmd = ['rmdir', path] 1110 | 1111 | if is_cluster_mode(config): 1112 | name = get_master_machine_name(config) 1113 | cmd = ['docker-machine', 'ssh', name] + cmd 1114 | 1115 | p = subprocess.Popen(cmd, stdin = subprocess.PIPE) 1116 | p.communicate() 1117 | 1118 | if p.returncode != 0: 1119 | raise RuntimeError( 1120 | 'Failed to remove install directory: {}'.format(path)) 1121 | return path 1122 | 1123 | def clean_install_files(config, platform_name, install_dir): 1124 | # We have to delete the installs files in a container 1125 | # because the cppdock user wont have to permissions 1126 | if not os.path.isdir(install_dir): 1127 | return 1128 | base_image = config['cppdock']['cppdock_image'] 1129 | assert install_dir.startswith('/tmp/') and install_dir.endswith('.install'), "Must only delete valid install path" 1130 | cmd = ['/bin/bash', '-c', 'rm -rf /opt/install/*'] 1131 | mount_install = get_run_options_mount_install(config, platform_name, install_dir) 1132 | cmd = remove_none(['docker', 'run', '--rm'] 1133 | + mount_install 1134 | + [ base_image ] 1135 | + cmd) 1136 | subprocess.call(cmd) 1137 | 1138 | def run_stack_deploy(config, stack_name): 1139 | # docker stack deploy is already idempotent 1140 | compose_file_data = get_dev_compose_file(config) 1141 | p = subprocess.Popen(['docker', 'stack', 'deploy', '--prune', '--compose-file', '-', stack_name], stdin=subprocess.PIPE) 1142 | p.communicate(input=bytes(compose_file_data, 'utf-8')) 1143 | if p.returncode != 0: 1144 | print(""" 1145 | 1146 | FAILURE: `docker stack deploy` for `{stack_name}` 
error code {error_code}.
""".format(stack_name=stack_name, error_code=p.returncode))
        sys.exit(1)

def run_stack_service_terminal(config, stack_name, platform_name):
    service_name = get_service_name(config, platform_name)
    print("""
    TODO shell into {service_name} somehow
    """.format(service_name = service_name))
    # container_id = # get_container_id(config, platform_name)
    # TODO finish
    #os.system("docker exec -it {0} bash".format(container_id))

def get_dev_compose_file(config):
    result = get_dev_compose_object(config)
    return json.dumps(remove_none(result), indent=2, separators=(',', ': '))

def get_dev_compose_object(config):
    platform_names = get_all_platforms(config)
    service_confs = [get_dev_compose_file_service(config, platform_name) for platform_name in platform_names]
    secrets = config.get('secrets', {})
    secrets.update(get_secret_files())

    services = {}
    volumes = {}
    for x in service_confs:
        services.update(x['service'])
        secrets.update(x.get('secrets', {}))
        volumes.update(x.get('volumes', {}))

    return {
        'version': '3.1',
        'services': services,
        'volumes': volumes,
        'secrets': secrets
    }

def get_dev_compose_file_service(config, platform_name):
    platform = get_platform_config(config, platform_name)
    if is_platform_service(config, platform_name):
        return get_dev_compose_file_service_service(config, platform_name)

    # generate service definition for dev images

    service_name = get_service_name(config, platform_name)
    image_tag = get_build_tag_name(config, platform_name)
    image_tag = get_registry_tag(config, image_tag)
    target = get_dev_service_target(config, platform_name)
    build_type = get_build_type(config, platform_name)

    command = "cppdock init_dev_service {build_type} {target}".format(
        build_type = build_type,
        target = target)

    web_root = platform.get('web_root')
    if web_root:
        web_root = "web_root:{0}".format(web_root)

    src_mount_path = get_build_source_path(config)
    src_mount = '{src_mount_path}:/opt/src:ro'.format(src_mount_path = src_mount_path)
    domain = get_domain(config, platform_name)
    environment = { 'DOMAIN': domain }
    environment.update(platform.get('environment', {}))
    secrets = platform.get('secrets', [])
    build_volume_name = platform_name + '_build'

    return {
        'service': {
            service_name: {
                'image': image_tag,
                'command': command,
                'environment': environment,
                'secrets': secrets,
                'volumes': [
                    src_mount,
                    '{}:/opt/build'.format(build_volume_name),
                    web_root
                ]
            }
        },
        'volumes': {
            build_volume_name: None
        }
    }

def get_dev_compose_file_service_service(config, platform_name):
    service_type = get_service_type(config, platform_name)
    if (service_type == None):
        return default_compose_file_service(config, platform_name)

    fn = ServiceFn(nginx_proxy = get_service_nginx_proxy,
                   postgres = get_service_postgres)
    return getattr(fn, service_type)(config, platform_name)

def default_compose_file_service(config, platform_name):
    result = {}
    platform = get_platform_config(config, platform_name)
    result.update(platform)
1244 |     del result['type']
1245 |     return {
1246 |         'service': {
1247 |             platform_name: result
1248 |         }
1249 |     }
1250 |
1251 | def get_volume_name(config, platform_name, build_type = None):
1252 |     if build_type is None:
1253 |         build_type = get_build_type(config, platform_name).lower()
1254 |     return get_image_tag_name(config, platform_name, build_type)
1255 |
1256 | def get_build_tag_name(config, platform_name):
1257 |     return get_image_tag_name(config, platform_name, "build")
1258 |
1259 | def get_release_tag_name(config, platform_name):
1260 |     return get_image_tag_name(config, platform_name, "release")
1261 |
1262 | def get_base_tag_name(config, platform_name):
1263 |     return get_image_tag_name(config, platform_name, 'base')
1264 |
1265 | def get_image_tag_name(config, platform_name, image_type):
1266 |     return "{project}-{platform}{image_type}".format(
1267 |         project = get_project_name(config),
1268 |         platform = platform_name,
1269 |         image_type = '-' + image_type if image_type != 'release' else '',
1270 |     )
1271 |
1272 | def get_stack_name(config):
1273 |     return get_project_name(config)
1274 |
1275 | def get_all_platforms(config):
1276 |     return config['platforms'].keys()
1277 |
1278 | def get_platform_config(config, platform_name):
1279 |     return config['platforms'][platform_name]
1280 |
1281 | def get_dev_service_target(config, platform_name):
1282 |     platform = get_platform_config(config, platform_name)
1283 |     service_target = platform.get('service_target')
1284 |     return platform.get('target') or service_target or 'check'
1285 |
1286 | def get_dev_service_init(config, platform_name):
1287 |     base_path = os.path.join('.', 'cppdock_recipes')
1288 |     path1 = os.path.join(base_path, 'this-' + platform_name)
1289 |     path2 = os.path.join(base_path, 'this')
1290 |     if os.path.isfile(path1):
1291 |         return path1
1292 |     elif os.path.isfile(path2):
1293 |         return path2
1294 |     else:
1295 |         return None
1296 |
1297 | def run_dev_service_cmake(build_type, target = None, init_only = ''):
1298 |     cmake_commands = ['cmake']
1299 |     emscripten_root = os.getenv('EMSCRIPTEN_ROOT')
1300 |     if emscripten_root:
1301 |         emcmake = os.path.join(emscripten_root, 'emcmake')
1302 |         cmake_commands = [emcmake, 'cmake']
1303 |     p = None
1304 |     if not os.path.isfile('/opt/build/CMakeCache.txt'):
1305 |         p = subprocess.Popen(cmake_commands + [
1306 |             '-DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake',
1307 |             '-DCMAKE_INSTALL_PREFIX=/opt/install',
1308 |             '-DCMAKE_BUILD_TYPE=' + build_type,
1309 |             '/opt/src'],
1310 |             cwd = '/opt/build')
1311 |         p.communicate('')
1312 |
1313 |     if (p is None or p.returncode == 0) and target and not init_only:
1314 |         p = subprocess.Popen(['cmake', '--build', '.', '--target', target],
1315 |                              cwd = '/opt/build')
1316 |         p.communicate('')
1317 |         return p
1318 |
1319 |     return None
1320 |
1321 | def run_dev_service_init(build_type, target, init_only = ''):
1322 |     cmd = remove_none(['/opt/dev_service_init'])
1323 |     env = dict(os.environ, BUILD_TYPE = build_type,
1324 |                TARGET = target,
1325 |                INIT_ONLY = init_only)
1326 |
1327 |     p = subprocess.Popen(cmd, cwd = '/opt/build', env = env)
1328 |     p.communicate('')
1329 |     return p
1330 |
1331 | def run_dev_service_target(build_type, target,
1332 |                            shell_only = False,
1333 |                            shell = False,
1334 |                            keep_alive = True):
1335 |     # This should run in a docker container
1336 |
1337 |     has_init = os.path.isfile('/opt/dev_service_init')
1338 |     init_fn = run_dev_service_init if has_init else run_dev_service_cmake
1339 |
1340 |     if shell_only:
1341 |         init_fn(build_type, '', "INIT_ONLY")
1342 |         p = subprocess.Popen(['/bin/bash'])
1343 |         p.communicate('')
1344 |         return p
1345 |
1346 |     p = init_fn(build_type, target)
1347 |
1348 |     if not keep_alive:
1349 |         return p
1350 |
1351 |     # Make the service target and just hang out.
1352 |     # Things will show up in the logs and the user can still shell in to inspect things.
1353 |
1354 |     if p and p.returncode != 0:
1355 |         print("Service exited with error {}.".format(p.returncode))
1356 |
1357 |     # hang out so the container stays alive
1358 |
1359 |     if shell:
1360 |         p = subprocess.Popen(['/bin/bash'])
1361 |         p.communicate('')
1362 |     else:
1363 |         p = subprocess.Popen(['tail', '-f', '/dev/null'])
1364 |         p.communicate('')
1365 |     return p
1366 |
1367 | def cluster_get_worker_token():
1368 |     try:
1369 |         return subprocess.check_output(['docker', 'swarm', 'join-token', '-q', 'worker']).decode().strip()
1370 |     except Exception:
1371 |         raise RuntimeError("Unable to get cluster worker token")
1372 |
1373 | def cluster_get_sync_path(path):
1374 |     return "/home/ubuntu/sync{}".format(path)
1375 |
1376 | def cluster_sync_source(config):
1377 |     if not is_allowed_cluster_sync(config):
1378 |         print('Skipping source code sync')
1379 |         return
1380 |     machines = get_machine_list(config)
1381 |     machine_names = [x[0] for x in machines]
1382 |     for name in machine_names:
1383 |         cluster_sync_source_rsync_node(config, name)
1384 |
1385 | # Initializes source code on all machines
1386 | # after they are built.
1387 | def cluster_sync_source_init(config):
1388 |     machines = get_machine_list(config)
1389 |     machine_names = [x[0] for x in machines]
1390 |
1391 |     script_data = ''
1392 |     source_init = config['source'].get('init')
1393 |     source_repo = config['source'].get('repo')
1394 |     if source_init:
1395 |         with open(source_init, 'r') as fh:
1396 |             script_data += fh.read()
1397 |     elif source_repo:
1398 |         source_repo_tag = config['source'].get('tag', 'master')
1399 |         script_data = "git clone --depth 1 --branch {tag} --single-branch {repo} .".format(
1400 |             repo = source_repo,
1401 |             tag = source_repo_tag)
1402 |     else:
1403 |         script_data = ''
1404 |
1405 |     for name in machine_names:
1406 |         cluster_sync_source_init_node(config, name, script_data)
1407 |
1408 | def cluster_sync_source_init_node(config, name, script_data):
1409 |     path = get_build_source_path(config)
1410 |     print('Initializing source code on {}'.format(name))
1411 |     p = subprocess.Popen(['docker-machine', 'ssh', name, 'bash', '-s'],
1412 |                          stdin = subprocess.PIPE)
1413 |     script_header = 'mkdir -p {path} && cd {path};'
1414 |     p.communicate(bytes(script_header.format(path = path) + script_data, 'utf-8'))
1415 |     if p.returncode != 0:
1416 |         raise RuntimeError(
1417 |             'Failed to initialize source code on {name}'.format(
1418 |                 name = name))
1419 |
1420 | def cluster_sync_source_rsync_node(config, name):
1421 |     assert is_allowed_cluster_sync(config), "rsync requires source.allow_sync and a local source path"
1422 |     base_src_path = config['source']['path']
1423 |     assert base_src_path, 'Config source.path must be set'
1424 |     base_sync_path = get_build_source_path(config)
1425 |     def sync_dir(path):
1426 |         src_path = os.path.join(base_src_path, path, '.')
1427 |         dest_path = os.path.join(base_sync_path, path)
1428 |         machine_rsync(name, src_path, dest_path)
1429 |
1430 |     [sync_dir(x) for x in config['cppdock'].get('sync_dirs', [''])]
1431 |
1432 | def get_nginx_proxy_conf(config, platform_name):
1433 |     domain = get_domain(config, platform_name)
1434 |     assert domain, 'Nginx proxy requires a domain name'
1435 |     locations = ''
1436 |     upstreams = ''
1437 |     redirect_servers = ''
1438 |
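    # Plain-http requests get a 301 to https; when the canonical domain starts
    # with "www.", requests for the bare domain are redirected to the www host
    # over https as well.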
1439 |     if domain.startswith('www.'):
1440 |         non_www_domain = domain[4:]
1441 |         redirect_servers += """
1442 | server {{
1443 |   # redirect to https
1444 |   server_name {domain} {non_www_domain};
1445 |   return 301 https://{domain}$request_uri;
1446 | }}
1447 | server {{
1448 |   # redirect to www.
1449 |   listen 443 ssl;
1450 |   server_name {non_www_domain};
1451 |   ssl_certificate     /run/secrets/{domain}.crt;
1452 |   ssl_certificate_key /run/secrets/{domain}.key;
1453 |   return 301 https://{domain}$request_uri;
1454 | }}""".format(domain = domain,
1455 |              non_www_domain = non_www_domain)
1456 |     else:
1457 |         redirect_servers += """
1458 | server {{
1459 |   # redirect to https
1460 |   server_name {domain};
1461 |   return 301 https://{domain}$request_uri;
1462 | }}""".format(domain = domain)
1463 |
1464 |     for name in get_all_platforms(config):
1465 |         if is_platform_service(config, name):
1466 |             continue
1467 |         locations += get_nginx_proxy_conf_locations(config, name)
1468 |         upstreams += get_nginx_proxy_conf_upstreams(config, name)
1469 |     return """
1470 | user nginx;
1471 | worker_processes 1;
1472 |
1473 | error_log /var/log/nginx/error.log warn;
1474 | pid /var/run/nginx.pid;
1475 |
1476 | events {{
1477 |   worker_connections 1024;
1478 | }}
1479 |
1480 | http {{
1481 |   include /etc/nginx/mime.types;
1482 |   types {{
1483 |     application/wasm wasm;
1484 |     font/woff2 woff2;
1485 |   }}
1486 |   default_type application/octet-stream;
1487 |   resolver 127.0.0.11;
1488 |
1489 |   log_format main '$remote_addr - $remote_user [$time_local] "$request" '
1490 |                   '$status $body_bytes_sent "$http_referer" '
1491 |                   '"$http_user_agent" "$http_x_forwarded_for"';
1492 |
1493 |   access_log /var/log/nginx/access.log main;
1494 |
1495 |   sendfile on;
1496 |   #tcp_nopush on;
1497 |
1498 |   keepalive_timeout 65;
1499 |
1500 |   #gzip on;
1501 |
1502 |   {upstreams}
1503 |
1504 |   {redirect_servers}
1505 |
1506 |   server {{
1507 |     listen 443 ssl;
1508 |     server_name {domain};
1509 |     ssl_certificate     /run/secrets/{domain}.crt;
1510 |     ssl_certificate_key /run/secrets/{domain}.key;
1511 |     root /opt/web_root;
1512 |     {locations}
1513 |     location / {{
1514 |       # web app uri redirect
1515 |       try_files $uri $uri/ /index.html;
1516 |     }}
1517 |   }}
1518 | }}
1519 | """.format(locations = locations,
1520 |            upstreams = upstreams,
1521 |            domain = domain,
1522 |            redirect_servers = redirect_servers)
1523 |
1524 | def get_nginx_proxy_conf_upstreams(config, platform_name):
1525 |     platform = config['platforms'][platform_name]
1526 |     # upstream directive for each port exposed to the internal network
1527 |     # ports: ["{name}:{protocol}:{port}"] |
1528 |     #        ["{name}:{port}"] |
1529 |     #        ["{port}"]
1530 |     #   name     - (optional) specify name to append to service name
1531 |     #   protocol - (optional) specify ws or http or something else
1532 |     #   port     - (required) port number
1533 |     upstreams = ''
1534 |     port_specs = get_platform_port_specs(config, platform_name)
1535 |
1536 |     for port_name, protocol, port in port_specs:
1537 |         service_name = get_service_name(config, platform_name)
1538 |
1539 |         if protocol == 'ws':
1540 |             upstreams += """
1541 | upstream docker-{service_name} {{
1542 |   server {service_name}:{port};
1543 | }}""".format(service_name = service_name,
1544 |              port = port)
1545 |         else:
1546 |             raise ValueError("Invalid protocol")
1547 |
1548 |     return upstreams
1549 |
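# For illustration (hypothetical config): a platform named "my_app" with
# ports: ["api:ws:8080"] produces an upstream and a matching location roughly like:
#
#   upstream docker-my_app {
#     server my_app:8080;
#   }
#
#   location = /my_app_api {
#     proxy_pass http://docker-my_app;
#     ...
#   }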
1550 | def get_nginx_proxy_conf_locations(config, platform_name):
1551 |     platform = config['platforms'][platform_name]
1552 |     # location directive for each port exposed to the internal network
1553 |     # ports: ["{name}:{protocol}:{port}"] |
1554 |     #        ["{name}:{port}"] |
1555 |     #        ["{port}"]
1556 |     #   name     - (optional) specify name to append to service name
1557 |     #   protocol - (optional) specify ws or http or something else
1558 |     #   port     - (required) port number
1559 |     # uri should be uri prefix plus service_name
1560 |     locations = ''
1561 |     port_specs = get_platform_port_specs(config, platform_name)
1562 |
1563 |     for port_name, protocol, port in port_specs:
1564 |         service_name = get_service_name(config, platform_name)
1565 |
1566 |         uri = config.get('uri_prefix') or '/'
1567 |         uri += service_name
1568 |         if len(port_name) > 0:
1569 |             uri += '_' + port_name
1570 |         if protocol == 'ws':
1571 |             locations += get_nginx_location_websocket(uri, service_name, port)
1572 |         else:
1573 |             raise ValueError("Invalid protocol")
1574 |
1575 |     return locations
1576 |
1577 | # returns [[name, protocol, port]]
1578 | def get_platform_port_specs(config, platform_name):
1579 |     platform = config['platforms'][platform_name]
1580 |     port_specs = platform.get('ports') or []
1581 |     results = []
1582 |     for port_spec in port_specs:
1583 |         port_data = port_spec.split(':')
1584 |         length = len(port_data)
1585 |
1586 |         if length == 1:
1587 |             [port] = port_data
1588 |             port_name = ''
1589 |             protocol = 'ws'
1590 |         elif length == 2:
1591 |             [port_name, port] = port_data
1592 |             protocol = 'ws'
1593 |         elif length == 3:
1594 |             port_name, protocol, port = port_data
1595 |         else:
1596 |             raise ValueError("Invalid port specification")
1597 |
1598 |         # only websockets are supported
1599 |         if protocol not in ['ws']:
1600 |             raise ValueError("Invalid protocol specified")
1601 |
1602 |         results.append([port_name, protocol, port])
1603 |
1604 |     return results
1605 |
1606 | def get_nginx_location_websocket(uri, service_name, port):
1607 |     # creates a location with an exact uri match of the platform name
1608 |     # it proxies to the websocket backend
1609 |     return """
1610 | location = {uri} {{
1611 |   proxy_set_header X-Real-IP $remote_addr;
1612 |   proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
1613 |   proxy_set_header Host $http_host;
1614 |   proxy_set_header X-NginX-Proxy true;
1615 |
1616 |   proxy_pass http://docker-{service_name};
1617 |   proxy_redirect off;
1618 |
1619 |   proxy_http_version 1.1;
1620 |   proxy_set_header Upgrade $http_upgrade;
1621 |   proxy_set_header Connection "upgrade";
1622 | }}""".format(uri = uri,
1623 |              service_name = service_name,
1624 |              port = port)
1625 |
1626 | def get_service_nginx_proxy(config, platform_name):
1627 |     domain = get_domain(config, platform_name)
1628 |     assert domain, 'Nginx proxy requires a domain name'
1629 |     platform = get_platform_config(config, platform_name)
1630 |     service_name_fn = lambda x : get_service_name(config, x)
1631 |     service_names = list(map(service_name_fn, get_all_platforms(config)))
1632 |     tag_name = get_image_tag_name(config, platform_name, 'service')
1633 |     tag_name = get_registry_tag(config, tag_name)
1634 |     volume_name = platform.get('volume_name') or 'web_root'
1635 |     return {
1636 |         'service': {
1637 |             platform_name: {
1638 |                 'image': tag_name,
1639 |                 'ports': ['80:80', '443:443'],
1640 |                 'volumes': [
1641 |                     '{}:/opt/web_root:ro'.format(volume_name)
1642 |                 ],
1643 |                 'secrets': [
1644 |                     "{}.crt".format(domain),
1645 |                     "{}.key".format(domain)
1646 |                 ],
1647 |                 'depends_on': service_names
1648 |             }
1649 |         },
1650 |         'volumes': {
1651 |             volume_name: None
1652 |         }
1653 |     }
1654 |
1655 | def get_service_postgres(config, platform_name):
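    # Stock postgres service: credentials are read from docker secrets via the
    # *_FILE environment variables, and the data volume is constrained to the
    # manager node so the database always lands on the same host.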
1656 |     volume_name = platform_name
1657 |     return {
1658 |         'service': {
1659 |             platform_name: {
1660 |                 'image': 'postgres',
1661 |                 'environment': {
1662 |                     'POSTGRES_USER_FILE': '/run/secrets/POSTGRES_USER',
1663 |                     'POSTGRES_PASSWORD_FILE': '/run/secrets/POSTGRES_PASSWORD'
1664 |                 },
1665 |                 'secrets': [
1666 |                     'POSTGRES_USER',
1667 |                     'POSTGRES_PASSWORD'
1668 |                 ],
1669 |                 'volumes': [
1670 |                     '{}:/var/lib/postgresql/data'.format(volume_name)
1671 |                 ],
1672 |                 'deploy': {
1673 |                     'placement': {
1674 |                         'constraints': ['node.role == manager']
1675 |                     }
1676 |                 }
1677 |             },
1678 |         },
1679 |         'volumes': {
1680 |             volume_name: None
1681 |         }
1682 |     }
1683 |
1684 | def is_cluster_mode(config):
1685 |     return len(get_machine_list(config)) > 0
1686 |
1687 | def get_service_name(config, platform_name):
1688 |     return platform_name
1689 |
1690 | def generate_self_signed_ssl_certificate(domain):
1691 |     subj = '/C=US/ST=Nevada/L=Henderson/O=Dis/CN={}'.format(domain)
1692 |     try:
1693 |         key_data = subprocess.check_output(['openssl', 'genrsa', '4096'])
1694 |     except Exception:
1695 |         raise RuntimeError('Failed to generate SSL key (via openssl)')
1696 |
1697 |     # Note: a self-signed certificate is for development only
1698 |     print("Creating self-signed certificate...")
1699 |     p = subprocess.Popen([
1700 |         'openssl', 'req',
1701 |         '-new', '-x509', '-key', '/dev/stdin',
1702 |         '-subj', subj],
1703 |         stdin=subprocess.PIPE,
1704 |         stdout=subprocess.PIPE)
1705 |     crt_data, crt_err = p.communicate(key_data)  # key_data is already bytes
1706 |
1707 |     if p.returncode != 0:
1708 |         raise RuntimeError('Failed to generate SSL cert (via openssl)')
1709 |
1710 |     return (domain, key_data, crt_data)
1711 |
1712 | def import_ssl_certificate(domain, key_data, crt_data):
1713 |     # Add to ./cppdock_secrets/
1714 |     key_name = '{}.key'.format(domain)
1715 |     crt_name = '{}.crt'.format(domain)
1716 |     print('Adding certificates to secret database...')
1717 |     make_secret_file(key_name, key_data)
1718 |     make_secret_file(crt_name, crt_data)
1719 |
1720 | def make_secret_file(name, data):
1721 |     path = os.path.join(os.getcwd(), 'cppdock_secrets', name)
1722 |     with open(path, 'wb') as fh:  # key/cert data from openssl is bytes
1723 |         fh.write(data)
1724 |
1725 | def get_secret_files():
1726 |     # We never want to look in the source.path for secrets
1727 |     # so this is a valid use of getcwd
1728 |     secrets_dir = os.path.join(os.getcwd(), 'cppdock_secrets')
1729 |     if not os.path.isdir(secrets_dir):
1730 |         return {}
1731 |     names = [f for f in os.listdir(secrets_dir)]
1732 |     secrets = {}
1733 |     for name in names:
1734 |         path = os.path.abspath(os.path.join(secrets_dir, name))
1735 |         if os.path.isfile(path):
1736 |             secrets[name] = { 'file': path }
1737 |
1738 |     return secrets
1739 |
1740 | def get_registry(config):
1741 |     registry = config['cppdock'].get('registry')
1742 |     return registry or '127.0.0.1:5000'
1743 |
1744 | def get_registry_tag(config, tag_name):
1745 |     return get_registry(config) + '/' + tag_name
1746 |
1747 | def get_service_type(config, platform_name):
1748 |     platform_type = get_platform_type(config, platform_name)
1749 |     if platform_type == 'service' or not platform_type.startswith('service:'):
1750 |         return None
1751 |     return platform_type.split(':', 1)[1]
1752 |
1753 | def get_domain(config, platform_name):
1754 |     platform = get_platform_config(config, platform_name)
1755 |     return platform.get('domain') or config['cppdock'].get('domain')
1756 |
1757 | def get_build_type(config, platform_name):
1758 |     platform = get_platform_config(config, platform_name)
1759 |     default = config.get('__default_build_type', 'debug')
1760 |     return platform.get('build_type') \
1761 |         or config['cppdock'].get('build_type', default)
1762 |
1763 | def get_release_target(config, platform_name):
1764 |     platform = get_platform_config(config, platform_name)
1765 |     return platform.get('release_target', 'install')
1766 |
1767 |
1768 | def dev_cluster_stop(config):
1769 |     stack_name = get_stack_name(config)
1770 |     subprocess.check_output(['docker', 'stack', 'rm', stack_name])
1771 |
1772 | def dev_cluster_clean_containers(config):
1773 |     stack_name = get_stack_name(config)
1774 |     ids = get_container_ids(stack_name)
1775 |     if not ids:
1776 |         return
1777 |     subprocess.check_output(['docker', 'container', 'rm', '--force'] + ids)
1778 |
1779 | def dev_cluster_clean_volumes(config):
1780 |     stack_name = get_stack_name(config)
1781 |     names = get_dev_compose_object(config)['volumes'].keys()
1782 |     name_set = set(map(lambda x: stack_name + '_' + x, names))
1783 |     names = list(name_set.intersection(get_volume_names(stack_name)))
1784 |     if not names:
1785 |         return
1786 |     subprocess.check_output(['docker', 'volume', 'rm'] + names)
1787 |
1788 | def get_container_ids(name):
1789 |     # docker ps -a --filter "name=sc2_nbdl_test_server" --format="{{.ID}}"
1790 |     # filter network to prevent accidentally removing similarly named containers
1791 |     filter_name = 'name={}'.format(name)
1792 |     filter_network = 'network={}'.format(name + '_default')
1793 |     return subprocess.check_output(['docker', 'ps', '-a',
1794 |                                     '--filter', filter_name,
1795 |                                     '--filter', filter_network,
1796 |                                     '--format', '{{.ID}}'
1797 |                                     ]).decode().split()
1798 |
1799 | def get_volume_names(name):
1800 |     # docker volume ls --filter "name=sc2_nbdl_test" --format="{{.Name}}"
1801 |     filter_name = 'name={}'.format(name)
1802 |     return subprocess.check_output(['docker', 'volume', 'ls',
1803 |                                     '--filter', filter_name,
1804 |                                     '--format', '{{.Name}}'
1805 |                                     ]).decode().split()
1806 |
1807 | def get_machine_list(config):
1808 |     if config.get('machine_list'):
1809 |         return config['machine_list']
1810 |
1811 |     machines = config.get('machines', {}).items()
1812 |     project_name = get_project_name(config)
1813 |     machine_name = lambda x: (project_name + '_' + x).replace('_', '-')
1814 |     is_manager = lambda x: x[1].get('manager') == True
1815 |
1816 |     result = [(machine_name(name), values) for (name, values) in machines]
1817 |     result.sort(key = is_manager, reverse = True)
1818 |
1819 |     # prevent further access to unprocessed data
1820 |     if machines:
1821 |         del config['machines']
1822 |     config['machine_list'] = result
1823 |     return result
1824 |
1825 | def get_machine_names(config):
1826 |     return (x for (x, y) in get_machine_list(config))
1827 |
1828 | # TODO maybe use this when we actually do stuff that needs to be
1829 | # applied to all machines such as rsyncing source code
1830 | # for_every_machine or replace with some kind of iterator
1831 | def for_every_machine(config, fn):
1832 |     for (name, values) in get_machine_list(config):
1833 |         set_machine(name, values)
1834 |         fn(name, values)
1835 |
1836 | def set_master_machine(config):
1837 |     machines = get_machine_list(config)
1838 |     if len(machines) > 0:
1839 |         set_machine(*(machines[0]))
1840 |
1841 | def get_master_machine_name(config):
1842 |     machines = get_machine_list(config)
1843 |     machine_names = [x[0] for x in machines]
1844 |     assert len(machine_names) > 0
1845 |     return machine_names[0]
1846 |
1847 | def set_machine(name, values):
1848 |     x = machine_env(name, values)
1849 |     env = os.environ
1850 |
1851 |     print("Setting docker host to machine: " + name)
1852 |     def set_env(name):
1853 |         value = x.get(name)
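        # export the machine's value, or drop a stale value left over from a
        # previously selected machine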
1854 |         if value:
1855 |             env[name] = value
1856 |         else:
1857 |             env.pop(name, None)  # avoid KeyError when the variable was never set
1858 |
1859 |     set_env('DOCKER_HOST')
1860 |     set_env('DOCKER_TLS_VERIFY')
1861 |     set_env('DOCKER_CERT_PATH')
1862 |     set_env('DOCKER_MACHINE_NAME')
1863 |     print('Using docker host: ' + env['DOCKER_HOST'])
1864 |
1865 | def machine_env(name, values):
1866 |     if values.get('env'):
1867 |         return values['env']
1868 |     else:
1869 |         raw = subprocess.check_output(['docker-machine', 'env', name]).decode()
1870 |
1871 |     return dict(re.findall('export ([A-Z_]+)="([^"]+)"', raw))
1872 |
1873 | def machine_start_all(config):
1874 |     name_list = list(get_machine_names(config))
1875 |     subprocess.check_output(['docker-machine', 'start'] + name_list)
1876 |     subprocess.check_output(['docker-machine', 'regenerate-certs',
1877 |                              '--force'] + name_list)
1878 |
1879 | def machine_stop_all(config):
1880 |     names = get_machine_names(config)
1881 |     subprocess.check_output(['docker-machine', 'stop'] + list(names))
1882 |
1883 | def machine_rm_all(config):
1884 |     names = get_machine_names(config)
1885 |     subprocess.check_output(['docker-machine', 'rm', '-y'] + list(names))
1886 |
1887 | def machine_create_all(config):
1888 |     # The first machine is the host
1889 |     xs = get_machine_list(config)
1890 |     assert len(xs) > 0, '"machines" must be specified for cluster operations'
1891 |
1892 |     machine_create_master(*(xs[0]))
1893 |     def join_new(name, values):
1894 |         join_token = machine_join_token(name, values)
1895 |         machine_create(name, values, join_token)
1896 |
1897 |     [join_new(name, values) for (name, values) in xs[1:]]
1898 |
1899 | def machine_create_master(name, values):
1900 |     # The first machine that starts the swarm
1901 |     machine_create(name, values)
1902 |     set_machine(name, values)
1903 |     subprocess.check_output(['docker', 'swarm', 'init'])
1904 |     dev_registry_create()
1905 |
1906 | def machine_create(name, values, join_token = None):
1907 |     labels = values.get('labels', {}).items()
1908 |     ports = values.get('ports', [80, 443])
1909 |     # ops is a list of lists to be chained
1910 |     ops = []
1911 |     # NOTE We are always setting `swarm-master`
1912 |     # because a node could be promoted.
1913 |     # (I think it just exposes a port)
1914 |     ops += [['--swarm', '--swarm-master']]
1915 |     ops += [['--driver', values['driver']]]
1916 |     ops += [['--{}'.format(x[0]), x[1]] for x in values.get('options', [])]
1917 |     ops += [['--engine-label', '{0}={1}'.format(k, v)] for k, v in labels]
1918 |     if join_token:
1919 |         ops += [['--swarm-discovery', 'token://' + join_token]]
1920 |     if values['driver'] == 'amazonec2':
1921 |         ops += [['--amazonec2-open-port', str(x)] for x in ports]
1922 |     ops_flat = [x for x in itertools.chain.from_iterable(ops)]
1923 |     subprocess.check_output(['docker-machine', 'create'] + ops_flat + [name])
1924 |
1925 | def machine_join_token(name, values):
1926 |     kind = 'manager' if values.get('manager') else 'worker'
1927 |     return subprocess.check_output(['docker', 'swarm',
1928 |                                     'join-token', '-q', kind]).decode().strip()
1929 |
1930 | def machine_rsync(name, src_path, dest_path):
1931 |     machine = machine_inspect(name)
1932 |     ip_address = machine_ip_address(name)
1933 |     driver = machine['Driver']
1934 |     ssh_key_path = driver['SSHKeyPath']
1935 |     node_path = "{user}@{ip_address}:{dest_path}".format(
1936 |         user = driver['SSHUser'],
1937 |         ip_address = ip_address,
1938 |         dest_path = dest_path)
1939 |     print('Syncing source code to ' + node_path)
1940 |     ssh_command = ' '.join([
1941 |         'ssh -i {}'.format(ssh_key_path),
1942 |         '-o StrictHostKeyChecking=no',
1943 |         '-o UserKnownHostsFile=/dev/null',
1944 |         '-o LogLevel=quiet'
1945 |     ])
1946 |     subprocess.check_output(['rsync', '--recursive',
1947 |                              '--progress',
1948 |                              '--times',
1949 |                              '-e', ssh_command,
1950 |                              src_path, node_path])
1951 |
1952 | def machine_inspect(name):
1953 |     result_s = subprocess.check_output(['docker-machine', 'inspect', name])
1954 |     return json.loads(result_s)
1955 |
1956 | def machine_ip_address(name):
1957 |     result_s = subprocess.check_output(['docker-machine', 'ip', name])
1958 |     return result_s.decode().strip()
1959 |
1960 | def dev_registry_create():
1961 |     registry_name = 'cppdock-registry'
1962 |     subprocess.check_output(['docker', 'service', 'create', '--name',
1963 |                              registry_name, '--publish', '5000:5000',
1964 |                              'registry'])
1965 |
1966 | def dev_registry_rm():
1967 |     registry_name = 'cppdock-registry'
1968 |     subprocess.check_output(['docker', 'service', 'rm', registry_name])
1969 |
1970 | def calculate_source_path(config, config_paths):
1971 |     # should be called AFTER config rewrite
1972 |     source = config["source"]
1973 |     if source and "path" in source:
1974 |         path = source["path"]
1975 |     else:
1976 |         path = config_paths[0]
1977 |
1978 |     if path and not os.path.isdir(path):
1979 |         path = os.path.dirname(path)
1980 |     return path
1981 |
1982 | def calculate_allow_sync(config):
1983 |     # should be called AFTER config rewrite
1984 |     # if source path is explicitly null then
1985 |     # forbid rsync no matter what
1986 |     source = config.get('source')
1987 |     if source and 'path' in source and source['path'] is not None:
1988 |         return source.get('allow_sync', True)
1989 |
1990 |     return False
1991 |
1992 | def get_build_source_path(config):
1993 |     # do we imply source path when we set cppdock.base_config?
1994 |     path = config['source']['path']
1995 |     if is_cluster_mode(config):
1996 |         path = path or os.path.abspath('./source')
1997 |         path = cluster_get_sync_path(path)
1998 |     assert path, 'Unable to create source path for build'
1999 |     return path
2000 |
2001 | def is_allowed_cluster_sync(config):
2002 |     return config['source']['allow_sync']
2003 |
2004 | def is_verbose_mode():
2005 |     return True
2006 |
2007 |
2008 | dispatch(sys.argv[1:2], sys.argv[2:])
--------------------------------------------------------------------------------
/cppdock-complete.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | _cppdock_complete()
4 | {
5 |
6 |     ITEMS=(cppdock _complete "${COMP_LINE}")
7 |     COMPREPLY=($("${ITEMS[@]}"))
8 | }
9 |
10 | complete -F _cppdock_complete cppdock
--------------------------------------------------------------------------------
/example/cppdock.ini:
--------------------------------------------------------------------------------
1 | [cppdock]
2 | project = my_app
3 |
4 | [linux_x64]
5 | ricejasonf/nbdl =
6 | boostorg/hana = develop
7 | boostorg/callable_traits = master
8 |
9 |
--------------------------------------------------------------------------------
/recipes/boostorg-config:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p /opt/install/include
4 | cp -r include/* /opt/install/include
5 |
--------------------------------------------------------------------------------
/recipes/boostorg-mp11:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p /opt/install/include
4 | cp -r include/* /opt/install/include
5 |
--------------------------------------------------------------------------------
/recipes/chriskohlhoff-asio:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p /opt/install/include
4 | cp asio/include/asio.hpp /opt/install/include
5 | cp -r asio/include/asio /opt/install/include
6 |
--------------------------------------------------------------------------------
/recipes/cisco-openh264:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export CC=/usr/local/bin/clang++
4 | export CXX=/usr/local/bin/clang++
5 | export CXXFLAGS=-stdlib=libc++
6 | export LD_LIBRARY_PATH=/usr/local/lib
7 |
8 | apt-get update && apt-get install -yq nasm \
9 |   && make install-static-lib \
10 |   && mkdir /opt/install/include && mkdir /opt/install/lib \
11 |   && mv /usr/local/include/wels /opt/install/include \
12 |   && mv /usr/local/lib/libopenh264.a /opt/install/lib
13 |
--------------------------------------------------------------------------------
/recipes/cisco-openh264-emscripten:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export PATH=$PATH:/usr/local/src/emscripten
4 | emmake make OS=linux ARCH=asmjs install-static-lib \
5 |   && mkdir /opt/install/include && mkdir /opt/install/lib \
6 |   && mv /usr/local/include/wels /opt/install/include \
7 |   && mv /usr/local/lib/libopenh264.a /opt/install/lib
8 |
--------------------------------------------------------------------------------
/recipes/default:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | mkdir -p build && cd build \
4 |   && cmake \
5 |     -DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \
6 |
-DCMAKE_INSTALL_PREFIX=/opt/install \ 7 | -DCMAKE_BUILD_TYPE=Release \ 8 | .. \ 9 | && make install 10 | -------------------------------------------------------------------------------- /recipes/default-emscripten: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p build && cd build \ 4 | && /opt/emsdk/upstream/emscripten/emcmake cmake \ 5 | -DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \ 6 | -DCMAKE_INSTALL_PREFIX=/opt/install \ 7 | -DCMAKE_BUILD_TYPE=Release \ 8 | .. \ 9 | && make install 10 | -------------------------------------------------------------------------------- /recipes/ericniebler-meta: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p /opt/install/include 4 | cp -r include/* /opt/install/include 5 | -------------------------------------------------------------------------------- /recipes/howardhinnant-date: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p /opt/install/include 4 | cp -r include/* /opt/install/include 5 | -------------------------------------------------------------------------------- /recipes/jedisct1-libsodium: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get update && apt-get install -yq libtool autoconf 4 | chmod u+x ./autogen.sh 5 | ./autogen.sh 6 | ./configure --prefix=/opt/install 7 | make && make install 8 | -------------------------------------------------------------------------------- /recipes/jedisct1-libsodium-emscripten: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PATH=$PATH:/usr/local/src/emscripten 4 | apt-get update && apt-get install -yq libtool autoconf 5 | chmod u+x ./autogen.sh 6 | chmod u+x ./dist-build/emscripten.sh 7 | ./autogen.sh 8 | ./dist-build/emscripten.sh --standard 9 | cp -r libsodium-js/* /opt/install/ 10 | -------------------------------------------------------------------------------- /recipes/kvasir-io-mpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p /opt/install/include 4 | cp -r src/* /opt/install/include 5 | -------------------------------------------------------------------------------- /recipes/ldionne-dyno: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # TODO 4 | # Dyno depends on Hana so there needs to be a way of 5 | # installing the dependency to the sysroot of the current build. 6 | # 7 | # This also means knowing what the dependency is before hand. 8 | 9 | mkdir -p /opt/install/include 10 | cp -r include/* /opt/install/include 11 | -------------------------------------------------------------------------------- /recipes/open-source-parsers-jsoncpp: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir build && cd build \ 4 | && cmake \ 5 | -DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \ 6 | -DCMAKE_INSTALL_PREFIX=/opt/install \ 7 | -DCMAKE_BUILD_TYPE=Release \ 8 | -DJSONCPP_WITH_TESTS=OFF \ 9 | -DJSONCPP_WITH_POST_BUILD_UNITTEST=OFF\ 10 | .. 
\ 11 | && make install 12 | -------------------------------------------------------------------------------- /recipes/rbock-sqlpp11: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir build && cd build \ 4 | && cmake \ 5 | -DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \ 6 | -DCMAKE_INSTALL_PREFIX=/opt/install \ 7 | -DCMAKE_BUILD_TYPE=Release \ 8 | -DENABLE_TESTS=Off \ 9 | .. \ 10 | && make install 11 | -------------------------------------------------------------------------------- /recipes/rbock-sqlpp11-connector-mysql: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir build && cd build \ 4 | && cmake \ 5 | -DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \ 6 | -DCMAKE_INSTALL_PREFIX=/opt/install \ 7 | -DCMAKE_BUILD_TYPE=Release \ 8 | -DENABLE_TESTS=Off \ 9 | -DSQLPP11_INCLUDE_DIR=/opt/install/include \ 10 | -DDATE_INCLUDE_DIR=/opt/install/include/date \ 11 | -DMYSQL_INCLUDE_DIR=/opt/install/include/mariadb \ 12 | -DUSE_MARIADB=1 \ 13 | .. \ 14 | && make install 15 | -------------------------------------------------------------------------------- /recipes/xiph-opus: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | apt-get update && apt-get install -yq libtool autoconf 4 | chmod u+x ./autogen.sh 5 | ./autogen.sh 6 | ./configure --prefix=/opt/install CFLAGS='-O3' \ 7 | && make && make install 8 | -------------------------------------------------------------------------------- /recipes/xiph-opus-emscripten: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export PATH=$PATH:/usr/local/src/emscripten 4 | apt-get update && apt-get install -yq libtool autoconf 5 | chmod u+x ./autogen.sh 6 | ./autogen.sh 7 | emconfigure ./configure --prefix=/opt/install \ 8 | --disable-intrinsics \ 9 | --disable-rtcd \ 10 | CFLAGS='-O2' \ 11 | && emmake make && make install 12 | -------------------------------------------------------------------------------- /recipes_docker/Dockerfile-boost: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 as build_image 2 | 3 | RUN apt-get update && apt-get -yq install \ 4 | build-essential libc6-dev libstdc++-12-dev cmake 5 | 6 | ADD https://boostorg.jfrog.io/artifactory/main/release/1.85.0/source/boost_1_85_0.tar.gz boost_1_85_0.tar.gz 7 | RUN tar -xvzf boost_1_85_0.tar.gz && \ 8 | cd ./boost_1_85_0 && ./bootstrap.sh && \ 9 | ./b2 install --prefix=/opt/install 10 | 11 | FROM ubuntu:22.04 as boost 12 | 13 | COPY --from=build_image /opt/install/ /opt/install 14 | 15 | FROM ubuntu:22.04 as boost_header_only 16 | 17 | COPY --from=build_image /opt/install/include/ /opt/install/include 18 | -------------------------------------------------------------------------------- /recipes_docker/Dockerfile-libpq-linux_x64: -------------------------------------------------------------------------------- 1 | FROM ricejasonf/parmexpr as build 2 | 3 | RUN apt-get update -yq && apt-get install -yq wget 4 | 5 | WORKDIR /opt/src 6 | RUN wget https://ftp.postgresql.org/pub/source/v12.1/postgresql-12.1.tar.gz \ 7 | && tar -xvzf postgresql-12.1.tar.gz 8 | 9 | WORKDIR /opt/src/postgresql-12.1 10 | RUN mkdir /opt/install \ 11 | && ./configure --prefix /opt/install \ 12 | --without-readline \ 13 | --without-zlib \ 14 | && make \ 15 | && make -C src/include install \ 16 | && make -C src/interfaces install 17 | 18 | FROM ubuntu:eoan 
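# Runtime stage: carry over only the headers and client libraries installed by
# the build stage.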
19 | COPY --from=build /opt/install /opt/install
20 |
--------------------------------------------------------------------------------
/test/json_lock/cppdock.json:
--------------------------------------------------------------------------------
1 | {
2 |   "cppdock": {
3 |     "name": "test_json_lock"
4 |   },
5 |   "platforms": {
6 |     "my_linux_x64": {
7 |       "type": "linux_x64",
8 |       "deps": [
9 |         [
10 |           {
11 |             "tag": "master",
12 |             "name": "ricejasonf/nbdl",
13 |             "revision": "1ae8497b57640cfb095efe0a3c9e0f22f3ac15a9"
14 |           }
15 |         ],
16 |         [
17 |           {
18 |             "tag": "develop",
19 |             "name": "boostorg/hana",
20 |             "revision": "d9da8776aa1142c0f92a38d49829ae5c3e86c7bc"
21 |           }
22 |         ],
23 |         [
24 |           {
25 |             "tag": "latest",
26 |             "name": "ricejasonf/cppdock",
27 |             "source": "docker"
28 |           }
29 |         ]
30 |       ]
31 |     },
32 |     "my_emscripten": {
33 |       "base_image": "ricejasonf/cppdock:emscripten_1_37_19",
34 |       "type": "emscripten",
35 |       "deps": [
36 |         [
37 |           {
38 |             "tag": "develop",
39 |             "name": "ricejasonf/nbdl",
40 |             "revision": "157988d3266cc4ef84a2687b6ef9cdba992449ae"
41 |           }
42 |         ],
43 |         [
44 |           {
45 |             "tag": "develop",
46 |             "name": "boostorg/hana",
47 |             "revision": "d9da8776aa1142c0f92a38d49829ae5c3e86c7bc"
48 |           }
49 |         ]
50 |       ]
51 |     }
52 |   }
53 | }
54 |
--------------------------------------------------------------------------------
/test/json_lock/run_test.py:
--------------------------------------------------------------------------------
1 | # TODO
2 | # call `cppdock init` in this working dir
3 | # test that json file is the same except for specifying revisions in each dep
4 | # The dep with the hardcoded revision should NOT change
5 |
6 | import os
7 | import subprocess
8 | import sys
9 | import json
10 |
11 | expected_revision_boosthana = 'd9da8776aa1142c0f92a38d49829ae5c3e86c7bc'
12 |
13 | def start():
14 |
15 |     p = subprocess.Popen(['cppdock', 'init'], stdin=subprocess.PIPE)
16 |     out, err = p.communicate(b'')
17 |     if p.returncode != 0:
18 |         print('FAILURE')
19 |         sys.exit(1)
20 |
21 |
22 |     with open('cppdock.json', 'r') as stream:
23 |         config = json.load(stream)
24 |
25 |     result_revision_boosthana = config['platforms']['my_emscripten']['deps'][1][0]['revision']
26 |     if result_revision_boosthana != expected_revision_boosthana:
27 |         raise ValueError('boostorg/hana locked revision changed.')
28 |
29 |     platforms = config['platforms'].items()
30 |     for platform, settings in platforms:
31 |         for stage in settings['deps']:
32 |             for item in stage:
33 |                 if 'revision' in item:
34 |                     continue
35 |                 else:
36 |                     raise ValueError('revision not added')
37 |
38 | start()
--------------------------------------------------------------------------------
/toolchain/emscripten.cmake:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.7)
2 |
3 | # Borrows some stuff from emscripten's toolchain file but makes many assumptions
4 |
5 | set(CMAKE_INSTALL_PREFIX "/opt/install")
6 | set(EMSCRIPTEN_ROOT_PATH "/opt/emsdk/upstream/emscripten")
7 | set(CMAKE_SYSTEM_NAME Emscripten)
8 | set(CMAKE_SYSTEM_VERSION 1)
9 | set(CMAKE_CROSSCOMPILING TRUE)
10 | set(CMAKE_SYSTEM_PROCESSOR x86)
11 | set(CMAKE_SHARED_LIBRARY_SONAME_C_FLAG "-Wl,-soname,")
12 | set(UNIX 1)
13 | set(CMAKE_REQUIRED_FLAGS "-s ERROR_ON_UNDEFINED_SYMBOLS=1")
14 | list(APPEND CMAKE_MODULE_PATH "${EMSCRIPTEN_ROOT_PATH}/cmake/Modules")
15 | list(APPEND CMAKE_FIND_ROOT_PATH "${EMSCRIPTEN_ROOT_PATH}/system")
16 | set(CMAKE_C_COMPILER "${EMSCRIPTEN_ROOT_PATH}/emcc")
17 | set(CMAKE_CXX_COMPILER "${EMSCRIPTEN_ROOT_PATH}/em++")
18 |
set(CMAKE_AR "${EMSCRIPTEN_ROOT_PATH}/emar" CACHE FILEPATH "Emscripten ar") 19 | set(CMAKE_RANLIB "${EMSCRIPTEN_ROOT_PATH}/emranlib" CACHE FILEPATH "Emscripten ranlib") 20 | 21 | set(CMAKE_C_COMPILER_ID_RUN TRUE) 22 | set(CMAKE_C_COMPILER_FORCED TRUE) 23 | set(CMAKE_C_COMPILER_WORKS TRUE) 24 | set(CMAKE_C_COMPILER_ID Clang) 25 | set(CMAKE_C_STANDARD_COMPUTED_DEFAULT 11) 26 | 27 | set(CMAKE_CXX_COMPILER_ID_RUN TRUE) 28 | set(CMAKE_CXX_COMPILER_FORCED TRUE) 29 | set(CMAKE_CXX_COMPILER_WORKS TRUE) 30 | set(CMAKE_CXX_COMPILER_ID Clang) 31 | set(CMAKE_CXX_STANDARD_COMPUTED_DEFAULT 98) 32 | 33 | set(CMAKE_C_PLATFORM_ID "emscripten") 34 | set(CMAKE_CXX_PLATFORM_ID "emscripten") 35 | 36 | # To find programs to execute during CMake run time with find_program(), e.g. 'git' or so, we allow looking 37 | # into system paths. 38 | set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) 39 | 40 | # Since Emscripten is a cross-compiler, we should never look at the system-provided directories like /usr/include and so on. 41 | # Therefore only CMAKE_FIND_ROOT_PATH should be used as a find directory. See http://www.cmake.org/cmake/help/v3.0/variable/CMAKE_FIND_ROOT_PATH_MODE_INCLUDE.html 42 | set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) 43 | set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) 44 | set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) 45 | 46 | set(CMAKE_SYSTEM_INCLUDE_PATH "/opt/sysroot/include") 47 | SET(CMAKE_EXECUTABLE_SUFFIX ".js") 48 | 49 | SET(CMAKE_C_USE_RESPONSE_FILE_FOR_LIBRARIES 1) 50 | SET(CMAKE_CXX_USE_RESPONSE_FILE_FOR_LIBRARIES 1) 51 | SET(CMAKE_C_USE_RESPONSE_FILE_FOR_OBJECTS 1) 52 | SET(CMAKE_CXX_USE_RESPONSE_FILE_FOR_OBJECTS 1) 53 | SET(CMAKE_C_USE_RESPONSE_FILE_FOR_INCLUDES 1) 54 | SET(CMAKE_CXX_USE_RESPONSE_FILE_FOR_INCLUDES 1) 55 | 56 | set(CMAKE_C_RESPONSE_FILE_LINK_FLAG "@") 57 | set(CMAKE_CXX_RESPONSE_FILE_LINK_FLAG "@") 58 | 59 | # Set a global EMSCRIPTEN variable that can be used in client CMakeLists.txt to detect when building using Emscripten. 
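# A consuming project can branch on it, e.g. (hypothetical usage):
#   if(EMSCRIPTEN)
#     set_target_properties(my_app PROPERTIES SUFFIX ".html")
#   endif()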
60 | set(EMSCRIPTEN 1 CACHE BOOL "If true, we are targeting Emscripten output.") 61 | 62 | # Hardwire support for cmake-2.8/Modules/CMakeBackwardsCompatibilityC.cmake without having CMake to try complex things 63 | # to autodetect these: 64 | set(CMAKE_SKIP_COMPATIBILITY_TESTS 1) 65 | set(CMAKE_SIZEOF_CHAR 1) 66 | set(CMAKE_SIZEOF_UNSIGNED_SHORT 2) 67 | set(CMAKE_SIZEOF_SHORT 2) 68 | set(CMAKE_SIZEOF_INT 4) 69 | set(CMAKE_SIZEOF_UNSIGNED_LONG 4) 70 | set(CMAKE_SIZEOF_UNSIGNED_INT 4) 71 | set(CMAKE_SIZEOF_LONG 4) 72 | set(CMAKE_SIZEOF_VOID_P 4) 73 | set(CMAKE_SIZEOF_FLOAT 4) 74 | set(CMAKE_SIZEOF_DOUBLE 8) 75 | set(CMAKE_C_SIZEOF_DATA_PTR 4) 76 | set(CMAKE_CXX_SIZEOF_DATA_PTR 4) 77 | set(CMAKE_HAVE_LIMITS_H 1) 78 | set(CMAKE_HAVE_UNISTD_H 1) 79 | set(CMAKE_HAVE_PTHREAD_H 1) 80 | set(CMAKE_HAVE_SYS_PRCTL_H 1) 81 | set(CMAKE_WORDS_BIGENDIAN 0) 82 | set(CMAKE_DL_LIBS) 83 | 84 | set(CMAKE_C_FLAGS_RELEASE "-DNDEBUG -O2" CACHE STRING "Emscripten-overridden CMAKE_C_FLAGS_RELEASE") 85 | set(CMAKE_C_FLAGS_MINSIZEREL "-DNDEBUG -Os" CACHE STRING "Emscripten-overridden CMAKE_C_FLAGS_MINSIZEREL") 86 | set(CMAKE_C_FLAGS_RELWITHDEBINFO "-O2" CACHE STRING "Emscripten-overridden CMAKE_C_FLAGS_RELWITHDEBINFO") 87 | set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG -O2" CACHE STRING "Emscripten-overridden CMAKE_CXX_FLAGS_RELEASE") 88 | set(CMAKE_CXX_FLAGS_MINSIZEREL "-DNDEBUG -Os" CACHE STRING "Emscripten-overridden CMAKE_CXX_FLAGS_MINSIZEREL") 89 | set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2" CACHE STRING "Emscripten-overridden CMAKE_CXX_FLAGS_RELWITHDEBINFO") 90 | 91 | set(CMAKE_EXE_LINKER_FLAGS_RELEASE "-O2" CACHE STRING "Emscripten-overridden CMAKE_EXE_LINKER_FLAGS_RELEASE") 92 | set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "-Os" CACHE STRING "Emscripten-overridden CMAKE_EXE_LINKER_FLAGS_MINSIZEREL") 93 | set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "-O2 -g" CACHE STRING "Emscripten-overridden CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO") 94 | set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "-O2" CACHE STRING "Emscripten-overridden CMAKE_SHARED_LINKER_FLAGS_RELEASE") 95 | set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "-Os" CACHE STRING "Emscripten-overridden CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL") 96 | set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "-O2 -g" CACHE STRING "Emscripten-overridden CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO") 97 | set(CMAKE_MODULE_LINKER_FLAGS_RELEASE "-O2" CACHE STRING "Emscripten-overridden CMAKE_MODULE_LINKER_FLAGS_RELEASE") 98 | set(CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL "-Os" CACHE STRING "Emscripten-overridden CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL") 99 | set(CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO "-O2 -g" CACHE STRING "Emscripten-overridden CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO") 100 | 101 | set(CMAKE_CROSSCOMPILING_EMULATOR "/usr/local/bin/node" CACHE FILEPATH "Path to the emulator for the target system.") 102 | 103 | # Compiler Features 104 | set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert") 105 | set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") 106 | set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") 107 | set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") 108 | 109 | set(CMAKE_CXX_COMPILE_FEATURES 
"cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17") 110 | set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters") 111 | set(CMAKE_CXX11_COMPILE_FEATURES "cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") 112 | set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") 113 | 114 | # Disable debug symbols since linker is hanging for some reason 115 | set(CMAKE_CXX_FLAGS_DEBUG "") 116 | -------------------------------------------------------------------------------- /toolchain/linux_x64.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_INSTALL_PREFIX "/opt/sysroot") 2 | 3 | set(CMAKE_C_COMPILER clang) 4 | set(CMAKE_CXX_COMPILER clang++) 5 | 6 | set(CMAKE_C_COMPILER_WORKS 1) 7 | set(CMAKE_CXX_COMPILER_WORKS 1) 8 | 9 | set(CMAKE_CXX_FLAGS "-stdlib=libc++ --target=x86_64-unknown-linux-gnu" CACHE STRING "" FORCE) 10 | set(CMAKE_SYSTEM_INCLUDE_PATH "/opt/sysroot/include" CACHE STRING "" FORCE) 11 | list(APPEND CMAKE_FIND_ROOT_PATH "/opt/sysroot") 12 | include_directories("/opt/sysroot/include/c++/v1") 13 | 14 | # Compiler Features 
15 | set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert") 16 | set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") 17 | set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") 18 | set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") 19 | 20 | set(CMAKE_CXX_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17") 21 | set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters") 22 | set(CMAKE_CXX11_COMPILE_FEATURES "cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") 23 | set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") 24 | 25 | # pthreads 26 | set(CMAKE_THREAD_LIBS_INIT "-lpthread") 27 | set(CMAKE_HAVE_THREADS_LIBRARY 1) 28 | set(CMAKE_USE_WIN32_THREADS_INIT 0) 29 | set(CMAKE_USE_PTHREADS_INIT 1) 30 | -------------------------------------------------------------------------------- /toolchain/macosx.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_INSTALL_PREFIX /opt/sysroot) 2 | set(CMAKE_SYSROOT 
/opt/sysroot) 3 | set(CMAKE_SYSTEM_NAME Darwin) 4 | set(CMAKE_SYSTEM_PROCESSOR x86_64) 5 | 6 | set(triple x86_64-apple-darwin) 7 | 8 | set(CMAKE_C_COMPILER clang) 9 | set(CMAKE_C_COMPILER_TARGET ${triple}) 10 | set(CMAKE_CXX_COMPILER clang++) 11 | set(CMAKE_CXX_COMPILER_TARGET ${triple}) 12 | 13 | set(CMAKE_EXE_LINKER_FLAGS "-mtvos-version-min=9.2") 14 | 15 | set(CMAKE_AR /usr/local/bin/ar CACHE FILEPATH "Archiver") 16 | set(CMAKE_STRIP /usr/local/bin/strip CACHE FILEPATH "Archiver") 17 | 18 | set(CMAKE_C_COMPILER_WORKS 1) 19 | set(CMAKE_CXX_COMPILER_WORKS 1) 20 | 21 | # pthreads 22 | set(CMAKE_THREAD_LIBS_INIT "-lpthread") 23 | set(CMAKE_HAVE_THREADS_LIBRARY 1) 24 | set(CMAKE_USE_WIN32_THREADS_INIT 0) 25 | set(CMAKE_USE_PTHREADS_INIT 1) 26 | 27 | # Compiler Features 28 | set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert") 29 | set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") 30 | set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") 31 | set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") 32 | 33 | set(CMAKE_CXX_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17") 34 | set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters") 35 | set(CMAKE_CXX11_COMPILE_FEATURES 
"cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") 36 | set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") 37 | -------------------------------------------------------------------------------- /toolchain/tvossimulator.cmake: -------------------------------------------------------------------------------- 1 | set(CMAKE_INSTALL_PREFIX /opt/sysroot) 2 | set(CMAKE_SYSROOT /opt/sysroot) 3 | set(CMAKE_SYSTEM_NAME Darwin) 4 | set(CMAKE_SYSTEM_PROCESSOR x86_64) 5 | 6 | set(CMAKE_CXX_FLAGS "-stdlib=libc++" CACHE STRING "" FORCE) 7 | set(CMAKE_EXE_LINKER_FLAGS "-lc++abi -mtvos-version-min=9.2" CACHE STRING "" FORCE) 8 | set(CMAKE_SYSTEM_INCLUDE_PATH "/opt/sysroot/include" CACHE STRING "" FORCE) 9 | list(APPEND CMAKE_FIND_ROOT_PATH "/opt/sysroot") 10 | include_directories("/opt/sysroot/include") 11 | 12 | set(triple x86_64-apple-tvossimulator) 13 | 14 | set(CMAKE_C_COMPILER clang) 15 | set(CMAKE_C_COMPILER_TARGET ${triple}) 16 | set(CMAKE_CXX_COMPILER clang++) 17 | set(CMAKE_CXX_COMPILER_TARGET ${triple}) 18 | 19 | 20 | set(CMAKE_AR /usr/local/bin/ar CACHE FILEPATH "Archiver") 21 | set(CMAKE_STRIP /usr/local/bin/strip CACHE FILEPATH "Archiver") 22 | 23 | set(CMAKE_C_COMPILER_WORKS 1) 24 | set(CMAKE_CXX_COMPILER_WORKS 1) 25 | 26 | # pthreads 27 | set(CMAKE_THREAD_LIBS_INIT "-lpthread") 28 | set(CMAKE_HAVE_THREADS_LIBRARY 1) 29 | set(CMAKE_USE_WIN32_THREADS_INIT 0) 30 | set(CMAKE_USE_PTHREADS_INIT 1) 31 | 32 | # Compiler Features 33 | set(CMAKE_C_COMPILE_FEATURES "c_std_90;c_function_prototypes;c_std_99;c_restrict;c_variadic_macros;c_std_11;c_static_assert") 34 | set(CMAKE_C90_COMPILE_FEATURES "c_std_90;c_function_prototypes") 35 | set(CMAKE_C99_COMPILE_FEATURES "c_std_99;c_restrict;c_variadic_macros") 36 | set(CMAKE_C11_COMPILE_FEATURES "c_std_11;c_static_assert") 37 | 38 | set(CMAKE_CXX_COMPILE_FEATURES 
"cxx_std_98;cxx_template_template_parameters;cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates;cxx_std_17") 39 | set(CMAKE_CXX98_COMPILE_FEATURES "cxx_std_98;cxx_template_template_parameters") 40 | set(CMAKE_CXX11_COMPILE_FEATURES "cxx_std_11;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") 41 | set(CMAKE_CXX14_COMPILE_FEATURES "cxx_std_14;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") 42 | -------------------------------------------------------------------------------- /toolchain/tvossimulator/Availability.h: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2007-2015 by Apple Inc.. All rights reserved. 3 | * 4 | * @APPLE_LICENSE_HEADER_START@ 5 | * 6 | * This file contains Original Code and/or Modifications of Original Code 7 | * as defined in and that are subject to the Apple Public Source License 8 | * Version 2.0 (the 'License'). You may not use this file except in 9 | * compliance with the License. Please obtain a copy of the License at 10 | * http://www.opensource.apple.com/apsl/ and read it before using this 11 | * file. 
12 |  *
13 |  * The Original Code and all software distributed under the License are
14 |  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 |  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 |  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 |  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 |  * Please see the License for the specific language governing rights and
19 |  * limitations under the License.
20 |  *
21 |  * @APPLE_LICENSE_HEADER_END@
22 |  */
23 | 
24 | #ifndef __AVAILABILITY__
25 | #define __AVAILABILITY__
26 | /*
27 | These macros are for use in OS header files. They enable function prototypes
28 | and Objective-C methods to be tagged with the OS version in which they
29 | were first available; and, if applicable, the OS version in which they
30 | became deprecated.
31 | 
32 | The desktop Mac OS X and iOS each have different version numbers.
33 | The __OSX_AVAILABLE_STARTING() macro allows you to specify both the desktop
34 | and iOS version numbers. For instance:
35 | __OSX_AVAILABLE_STARTING(__MAC_10_2,__IPHONE_2_0)
36 | means the function/method was first available on Mac OS X 10.2 on the desktop
37 | and first available in iOS 2.0 on the iPhone.
38 | 
39 | If a function is available on one platform but not the other, a _NA (not
40 | applicable) parameter is used. For instance:
41 | __OSX_AVAILABLE_STARTING(__MAC_10_3,__IPHONE_NA)
42 | means that the function/method was first available on Mac OS X 10.3, and it
43 | is currently not implemented on the iPhone.
44 | 
45 | At some point, a function/method may be deprecated. That means Apple
46 | recommends applications stop using the function, either because there is a
47 | better replacement or the functionality is being phased out. Deprecated
48 | functions/methods can be tagged with a __OSX_AVAILABLE_BUT_DEPRECATED()
49 | macro which specifies the OS version where the function became available
50 | as well as the OS version in which it became deprecated. For instance:
51 | __OSX_AVAILABLE_BUT_DEPRECATED(__MAC_10_0,__MAC_10_5,__IPHONE_NA,__IPHONE_NA)
52 | means that the function/method was introduced in Mac OS X 10.0, then
53 | became deprecated beginning in Mac OS X 10.5. On iOS the function
54 | has never been available.
55 | 
56 | For these macros to function properly, a program must specify the OS version range
57 | it is targeting. The min OS version is specified as an option to the compiler:
58 | -mmacosx-version-min=10.x when building for Mac OS X, and -miphoneos-version-min=y.z
59 | when building for the iPhone. The upper bound for the OS version is rarely needed,
60 | but it can be set on the command line via: -D__MAC_OS_X_VERSION_MAX_ALLOWED=10x0 for
61 | Mac OS X and -D__IPHONE_OS_VERSION_MAX_ALLOWED=y0z00 for iOS.
62 | 
63 | Examples:
64 | 
65 | A function available in Mac OS X 10.5 and later, but not on the phone:
66 | 
67 | extern void mymacfunc() __OSX_AVAILABLE_STARTING(__MAC_10_5,__IPHONE_NA);
68 | 
69 | 
70 | An Objective-C method in Mac OS X 10.5 and later, but not on the phone:
71 | 
72 | @interface MyClass : NSObject
73 | -(void) mymacmethod __OSX_AVAILABLE_STARTING(__MAC_10_5,__IPHONE_NA);
74 | @end
75 | 
76 | 
77 | An enum available on the phone, but not available on Mac OS X:
78 | 
79 | #if __IPHONE_OS_VERSION_MIN_REQUIRED
80 | enum { myEnum = 1 };
81 | #endif
82 | Note: this works when targeting the Mac OS X platform because
83 | __IPHONE_OS_VERSION_MIN_REQUIRED is undefined, which evaluates to zero.
84 | 
85 | 
86 | An enum with values added in different iPhoneOS versions:
87 | 
88 | enum {
89 |     myX = 1,  // Usable on iPhoneOS 2.1 and later
90 |     myY = 2,  // Usable on iPhoneOS 3.0 and later
91 |     myZ = 3,  // Usable on iPhoneOS 3.0 and later
92 |     ...
93 | Note: you do not want to use #if with enumeration values
94 | when a client needs to see all values at compile time
95 | and then use runtime logic to use only the viable values.
96 | 
97 | 
98 | It is also possible to use the *_VERSION_MIN_REQUIRED in source code to make one
99 | source base that can be compiled to target a range of OS versions. It is best
100 | not to use the __MAC_* and __IPHONE_* macros for comparisons, but rather their values.
101 | That is because your code might be compiled on an old OS that does not define a later
102 | OS version macro, and in the C preprocessor undefined values evaluate to zero
103 | in expressions, which could cause the #if expression to evaluate in an unexpected
104 | way.
105 | 
106 | #ifdef __MAC_OS_X_VERSION_MIN_REQUIRED
107 | // code only compiled when targeting Mac OS X and not iPhone
108 | // note use of 1050 instead of __MAC_10_5
109 | #if __MAC_OS_X_VERSION_MIN_REQUIRED < 1050
110 | // code in here might run on pre-Leopard OS
111 | #else
112 | // code here can assume Leopard or later
113 | #endif
114 | #endif
115 | 
116 | 
117 | */
118 | 
119 | #define __MAC_10_0 1000
120 | #define __MAC_10_1 1010
121 | #define __MAC_10_2 1020
122 | #define __MAC_10_3 1030
123 | #define __MAC_10_4 1040
124 | #define __MAC_10_5 1050
125 | #define __MAC_10_6 1060
126 | #define __MAC_10_7 1070
127 | #define __MAC_10_8 1080
128 | #define __MAC_10_9 1090
129 | #define __MAC_10_10 101000
130 | #define __MAC_10_10_2 101002
131 | #define __MAC_10_10_3 101003
132 | #define __MAC_10_11 101100
133 | #define __MAC_10_11_2 101102
134 | #define __MAC_10_11_3 101103
135 | #define __MAC_10_11_4 101104
136 | /* __MAC_NA is not defined to a value but is used as a token by macros to indicate that the API is unavailable */
137 | 
138 | #define __IPHONE_2_0 20000
139 | #define __IPHONE_2_1 20100
140 | #define __IPHONE_2_2 20200
141 | #define __IPHONE_3_0 30000
142 | #define __IPHONE_3_1 30100
143 | #define __IPHONE_3_2 30200
144 | #define __IPHONE_4_0 40000
145 | #define __IPHONE_4_1 40100
146 | #define __IPHONE_4_2 40200
147 | #define __IPHONE_4_3 40300
148 | #define __IPHONE_5_0 50000
149 | #define __IPHONE_5_1 50100
150 | #define __IPHONE_6_0 60000
151 | #define __IPHONE_6_1 60100
152 | #define __IPHONE_7_0 70000
153 | #define __IPHONE_7_1 70100
154 | #define __IPHONE_8_0 80000
155 | #define __IPHONE_8_1 80100
156 | #define __IPHONE_8_2 80200
157 | #define __IPHONE_8_3 80300
158 | #define __IPHONE_8_4 80400
159 | #define __IPHONE_9_0 90000
160 | #define __IPHONE_9_1 90100
161 | #define __IPHONE_9_2 90200
162 | #define __IPHONE_9_3 90300
163 | /* __IPHONE_NA is not defined to a value but is used as a token by macros to indicate that the API is unavailable */
164 | 
165 | #define __TVOS_9_0 90000
166 | #define __TVOS_9_1 90100
167 | #define __TVOS_9_2 90200
168 | 
169 | #define __WATCHOS_1_0 10000
170 | #define __WATCHOS_2_0 20000
171 | 
172 | #include <AvailabilityInternal.h>
173 | 
174 | #ifdef __IPHONE_OS_VERSION_MIN_REQUIRED
175 | #define __OSX_AVAILABLE_STARTING(_osx, _ios) __AVAILABILITY_INTERNAL##_ios
176 | #define __OSX_AVAILABLE_BUT_DEPRECATED(_osxIntro, _osxDep, _iosIntro, _iosDep) \
177 |     __AVAILABILITY_INTERNAL##_iosIntro##_DEP##_iosDep
178 | #define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(_osxIntro, _osxDep, _iosIntro, _iosDep, _msg) \
179 |     __AVAILABILITY_INTERNAL##_iosIntro##_DEP##_iosDep##_MSG(_msg)
180 | 
181 | #elif defined(__MAC_OS_X_VERSION_MIN_REQUIRED)
182 | #define __OSX_AVAILABLE_STARTING(_osx, _ios) __AVAILABILITY_INTERNAL##_osx
183 | #define __OSX_AVAILABLE_BUT_DEPRECATED(_osxIntro, _osxDep, _iosIntro, _iosDep) \
184 |     __AVAILABILITY_INTERNAL##_osxIntro##_DEP##_osxDep
185 | #define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(_osxIntro, _osxDep, _iosIntro, _iosDep, _msg) \
186 |     __AVAILABILITY_INTERNAL##_osxIntro##_DEP##_osxDep##_MSG(_msg)
187 | 
188 | #else
189 | #define __OSX_AVAILABLE_STARTING(_osx, _ios)
190 | #define __OSX_AVAILABLE_BUT_DEPRECATED(_osxIntro, _osxDep, _iosIntro, _iosDep)
191 | #define __OSX_AVAILABLE_BUT_DEPRECATED_MSG(_osxIntro, _osxDep, _iosIntro, _iosDep, _msg)
192 | #endif
193 | 
194 | 
195 | #if defined(__has_feature)
196 | #if __has_feature(attribute_availability_with_message)
197 | #define __OS_AVAILABILITY(_target, _availability) __attribute__((availability(_target,_availability)))
198 | #define __OS_AVAILABILITY_MSG(_target, _availability, _msg) __attribute__((availability(_target,_availability,message=_msg)))
199 | #else
200 | #define __OS_AVAILABILITY(_target, _availability)
201 | #define __OS_AVAILABILITY_MSG(_target, _availability, _msg)
202 | #endif
203 | #else
204 | #define __OS_AVAILABILITY(_target, _availability)
205 | #define __OS_AVAILABILITY_MSG(_target, _availability, _msg)
206 | #endif
207 | 
208 | 
209 | /* for use in documenting app extension usage */
210 | #if defined(__has_feature)
211 | #if __has_feature(attribute_availability_app_extension)
212 | #define __OSX_EXTENSION_UNAVAILABLE(_msg) __OS_AVAILABILITY_MSG(macosx_app_extension,unavailable,_msg)
213 | #define __IOS_EXTENSION_UNAVAILABLE(_msg) __OS_AVAILABILITY_MSG(ios_app_extension,unavailable,_msg)
214 | #else
215 | #define __OSX_EXTENSION_UNAVAILABLE(_msg)
216 | #define __IOS_EXTENSION_UNAVAILABLE(_msg)
217 | #endif
218 | #else
219 | #define __OSX_EXTENSION_UNAVAILABLE(_msg)
220 | #define __IOS_EXTENSION_UNAVAILABLE(_msg)
221 | #endif
222 | 
223 | #define __OS_EXTENSION_UNAVAILABLE(_msg) __OSX_EXTENSION_UNAVAILABLE(_msg) __IOS_EXTENSION_UNAVAILABLE(_msg)
224 | 
225 | 
226 | 
227 | /* for marking API availability info for Mac OS X */
228 | #if defined(__has_feature)
229 | #if __has_attribute(availability)
230 | #define __OSX_UNAVAILABLE __OS_AVAILABILITY(macosx,unavailable)
231 | #define __OSX_AVAILABLE(_vers) __OS_AVAILABILITY(macosx,introduced=_vers)
232 | #define __OSX_DEPRECATED(_start, _dep, _msg) __OSX_AVAILABLE(_start) __OS_AVAILABILITY_MSG(macosx,deprecated=_dep,_msg)
233 | #endif
234 | #endif
235 | 
236 | #ifndef __OSX_UNAVAILABLE
237 | #define __OSX_UNAVAILABLE
238 | #endif
239 | 
240 | #ifndef __OSX_AVAILABLE
241 | #define __OSX_AVAILABLE(_vers)
242 | #endif
243 | 
244 | #ifndef __OSX_DEPRECATED
245 | #define __OSX_DEPRECATED(_start, _dep, _msg)
246 | #endif
247 | 
248 | 
249 | /* for marking API availability info for iOS */
250 | #if defined(__has_feature)
251 | #if __has_attribute(availability)
252 | #define __IOS_UNAVAILABLE __OS_AVAILABILITY(ios,unavailable)
253 | #define __IOS_PROHIBITED __OS_AVAILABILITY(ios,unavailable)
254 | #define __IOS_AVAILABLE(_vers) __OS_AVAILABILITY(ios,introduced=_vers)
255 | #define __IOS_DEPRECATED(_start, _dep, _msg) __IOS_AVAILABLE(_start) __OS_AVAILABILITY_MSG(ios,deprecated=_dep,_msg)
256 | #endif
257 | #endif
258 | 
259 | #ifndef __IOS_UNAVAILABLE
260 | #define __IOS_UNAVAILABLE
261 | #endif
262 | 
263 | #ifndef __IOS_PROHIBITED
264 | #define __IOS_PROHIBITED
265 | #endif
266 | 
267 | #ifndef __IOS_AVAILABLE
268 | #define __IOS_AVAILABLE(_vers)
269 | #endif
270 | 
271 | #ifndef __IOS_DEPRECATED
272 | #define __IOS_DEPRECATED(_start, _dep, _msg)
273 | #endif
274 | 
275 | 
276 | /* for marking API availability info for tvOS */
277 | #if defined(__has_feature)
278 | #if __has_feature(attribute_availability_tvos)
279 | #define __TVOS_UNAVAILABLE __OS_AVAILABILITY(tvos,unavailable)
280 | #define __TVOS_PROHIBITED __OS_AVAILABILITY(tvos,unavailable)
281 | #define __TVOS_AVAILABLE(_vers) __OS_AVAILABILITY(tvos,introduced=_vers)
282 | #define __TVOS_DEPRECATED(_start, _dep, _msg) __TVOS_AVAILABLE(_start) __OS_AVAILABILITY_MSG(tvos,deprecated=_dep,_msg)
283 | #endif
284 | #endif
285 | 
286 | #ifndef __TVOS_UNAVAILABLE
287 | #define __TVOS_UNAVAILABLE
288 | #endif
289 | 
290 | #ifndef __TVOS_PROHIBITED
291 | #define __TVOS_PROHIBITED
292 | #endif
293 | 
294 | #ifndef __TVOS_AVAILABLE
295 | #define __TVOS_AVAILABLE(_vers)
296 | #endif
297 | 
298 | #ifndef __TVOS_DEPRECATED
299 | #define __TVOS_DEPRECATED(_start, _dep, _msg)
300 | #endif
301 | 
302 | 
303 | /* for marking API availability info for watchOS */
304 | #if defined(__has_feature)
305 | #if __has_feature(attribute_availability_watchos)
306 | #define __WATCHOS_UNAVAILABLE __OS_AVAILABILITY(watchos,unavailable)
307 | #define __WATCHOS_PROHIBITED __OS_AVAILABILITY(watchos,unavailable)
308 | #define __WATCHOS_AVAILABLE(_vers) __OS_AVAILABILITY(watchos,introduced=_vers)
309 | #define __WATCHOS_DEPRECATED(_start, _dep, _msg) __WATCHOS_AVAILABLE(_start) __OS_AVAILABILITY_MSG(watchos,deprecated=_dep,_msg)
310 | #endif
311 | #endif
312 | 
313 | #ifndef __WATCHOS_UNAVAILABLE
314 | #define __WATCHOS_UNAVAILABLE
315 | #endif
316 | 
317 | #ifndef __WATCHOS_PROHIBITED
318 | #define __WATCHOS_PROHIBITED
319 | #endif
320 | 
321 | #ifndef __WATCHOS_AVAILABLE
322 | #define __WATCHOS_AVAILABLE(_vers)
323 | #endif
324 | 
325 | #ifndef __WATCHOS_DEPRECATED
326 | #define __WATCHOS_DEPRECATED(_start, _dep, _msg)
327 | #endif
328 | 
329 | #if __has_include(<AvailabilityProhibitedInternal.h>)
330 | #include <AvailabilityProhibitedInternal.h>
331 | #endif
332 | 
333 | #endif /* __AVAILABILITY__ */
334 | 
--------------------------------------------------------------------------------
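A minimal sketch (not from the repository), assuming a tvOS 9.2 deployment
target, of how the macros defined in Availability.h annotate declarations;
the function names here are hypothetical:

    #include <Availability.h>

    /* Expands to __attribute__((availability(tvos,introduced=9.2))) when the
       compiler supports attribute_availability_tvos, and to nothing otherwise. */
    extern void my_tvos_function(void) __TVOS_AVAILABLE(9.2);

    /* First available on Mac OS X 10.5; never available on iOS. */
    extern void my_mac_function(void) __OSX_AVAILABLE_STARTING(__MAC_10_5,__IPHONE_NA);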