├── .gitignore ├── LICENSE ├── README.md ├── RandomTeam_track1n2.zip ├── RandomTeam_track1n2 ├── Dockerfile ├── about.pdf ├── config │ └── custom_config.cfg └── sample_random_agent.py ├── _vizdoom.cfg ├── build.sh ├── cig2017.wad ├── cig2017_shaping.wad ├── f1 ├── Dockerfile └── F1_track1 │ ├── README │ ├── agent.py │ ├── config │ ├── my_custom_config.cfg │ └── my_custom_config_2.cfg │ ├── history.py │ ├── model.tfmodel │ ├── my_glorious_agent.py │ ├── run.sh │ └── tensorpack │ ├── README.md │ ├── RL │ ├── __init__.py │ ├── common.py │ ├── envbase.py │ ├── expreplay.py │ ├── gymenv.py │ ├── history.py │ └── simulator.py │ ├── __init__.py │ ├── callbacks │ ├── __init__.py │ ├── base.py │ ├── common.py │ ├── dump.py │ ├── graph.py │ ├── group.py │ ├── inference.py │ ├── param.py │ └── stat.py │ ├── dataflow │ ├── __init__.py │ ├── base.py │ ├── common.py │ ├── dataset │ │ ├── .gitignore │ │ ├── __init__.py │ │ ├── bsds500.py │ │ ├── cifar.py │ │ ├── ilsvrc.py │ │ ├── mnist.py │ │ ├── svhn.py │ │ └── visualqa.py │ ├── dftools.py │ ├── format.py │ ├── image.py │ ├── imgaug │ │ ├── __init__.py │ │ ├── _test.py │ │ ├── base.py │ │ ├── crop.py │ │ ├── deform.py │ │ ├── geometry.py │ │ ├── imgproc.py │ │ ├── meta.py │ │ ├── noise.py │ │ ├── noname.py │ │ └── paste.py │ ├── prefetch.py │ ├── raw.py │ ├── remote.py │ └── tf_func.py │ ├── models │ ├── __init__.py │ ├── _common.py │ ├── _test.py │ ├── batch_norm.py │ ├── conv2d.py │ ├── fc.py │ ├── image_sample.py │ ├── model_desc.py │ ├── nonlin.py │ ├── pool.py │ ├── regularize.py │ └── softmax.py │ ├── predict │ ├── __init__.py │ ├── base.py │ ├── common.py │ ├── concurrency.py │ └── dataset.py │ ├── tfutils │ ├── __init__.py │ ├── argscope.py │ ├── common.py │ ├── gradproc.py │ ├── modelutils.py │ ├── sessinit.py │ ├── summary.py │ ├── symbolic_functions.py │ └── varmanip.py │ ├── train │ ├── __init__.py │ ├── base.py │ ├── config.py │ ├── multigpu.py │ └── trainer.py │ └── utils │ ├── __init__.py │ ├── concurrency.py │ ├── discretize.py │ ├── fs.py │ ├── gpu.py │ ├── loadcaffe.py │ ├── logger.py │ ├── lut.py │ ├── naming.py │ ├── rect.py │ ├── serialize.py │ ├── stat.py │ ├── timer.py │ └── utils.py ├── host ├── Dockerfile ├── _vizdoom.ini └── host.py ├── intelact ├── Dockerfile └── IntelAct_track2 │ ├── agent │ ├── __init__.py │ ├── agent.py │ ├── doom_simulator.py │ └── ops.py │ ├── checkpoints │ ├── checkpoint │ ├── net │ └── net.meta │ ├── config │ └── config.cfg │ └── run_agent.py ├── no_host ├── Dockerfile ├── _nohost_vizdoom.cfg ├── config │ └── custom_config.cfg └── no_host.py ├── random ├── Dockerfile ├── config │ └── custom_config.cfg └── sample_random_agent.py ├── run.sh ├── run_i.sh └── wads ├── brit11.wad ├── cig2017.wad ├── exec.wad ├── greenwar.wad └── vex.wad /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | .idea 92 | doom2.wad 93 | random/about.pdf 94 | clyde 95 | 96 | submissions -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Michał Kempka 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # Sample and Historical submissions for [Visual Doom AI Competition 2017 at CIG2017](http://vizdoom.cs.put.edu.pl/competition-cig-2017) 3 | ![doom_logo](https://upload.wikimedia.org/wikipedia/it/d/dd/Logo_doom.png) 4 | >> Submissions require [Docker](https://www.docker.com/). All images except for the host require quite recent Nvidia drivers and [nvidia-docker](https://github.com/NVIDIA/nvidia-docker) to run CUDA. 5 | 6 | >>> GUI forwarding from Docker was tested only on a Linux host and it's not guaranteed to work properly on other systems at the moment. 7 | 8 | ## Quick Start 9 | ``` 10 | ./build.sh host 11 | ./build.sh random 12 | ./run.sh host -w 13 | ./run.sh random # in a different terminal 14 | ``` 15 | 16 | ## Building and Running the Images 17 | We have prepared two wrapper scripts which will [build](build.sh) and [run](run.sh) docker images with agents and the [host](host). 18 | 19 | >>> To run gui on X11 properly make sure that GID and UID are the same in Dockerfile as in your host. 
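For reference, here is one way to check those ids on the host and where they are set in the provided Dockerfiles (a minimal sketch; the `ENV HOST_UID` / `ENV HOST_GID` lines below are the ones already present in each agent's Dockerfile, defaulting to 1000):
```
# On the host:
id -u   # prints your user id (UID)
id -g   # prints your group id (GID)

# In the chosen Dockerfile, make these values match before building:
# ENV HOST_UID 1000
# ENV HOST_GID 1000
```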
20 | 21 | To build and launch a given container, run: 22 | ``` 23 | DIR=host # or any other directory with Dockerfile (e.g., random, f1, no_host, or intelact) 24 | 25 | # Build a docker image named cig2017_${DIR} 26 | ./build.sh ${DIR} 27 | 28 | # Run a docker image named cig2017_${DIR} 29 | ./run.sh ${DIR} 30 | ``` 31 | 32 | ### Provided Images 33 | > By default, the agents connect to **localhost** and have the GUI window disabled. To customize this behavior, change the **_vizdoom.cfg** file and rebuild the image. 34 | 35 | * [host](host) - the image used to initialize the game. All agents are supposed to connect to the host. By default, the host creates a 10-minute deathmatch for 1 player on map01 with no bots. To change this behavior, use run.sh's optional parameters: 36 | 37 | ``` 38 | # Sample usage (a 12-minute deathmatch for 8 players on map 1): 39 | ./run.sh host -p 8 -t 12 -m 1 40 | 41 | usage: Host script for ViZDoom Competition at CIG 2017. [-h] [-b BOTS_NUM] 42 | [-p PLAYERS_NUM] 43 | [-m MAP] 44 | [-t TIMELIMIT] [-c] 45 | [-w] 46 | 47 | optional arguments: 48 | -h, --help show this help message and exit 49 | -b BOTS_NUM, --bots BOTS_NUM 50 | number of bots to add [0,15] (default: 0) 51 | -p PLAYERS_NUM, --players PLAYERS_NUM 52 | number of players [1,16] (default: 1) 53 | -m MAP, --map MAP map number [1,5] (default: 1) 54 | -t TIMELIMIT, --time TIMELIMIT 55 | timelimit in minutes [1,999] (default: 10) 56 | -c, --console enable console (default: False) 57 | -w, --watch roam the map as a ghost spectator (default: False) 58 | ``` 59 | 60 | * [random](random) - a random agent which connects to the host and does not do anything smart. By changing the mode to ASYNC_SPECTATOR and enabling window visibility in **_vizdoom.cfg**, you can take control yourself in place of the random agent (see the sample config below). 61 | * [no_host](no_host) - a random agent which does **NOT** connect to the host - it hosts a game for itself and can add built-in bots. This image will not be used by us but may be useful for training agents. It runs faster since it is synchronized (mode=PLAYER). 62 | * [f1](f1) - the winning entry of the ViZDoom Competition 2016 Limited Deathmatch (track 1) by **Yuxin Wu** and **Yuandong Tian**. 63 | * [intelact](intelact) - the winning entry of the ViZDoom Competition 2016 Full Deathmatch (track 2) by **Alexey Dosovitskiy** and **Vladlen Koltun**.
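As a minimal sketch, a modified **_vizdoom.cfg** for spectating could look roughly like this (based on the default one shipped in the repository root; rebuild the image after changing it):
```
doom_scenario_path = cig2017.wad
window_visible = True
mode = ASYNC_SPECTATOR
game_args += -join localhost
```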
64 | 65 | ### External files 66 | Some entries were too big to be shared on github, so we provide them externally: 67 | * [clyde](http://www.cs.put.poznan.pl/mkempka/misc/vdaic2016_agents/clyde.zip) - the 3rd place of the ViZDoom Competition 2016 Limitted Deathmatch by **Dino Ratcliffe** 68 | 69 | * [2017 submissions](http://www.cs.put.poznan.pl/mkempka/misc/vdaic2017_agents/) - best submissions of Visual Doom AI Competitions **2017** (both tracks) 70 | -------------------------------------------------------------------------------- /RandomTeam_track1n2.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/RandomTeam_track1n2.zip -------------------------------------------------------------------------------- /RandomTeam_track1n2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # Cuda 7.5 with cudnn 5 4 | #FROM nvidia/cuda:7.5-cudnn5-devel 5 | # Cuda 8 with cudnn 5 6 | FROM nvidia/cuda:8.0-cudnn5-devel 7 | 8 | # ViZdoom dependencies 9 | RUN apt-get update && apt-get install -y \ 10 | build-essential \ 11 | bzip2 \ 12 | cmake \ 13 | curl \ 14 | git \ 15 | libboost-all-dev \ 16 | libbz2-dev \ 17 | libfluidsynth-dev \ 18 | libfreetype6-dev \ 19 | libgme-dev \ 20 | libgtk2.0-dev \ 21 | libjpeg-dev \ 22 | libopenal-dev \ 23 | libpng12-dev \ 24 | libsdl2-dev \ 25 | libwildmidi-dev \ 26 | libzmq3-dev \ 27 | nano \ 28 | nasm \ 29 | pkg-config \ 30 | rsync \ 31 | software-properties-common \ 32 | sudo \ 33 | tar \ 34 | timidity \ 35 | unzip \ 36 | wget \ 37 | zlib1g-dev \ 38 | python3-dev \ 39 | python3 \ 40 | python3-pip 41 | 42 | 43 | 44 | # Python with pip 45 | #RUN apt-get install -y python-dev python python-pip 46 | #RUN pip install pip --upgrade 47 | 48 | # Python3 with pip3 49 | RUN pip3 install pip --upgrade 50 | 51 | 52 | 53 | # Vizdoom and other pip packages if needed 54 | #RUN pip --no-cache-dir install \ 55 | # git+https://github.com/mwydmuch/ViZDoom \ 56 | # numpy \ 57 | #RUN pip --no-cache-dir install \ 58 | # https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl 59 | 60 | 61 | # Vizdoom and other pip3 packages if needed 62 | RUN pip3 --no-cache-dir install \ 63 | git+https://github.com/mwydmuch/ViZDoom \ 64 | opencv-python 65 | 66 | RUN pip3 --no-cache-dir install \ 67 | https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp35-cp35m-linux_x86_64.whl 68 | 69 | 70 | # Enables X11 sharing and creates user home directory 71 | ENV USER_NAME cig2017 72 | ENV HOME_DIR /home/$USER_NAME 73 | 74 | # Replace HOST_UID/HOST_GUID with your user / group id (needed for X11) 75 | ENV HOST_UID 1000 76 | ENV HOST_GID 1000 77 | 78 | RUN export uid=${HOST_UID} gid=${HOST_GID} && \ 79 | mkdir -p ${HOME_DIR} && \ 80 | echo "$USER_NAME:x:${uid}:${gid}:$USER_NAME,,,:$HOME_DIR:/bin/bash" >> /etc/passwd && \ 81 | echo "$USER_NAME:x:${uid}:" >> /etc/group && \ 82 | echo "$USER_NAME ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/$USER_NAME && \ 83 | chmod 0440 /etc/sudoers.d/$USER_NAME && \ 84 | chown ${uid}:${gid} -R ${HOME_DIR} 85 | 86 | USER ${USER_NAME} 87 | WORKDIR ${HOME_DIR} 88 | 89 | 90 | # Copy agent files inside Docker image: 91 | COPY config config 92 | COPY sample_random_agent.py . 93 | 94 | 95 | ### Do not change this ### 96 | COPY cig2017.wad . 97 | COPY _vizdoom.cfg . 
98 | ########################## 99 | # Uncomment to use doom2.wad: 100 | #COPY doom2.wad /usr/local/lib/python3.5/dist-packages/vizdoom 101 | 102 | CMD ./sample_random_agent.py 103 | -------------------------------------------------------------------------------- /RandomTeam_track1n2/about.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/RandomTeam_track1n2/about.pdf -------------------------------------------------------------------------------- /RandomTeam_track1n2/config/custom_config.cfg: -------------------------------------------------------------------------------- 1 | # Change if needed 2 | screen_resolution = RES_640X480 3 | screen_format = CRCGCB 4 | render_hud = true 5 | render_crosshair = true 6 | render_weapon = true 7 | render_decals = false 8 | render_particles = false 9 | 10 | # Add more if needed 11 | available_buttons = 12 | { 13 | TURN_LEFT 14 | TURN_RIGHT 15 | ATTACK 16 | 17 | MOVE_RIGHT 18 | MOVE_LEFT 19 | 20 | MOVE_FORWARD 21 | MOVE_BACKWARD 22 | TURN_LEFT_RIGHT_DELTA 23 | LOOK_UP_DOWN_DELTA 24 | } 25 | 26 | 27 | # Add more if needed 28 | available_game_variables = 29 | { 30 | HEALTH 31 | } -------------------------------------------------------------------------------- /RandomTeam_track1n2/sample_random_agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import vizdoom as vzd 4 | from random import choice 5 | 6 | game = vzd.DoomGame() 7 | game.load_config("config/custom_config.cfg") 8 | 9 | # Name your agent and select color 10 | # colors: 0 - green, 1 - gray, 2 - brown, 3 - red, 4 - light gray, 5 - light brown, 6 - light red, 7 - light blue 11 | name = "SampleRandomAgent" 12 | color = 0 13 | game.add_game_args("+name {} +colorset {}".format(name, color)) 14 | game.init() 15 | 16 | # Three sample actions: turn left/right and shoot 17 | actions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] 18 | 19 | # Play until the game (episode) is over. 20 | while not game.is_episode_finished(): 21 | 22 | if game.is_player_dead(): 23 | # Use this to respawn immediately after death, new state will be available. 24 | game.respawn_player() 25 | 26 | # Or observe the game until automatic respawn. 27 | # game.advance_action(); 28 | # continue; 29 | 30 | # Analyze the state ... or not 31 | s = game.get_state() 32 | 33 | # Make your action. 34 | game.make_action(choice(actions)) 35 | 36 | # Log your frags every ~5 seconds 37 | if s.number % 175 == 0: 38 | print("Frags:", game.get_game_variable(vzd.GameVariable.FRAGCOUNT)) 39 | 40 | game.close() 41 | -------------------------------------------------------------------------------- /_vizdoom.cfg: -------------------------------------------------------------------------------- 1 | doom_scenario_path = cig2017.wad 2 | window_visible = False 3 | mode = ASYNC_PLAYER 4 | game_args += -join localhost -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ERROR_PREFIX="ERROR:" 4 | DIRECTORY=$1 5 | REPO_ROOT=`pwd` 6 | 7 | # TODO add usage and error that no arguments specified 8 | if [ ! -d "$DIRECTORY" ]; then 9 | echo "${ERROR_PREFIX} Directory '${DIRECTORY}' doesn't exist. Aborting'" >&2 10 | exit 1 11 | fi 12 | 13 | echo "Entering directory: ${DIRECTORY}" 14 | cd $DIRECTORY 15 | 16 | if [ ! 
-f Dockerfile ]; then 17 | echo "${ERROR_PREFIX} No Dockerfile found. Aborting." >&2 18 | exit 2 19 | fi 20 | 21 | image_tag="cig2017_`basename $DIRECTORY`" 22 | container_name=${image_tag} 23 | 24 | if [ ! -f ${REPO_ROOT}/cig2017.wad ]; then 25 | echo "${ERROR_PREFIX} cig2017.wad not found. Aborting." >&2 26 | exit 3 27 | fi 28 | if [ ! -f ${REPO_ROOT}/_vizdoom.cfg ]; then 29 | echo "${ERROR_PREFIX} _vizdoom.cfg not found. Aborting." >&2 30 | exit 4 31 | fi 32 | 33 | echo ${REPO_ROOT} 34 | cp ${REPO_ROOT}/cig2017.wad . 35 | cp ${REPO_ROOT}/_vizdoom.cfg . 36 | if [ -f ${REPO_ROOT}/doom2.wad ]; then 37 | cp ${REPO_ROOT}/doom2.wad . 38 | fi 39 | 40 | docker build -t ${image_tag} . 41 | rm -f cig2017.wad _vizdoom.cfg doom2.wad 42 | 43 | -------------------------------------------------------------------------------- /cig2017.wad: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/cig2017.wad -------------------------------------------------------------------------------- /cig2017_shaping.wad: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/cig2017_shaping.wad -------------------------------------------------------------------------------- /f1/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # Cuda 7.5 with cudnn 4.0.7 4 | FROM nvidia/cuda:7.5-devel 5 | 6 | RUN echo "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list 7 | ENV CUDNN_VERSION 4 8 | RUN apt-get update && apt-get install -y --no-install-recommends \ 9 | libcudnn4=4.0.7 libcudnn4-dev=4.0.7 10 | 11 | RUN apt-get update && apt-get install -y \ 12 | build-essential \ 13 | bzip2 \ 14 | cmake \ 15 | curl \ 16 | git \ 17 | libboost-all-dev \ 18 | libbz2-dev \ 19 | libfluidsynth-dev \ 20 | libfreetype6-dev \ 21 | libgme-dev \ 22 | libgtk2.0-dev \ 23 | libjpeg-dev \ 24 | libopenal-dev \ 25 | libpng12-dev \ 26 | libsdl2-dev \ 27 | libwildmidi-dev \ 28 | libzmq3-dev \ 29 | nano \ 30 | nasm \ 31 | pkg-config \ 32 | rsync \ 33 | software-properties-common \ 34 | sudo \ 35 | tar \ 36 | timidity \ 37 | unzip \ 38 | wget \ 39 | zlib1g-dev 40 | 41 | RUN apt-get update && apt-get install -y dbus 42 | 43 | # Python with pip 44 | RUN apt-get update && apt-get install -y python-dev python python-pip 45 | RUN pip install pip --upgrade 46 | 47 | # Vizdoom and other pip packages if needed 48 | RUN pip --no-cache-dir install \ 49 | git+https://github.com/mwydmuch/ViZDoom 50 | 51 | RUN pip --no-cache-dir install \ 52 | https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl 53 | RUN pip --no-cache-dir install opencv-python termcolor tqdm subprocess32 msgpack-python msgpack-numpy 54 | 55 | 56 | # Enables X11 sharing and creates user home directory 57 | ENV USER_NAME cig2017 58 | ENV HOME_DIR /home/$USER_NAME 59 | 60 | # Replace HOST_UID/HOST_GUID with your user / group id (needed for X11) 61 | ENV HOST_UID 1000 62 | ENV HOST_GID 1000 63 | 64 | RUN export uid=${HOST_UID} gid=${HOST_GID} && \ 65 | mkdir -p ${HOME_DIR} && \ 66 | echo "$USER_NAME:x:${uid}:${gid}:$USER_NAME,,,:$HOME_DIR:/bin/bash" >> /etc/passwd && \ 67 | echo "$USER_NAME:x:${uid}:" >> /etc/group && \ 68 | echo "$USER_NAME ALL=(ALL) NOPASSWD: ALL" > 
/etc/sudoers.d/$USER_NAME && \ 69 | chmod 0440 /etc/sudoers.d/$USER_NAME && \ 70 | chown ${uid}:${gid} -R ${HOME_DIR} 71 | 72 | USER ${USER_NAME} 73 | WORKDIR ${HOME_DIR} 74 | 75 | 76 | # Copy agent files inside Docker image: 77 | COPY F1_track1 . 78 | 79 | 80 | ### Do not change this ### 81 | COPY cig2017.wad . 82 | COPY _vizdoom.cfg . 83 | ########################## 84 | 85 | CMD python my_glorious_agent.py 86 | -------------------------------------------------------------------------------- /f1/F1_track1/README: -------------------------------------------------------------------------------- 1 | Team name: F1 2 | 3 | System: Ubuntu 4 | Language: Python2 5 | 6 | ################################################################## 7 | # Dependencies: 8 | 9 | Python packages: 10 | - Tensorflow 0.9.0 11 | - numpy >= 1.11.0 12 | - opencv >= 2 13 | - termcolor 14 | - tqdm 15 | - msgpack-python 16 | - msgpack-numpy 17 | - subprocess32 18 | 19 | 20 | ################################################################## 21 | # Additional Information # 22 | 23 | This directory contains the original submission working under early version of TensorFlow + tensorpack. 24 | You can remove the `tensorpack` directory and apply [this patch](https://gist.github.com/ppwwyyxx/62d5723dea411a515ee1a52b1a87a637) to run under latest version of TesnroFlow + tensorpack. 25 | -------------------------------------------------------------------------------- /f1/F1_track1/config/my_custom_config.cfg: -------------------------------------------------------------------------------- 1 | screen_format = CRCGCB 2 | screen_resolution = RES_512X384 3 | render_hud = false 4 | render_crosshair = true 5 | render_weapon = true 6 | render_decals = false 7 | render_particles = false 8 | 9 | # Add more if needed 10 | available_buttons = 11 | { 12 | TURN_LEFT 13 | TURN_RIGHT 14 | ATTACK 15 | MOVE_FORWARD_BACKWARD_DELTA 16 | MOVE_RIGHT 17 | MOVE_LEFT 18 | TURN_LEFT_RIGHT_DELTA 19 | TURN180 20 | } 21 | 22 | # Add more if needed 23 | available_game_variables = 24 | { 25 | HEALTH 26 | AMMO5 27 | FRAGCOUNT 28 | } 29 | -------------------------------------------------------------------------------- /f1/F1_track1/config/my_custom_config_2.cfg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/f1/F1_track1/config/my_custom_config_2.cfg -------------------------------------------------------------------------------- /f1/F1_track1/history.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: history.py 4 | # Author: Yuxin Wu 5 | 6 | import numpy as np 7 | from tensorpack.RL import HistoryFramePlayer 8 | 9 | __all__ = ['HistoryPlayerWithVar'] 10 | 11 | class HistoryPlayerWithVar(HistoryFramePlayer): 12 | def current_state(self): 13 | assert len(self.history) != 0 14 | assert len(self.history[0]) == 2, "state needs to be like [img, vars]" 15 | diff_len = self.history.maxlen - len(self.history) 16 | zeros = [np.zeros_like(self.history[0][0]) for k in range(diff_len)] 17 | for k in self.history: 18 | zeros.append(k[0]) 19 | img = np.concatenate(zeros, axis=2) 20 | gvar = self.history[-1][1] 21 | return img, gvar 22 | 23 | -------------------------------------------------------------------------------- /f1/F1_track1/model.tfmodel: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/f1/F1_track1/model.tfmodel -------------------------------------------------------------------------------- /f1/F1_track1/my_glorious_agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import print_function 4 | from vizdoom import * 5 | from agent import Runner 6 | import time 7 | 8 | game = DoomGame() 9 | game.load_config("config/my_custom_config.cfg") 10 | 11 | # Name your agent and select color 12 | # colors: 0 - green, 1 - gray, 2 - brown, 3 - red, 4 - light gray, 5 - light brown, 6 - light red, 7 - light blue 13 | game.add_game_args("+name F1 +colorset 2") 14 | game.init() 15 | 16 | print("F1 joined the party!") 17 | 18 | 19 | runner = Runner(game) 20 | 21 | 22 | # Play until the game (episode) is over. 23 | while not game.is_episode_finished(): 24 | if game.is_player_dead(): 25 | # Use this to respawn immediately after death, new state will be available. 26 | game.respawn_player() 27 | # Or observe the game until automatic respawn. 28 | #game.advance_action(); 29 | #continue; 30 | 31 | runner.step() 32 | 33 | 34 | 35 | game.close() 36 | -------------------------------------------------------------------------------- /f1/F1_track1/run.sh: -------------------------------------------------------------------------------- 1 | python2 my_glorious_agent.py 2 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Tensorpack High-Level Tutorial 3 | 4 | ### How to feed data 5 | Define a `DataFlow` instance to feed data. 6 | See [Dataflow documentation](https://github.com/ppwwyyxx/tensorpack/tree/master/tensorpack/dataflow) 7 | 8 | ### How to define a model 9 | 10 | Take a look at [mnist example](https://github.com/ppwwyyxx/tensorpack/blob/master/example_mnist.py) first. 
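Referring back to the data-feeding section above, the sketch below is a hypothetical, minimal custom `DataFlow` (the class name `RandomImageFlow` and the random data are made up for illustration); it relies only on the `RNGDataFlow`/`DataFlow` interface (`get_data`, `size`, `reset_state`) defined in `dataflow/base.py` of this bundled copy:
```
from tensorpack.dataflow import RNGDataFlow

class RandomImageFlow(RNGDataFlow):
    """ Toy dataflow yielding [image, label] datapoints made of random data. """
    def __init__(self, num, shape=(32, 32, 3)):
        self.num = num
        self.shape = shape

    def size(self):
        return self.num

    def get_data(self):
        # self.rng is created by RNGDataFlow.reset_state()
        for _ in range(self.num):
            img = self.rng.randint(0, 256, size=self.shape).astype('uint8')
            label = int(self.rng.randint(10))
            yield [img, label]

if __name__ == '__main__':
    df = RandomImageFlow(4)
    df.reset_state()  # must be called before consuming the dataflow
    for dp in df.get_data():
        print(dp[0].shape, dp[1])
```
As documented in `dataflow/base.py`, `reset_state()` has to be called before consuming the dataflow; otherwise `self.rng` is not initialized.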
11 | 12 | ### How to perform training 13 | 14 | 15 | ### How to perform inference 16 | 17 | 18 | ### How to add new models 19 | 20 | ### Use tensorboard summary 21 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/RL/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import importlib 7 | import os 8 | import os.path 9 | 10 | def _global_import(name): 11 | p = __import__(name, globals(), locals(), level=1) 12 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 13 | del globals()[name] 14 | for k in lst: 15 | globals()[k] = p.__dict__[k] 16 | 17 | for _, module_name, _ in walk_packages( 18 | [os.path.dirname(__file__)]): 19 | if not module_name.startswith('_'): 20 | _global_import(module_name) 21 | 22 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/RL/common.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: common.py 4 | # Author: Yuxin Wu 5 | 6 | 7 | import numpy as np 8 | from collections import deque 9 | from .envbase import ProxyPlayer 10 | 11 | __all__ = ['PreventStuckPlayer', 'LimitLengthPlayer', 'AutoRestartPlayer', 12 | 'MapPlayerState'] 13 | 14 | class PreventStuckPlayer(ProxyPlayer): 15 | """ Prevent the player from getting stuck (repeating a no-op) 16 | by inserting a different action. Useful in games such as Atari Breakout 17 | where the agent needs to press the 'start' button to start playing. 18 | """ 19 | # TODO hash the state as well? 20 | def __init__(self, player, nr_repeat, action): 21 | """ 22 | :param nr_repeat: trigger the 'action' after this many of repeated action 23 | :param action: the action to be triggered to get out of stuck 24 | Does auto-reset, but doesn't auto-restart the underlying player. 25 | """ 26 | super(PreventStuckPlayer, self).__init__(player) 27 | self.act_que = deque(maxlen=nr_repeat) 28 | self.trigger_action = action 29 | 30 | def action(self, act): 31 | self.act_que.append(act) 32 | if self.act_que.count(self.act_que[0]) == self.act_que.maxlen: 33 | act = self.trigger_action 34 | r, isOver = self.player.action(act) 35 | if isOver: 36 | self.act_que.clear() 37 | return (r, isOver) 38 | 39 | def restart_episode(self): 40 | super(PreventStuckPlayer, self).restart_episode() 41 | self.act_que.clear() 42 | 43 | class LimitLengthPlayer(ProxyPlayer): 44 | """ Limit the total number of actions in an episode. 45 | Will auto restart the underlying player on timeout 46 | """ 47 | def __init__(self, player, limit): 48 | super(LimitLengthPlayer, self).__init__(player) 49 | self.limit = limit 50 | self.cnt = 0 51 | 52 | def action(self, act): 53 | r, isOver = self.player.action(act) 54 | self.cnt += 1 55 | if self.cnt >= self.limit: 56 | isOver = True 57 | self.finish_episode() 58 | self.restart_episode() 59 | if isOver: 60 | self.cnt = 0 61 | return (r, isOver) 62 | 63 | def restart_episode(self): 64 | self.player.restart_episode() 65 | self.cnt = 0 66 | 67 | class AutoRestartPlayer(ProxyPlayer): 68 | """ Auto-restart the player on episode ends, 69 | in case some player wasn't designed to do so. 
""" 70 | def action(self, act): 71 | r, isOver = self.player.action(act) 72 | if isOver: 73 | self.player.finish_episode() 74 | self.player.restart_episode() 75 | return r, isOver 76 | 77 | class MapPlayerState(ProxyPlayer): 78 | def __init__(self, player, func): 79 | super(MapPlayerState, self).__init__(player) 80 | self.func = func 81 | 82 | def current_state(self): 83 | return self.func(self.player.current_state()) 84 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/RL/envbase.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: envbase.py 4 | # Author: Yuxin Wu 5 | 6 | 7 | from abc import abstractmethod, ABCMeta 8 | from collections import defaultdict 9 | import random 10 | from ..utils import get_rng 11 | 12 | __all__ = ['RLEnvironment', 'NaiveRLEnvironment', 'ProxyPlayer', 13 | 'DiscreteActionSpace'] 14 | 15 | class RLEnvironment(object): 16 | __meta__ = ABCMeta 17 | 18 | def __init__(self): 19 | self.reset_stat() 20 | 21 | @abstractmethod 22 | def current_state(self): 23 | """ 24 | Observe, return a state representation 25 | """ 26 | 27 | @abstractmethod 28 | def action(self, act): 29 | """ 30 | Perform an action. Will automatically start a new episode if isOver==True 31 | :param act: the action 32 | :returns: (reward, isOver) 33 | """ 34 | 35 | def restart_episode(self): 36 | """ Start a new episode, even if the current hasn't ended """ 37 | raise NotImplementedError() 38 | 39 | def finish_episode(self): 40 | """ get called when an episode finished""" 41 | pass 42 | 43 | def get_action_space(self): 44 | """ return an `ActionSpace` instance""" 45 | raise NotImplementedError() 46 | 47 | def reset_stat(self): 48 | """ reset all statistics counter""" 49 | self.stats = defaultdict(list) 50 | 51 | def play_one_episode(self, func, stat='score'): 52 | """ play one episode for eval. 
53 | :param func: call with the state and return an action 54 | :param stat: a key or list of keys in stats 55 | :returns: the stat(s) after running this episode 56 | """ 57 | if not isinstance(stat, list): 58 | stat = [stat] 59 | while True: 60 | s = self.current_state() 61 | act = func(s) 62 | r, isOver = self.action(act) 63 | #print r 64 | if isOver: 65 | s = [self.stats[k] for k in stat] 66 | self.reset_stat() 67 | return s if len(s) > 1 else s[0] 68 | 69 | class ActionSpace(object): 70 | def __init__(self): 71 | self.rng = get_rng(self) 72 | 73 | @abstractmethod 74 | def sample(self): 75 | pass 76 | 77 | def num_actions(self): 78 | raise NotImplementedError() 79 | 80 | class DiscreteActionSpace(ActionSpace): 81 | def __init__(self, num): 82 | super(DiscreteActionSpace, self).__init__() 83 | self.num = num 84 | 85 | def sample(self): 86 | return self.rng.randint(self.num) 87 | 88 | def num_actions(self): 89 | return self.num 90 | 91 | def __repr__(self): 92 | return "DiscreteActionSpace({})".format(self.num) 93 | 94 | def __str__(self): 95 | return "DiscreteActionSpace({})".format(self.num) 96 | 97 | class NaiveRLEnvironment(RLEnvironment): 98 | """ for testing only""" 99 | def __init__(self): 100 | self.k = 0 101 | def current_state(self): 102 | self.k += 1 103 | return self.k 104 | def action(self, act): 105 | self.k = act 106 | return (self.k, self.k > 10) 107 | 108 | class ProxyPlayer(RLEnvironment): 109 | """ Serve as a proxy another player """ 110 | def __init__(self, player): 111 | self.player = player 112 | 113 | def reset_stat(self): 114 | self.player.reset_stat() 115 | 116 | def current_state(self): 117 | return self.player.current_state() 118 | 119 | def action(self, act): 120 | return self.player.action(act) 121 | 122 | @property 123 | def stats(self): 124 | return self.player.stats 125 | 126 | def restart_episode(self): 127 | self.player.restart_episode() 128 | 129 | def finish_episode(self): 130 | self.player.finish_episode() 131 | 132 | def get_action_space(self): 133 | return self.player.get_action_space() 134 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/RL/gymenv.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: gymenv.py 4 | # Author: Yuxin Wu 5 | 6 | 7 | import time 8 | from ..utils import logger 9 | try: 10 | import gym 11 | except ImportError: 12 | logger.warn("Cannot import gym. GymEnv won't be available.") 13 | 14 | import threading 15 | 16 | from ..utils.fs import * 17 | from ..utils.stat import * 18 | from .envbase import RLEnvironment, DiscreteActionSpace 19 | 20 | __all__ = ['GymEnv'] 21 | 22 | _ALE_LOCK = threading.Lock() 23 | 24 | class GymEnv(RLEnvironment): 25 | """ 26 | An OpenAI/gym wrapper. Will auto restart. 
27 | """ 28 | def __init__(self, name, dumpdir=None, viz=False, auto_restart=True): 29 | with _ALE_LOCK: 30 | self.gymenv = gym.make(name) 31 | if dumpdir: 32 | mkdir_p(dumpdir) 33 | self.gymenv.monitor.start(dumpdir) 34 | 35 | self.reset_stat() 36 | self.rwd_counter = StatCounter() 37 | self.restart_episode() 38 | self.auto_restart = auto_restart 39 | self.viz = viz 40 | 41 | def restart_episode(self): 42 | self.rwd_counter.reset() 43 | self._ob = self.gymenv.reset() 44 | 45 | def finish_episode(self): 46 | self.stats['score'].append(self.rwd_counter.sum) 47 | 48 | def current_state(self): 49 | if self.viz: 50 | self.gymenv.render() 51 | time.sleep(self.viz) 52 | return self._ob 53 | 54 | def action(self, act): 55 | self._ob, r, isOver, info = self.gymenv.step(act) 56 | self.rwd_counter.feed(r) 57 | if isOver: 58 | self.finish_episode() 59 | if self.auto_restart: 60 | self.restart_episode() 61 | return r, isOver 62 | 63 | def get_action_space(self): 64 | spc = self.gymenv.action_space 65 | assert isinstance(spc, gym.spaces.discrete.Discrete) 66 | return DiscreteActionSpace(spc.n) 67 | 68 | if __name__ == '__main__': 69 | env = GymEnv('Breakout-v0', viz=0.1) 70 | num = env.get_action_space().num_actions() 71 | 72 | from ..utils import * 73 | rng = get_rng(num) 74 | while True: 75 | act = rng.choice(range(num)) 76 | #print act 77 | r, o = env.action(act) 78 | env.current_state() 79 | if r != 0 or o: 80 | print r, o 81 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/RL/history.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: history.py 4 | # Author: Yuxin Wu 5 | 6 | import numpy as np 7 | from collections import deque 8 | from .envbase import ProxyPlayer 9 | 10 | __all__ = ['HistoryFramePlayer'] 11 | 12 | class HistoryFramePlayer(ProxyPlayer): 13 | """ Include history frames in state, or use black images 14 | Assume player will do auto-restart. 
15 | """ 16 | def __init__(self, player, hist_len): 17 | """ 18 | :param hist_len: total length of the state, including the current 19 | and `hist_len-1` history 20 | """ 21 | super(HistoryFramePlayer, self).__init__(player) 22 | self.history = deque(maxlen=hist_len) 23 | 24 | s = self.player.current_state() 25 | self.history.append(s) 26 | 27 | def current_state(self): 28 | assert len(self.history) != 0 29 | diff_len = self.history.maxlen - len(self.history) 30 | if diff_len == 0: 31 | return np.concatenate(self.history, axis=2) 32 | zeros = [np.zeros_like(self.history[0]) for k in range(diff_len)] 33 | for k in self.history: 34 | zeros.append(k) 35 | assert len(zeros) == self.history.maxlen 36 | return np.concatenate(zeros, axis=2) 37 | 38 | def action(self, act): 39 | r, isOver = self.player.action(act) 40 | s = self.player.current_state() 41 | self.history.append(s) 42 | 43 | if isOver: # s would be a new episode 44 | self.history.clear() 45 | self.history.append(s) 46 | return (r, isOver) 47 | 48 | def restart_episode(self): 49 | super(HistoryFramePlayer, self).restart_episode() 50 | self.history.clear() 51 | self.history.append(self.player.current_state()) 52 | 53 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | import numpy # avoid https://github.com/tensorflow/tensorflow/issues/2034 6 | import cv2 # avoid https://github.com/tensorflow/tensorflow/issues/1924 7 | 8 | from . import models 9 | from . import train 10 | from . import utils 11 | from . import tfutils 12 | from . import callbacks 13 | from . import dataflow 14 | 15 | from .train import * 16 | from .models import * 17 | from .utils import * 18 | from .tfutils import * 19 | from .callbacks import * 20 | from .dataflow import * 21 | from .predict import * 22 | 23 | if int(numpy.__version__.split('.')[1]) < 9: 24 | logger.warn("Numpy < 1.9 could be extremely slow on some tasks.") 25 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/callbacks/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import os 7 | 8 | def _global_import(name): 9 | p = __import__(name, globals(), locals(), level=1) 10 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 11 | del globals()[name] 12 | for k in lst: 13 | globals()[k] = p.__dict__[k] 14 | 15 | for _, module_name, _ in walk_packages( 16 | [os.path.dirname(__file__)]): 17 | if not module_name.startswith('_'): 18 | _global_import(module_name) 19 | 20 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/callbacks/base.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: base.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | import sys 7 | import os 8 | import time 9 | from abc import abstractmethod, ABCMeta 10 | 11 | from ..utils import * 12 | 13 | __all__ = ['Callback', 'PeriodicCallback'] 14 | 15 | class Callback(object): 16 | """ Base class for all callbacks """ 17 | __metaclass__ = ABCMeta 18 | 19 | def before_train(self): 20 | """ 21 | Called right before the first iteration. 
22 | """ 23 | self._before_train() 24 | 25 | def _before_train(self): 26 | pass 27 | 28 | def setup_graph(self, trainer): 29 | """ 30 | Called before finalizing the graph. 31 | Use this callback to setup some ops used in the callback. 32 | 33 | :param trainer: a :class:`train.Trainer` instance 34 | """ 35 | self.trainer = trainer 36 | self.graph = tf.get_default_graph() 37 | self.epoch_num = self.trainer.config.starting_epoch - 1 38 | # self.epoch_num is always the number of epochs that finished updating parameters. 39 | with tf.name_scope(type(self).__name__): 40 | self._setup_graph() 41 | 42 | def _setup_graph(self): 43 | pass 44 | 45 | def after_train(self): 46 | """ 47 | Called after training. 48 | """ 49 | self._after_train() 50 | 51 | def _after_train(self): 52 | pass 53 | 54 | def trigger_step(self): 55 | """ 56 | Callback to be triggered after every step (every backpropagation) 57 | 58 | Could be useful to apply some tricks on parameters (clipping, low-rank, etc) 59 | """ 60 | 61 | @property 62 | def global_step(self): 63 | """ 64 | Access the global step value of this training. 65 | """ 66 | return self.trainer.global_step 67 | 68 | def trigger_epoch(self): 69 | """ 70 | Triggered after every epoch. 71 | 72 | In this function, self.epoch_num would be the number of epoch finished. 73 | """ 74 | self.epoch_num += 1 75 | self._trigger_epoch() 76 | 77 | def _trigger_epoch(self): 78 | pass 79 | 80 | def __str__(self): 81 | return type(self).__name__ 82 | 83 | class ProxyCallback(Callback): 84 | def __init__(self, cb): 85 | self.cb = cb 86 | 87 | def _before_train(self): 88 | self.cb.before_train() 89 | 90 | def _setup_graph(self): 91 | self.cb.setup_graph(self.trainer) 92 | 93 | def _after_train(self): 94 | self.cb.after_train() 95 | 96 | def _trigger_epoch(self): 97 | self.cb.trigger_epoch() 98 | 99 | def __str__(self): 100 | return str(self.cb) 101 | 102 | class PeriodicCallback(ProxyCallback): 103 | """ 104 | A callback to be triggered after every `period` epochs. 105 | Doesn't work for trigger_step 106 | """ 107 | def __init__(self, cb, period): 108 | """ 109 | :param cb: a `Callback` 110 | :param period: int 111 | """ 112 | super(PeriodicCallback, self).__init__(cb) 113 | self.period = int(period) 114 | 115 | def _trigger_epoch(self): 116 | if self.epoch_num % self.period == 0: 117 | self.cb.epoch_num = self.epoch_num - 1 118 | self.cb.trigger_epoch() 119 | 120 | def __str__(self): 121 | return "Periodic-" + str(self.cb) 122 | 123 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/callbacks/common.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: common.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | import os, shutil 7 | import re 8 | 9 | from .base import Callback 10 | from ..utils import * 11 | from ..tfutils.varmanip import get_savename_from_varname 12 | 13 | __all__ = ['ModelSaver', 'MinSaver', 'MaxSaver'] 14 | 15 | class ModelSaver(Callback): 16 | """ 17 | Save the model to logger directory. 18 | """ 19 | def __init__(self, keep_recent=10, keep_freq=0.5, 20 | var_collections=tf.GraphKeys.VARIABLES): 21 | """ 22 | :param keep_recent: see `tf.train.Saver` documentation. 23 | :param keep_freq: see `tf.train.Saver` documentation. 
24 | """ 25 | self.keep_recent = keep_recent 26 | self.keep_freq = keep_freq 27 | if not isinstance(var_collections, list): 28 | var_collections = [var_collections] 29 | self.var_collections = var_collections 30 | 31 | def _setup_graph(self): 32 | vars = [] 33 | for key in self.var_collections: 34 | vars.extend(tf.get_collection(key)) 35 | self.path = os.path.join(logger.LOG_DIR, 'model') 36 | self.saver = tf.train.Saver( 37 | var_list=ModelSaver._get_var_dict(vars), 38 | max_to_keep=self.keep_recent, 39 | keep_checkpoint_every_n_hours=self.keep_freq) 40 | self.meta_graph_written = False 41 | 42 | @staticmethod 43 | def _get_var_dict(vars): 44 | var_dict = {} 45 | for v in vars: 46 | name = get_savename_from_varname(v.name) 47 | if name not in var_dict: 48 | if name != v.name: 49 | logger.info( 50 | "{} renamed to {} when saving model.".format(v.name, name)) 51 | var_dict[name] = v 52 | else: 53 | logger.warn("Variable {} won't be saved \ 54 | because {} will be saved".format(v.name, var_dict[name].name)) 55 | return var_dict 56 | 57 | def _trigger_epoch(self): 58 | try: 59 | if not self.meta_graph_written: 60 | self.saver.export_meta_graph( 61 | os.path.join(logger.LOG_DIR, 62 | 'graph-{}.meta'.format(logger.get_time_str())), 63 | collection_list=self.graph.get_all_collection_keys()) 64 | self.meta_graph_written = True 65 | self.saver.save( 66 | tf.get_default_session(), 67 | self.path, 68 | global_step=self.global_step, 69 | write_meta_graph=False) 70 | 71 | # create a symbolic link for the latest model 72 | latest = self.saver.last_checkpoints[-1] 73 | basename = os.path.basename(latest) 74 | linkname = os.path.join(os.path.dirname(latest), 'latest') 75 | try: 76 | os.unlink(linkname) 77 | except OSError: 78 | pass 79 | os.symlink(basename, linkname) 80 | except (OSError, IOError): # disk error sometimes.. just ignore it 81 | logger.exception("Exception in ModelSaver.trigger_epoch!") 82 | 83 | class MinSaver(Callback): 84 | def __init__(self, monitor_stat, reverse=True): 85 | self.monitor_stat = monitor_stat 86 | self.reverse = reverse 87 | self.min = None 88 | 89 | def _get_stat(self): 90 | return self.trainer.stat_holder.get_stat_now(self.monitor_stat) 91 | 92 | def _need_save(self): 93 | if self.reverse: 94 | return self._get_stat() > self.min 95 | else: 96 | return self._get_stat() < self.min 97 | 98 | def _trigger_epoch(self): 99 | if self.min is None or self._need_save(): 100 | self.min = self._get_stat() 101 | self._save() 102 | 103 | def _save(self): 104 | ckpt = tf.train.get_checkpoint_state(logger.LOG_DIR) 105 | if ckpt is None: 106 | raise RuntimeError( 107 | "Cannot find a checkpoint state. 
Did you forget to use ModelSaver?") 108 | path = ckpt.model_checkpoint_path 109 | newname = os.path.join(logger.LOG_DIR, 110 | ('max-' if self.reverse else 'min-') + self.monitor_stat) 111 | shutil.copy(path, newname) 112 | logger.info("Model with {} '{}' saved.".format( 113 | 'maximum' if self.reverse else 'minimum', self.monitor_stat)) 114 | 115 | class MaxSaver(MinSaver): 116 | def __init__(self, monitor_stat): 117 | super(MaxSaver, self).__init__(monitor_stat, True) 118 | 119 | 120 | 121 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/callbacks/dump.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: dump.py 3 | # Author: Yuxin Wu 4 | 5 | import os 6 | import cv2 7 | import numpy as np 8 | 9 | from .base import Callback 10 | from ..utils import logger 11 | from ..tfutils import get_op_var_name 12 | 13 | __all__ = ['DumpParamAsImage'] 14 | 15 | class DumpParamAsImage(Callback): 16 | """ 17 | Dump a variable to image(s) after every epoch. 18 | """ 19 | def __init__(self, var_name, prefix=None, map_func=None, scale=255, clip=False): 20 | """ 21 | :param var_name: the name of the variable. 22 | 23 | :param prefix: the filename prefix for saved images. Default is the op name. 24 | 25 | :param map_func: map the value of the variable to an image or list of 26 | images of shape [h, w] or [h, w, c]. If None, will use identity 27 | 28 | :param scale: a multiplier on pixel values, applied after map_func. default to 255 29 | :param clip: whether to clip the result to [0, 255] 30 | """ 31 | op_name, self.var_name = get_op_var_name(var_name) 32 | self.func = map_func 33 | if prefix is None: 34 | self.prefix = op_name 35 | else: 36 | self.prefix = prefix 37 | self.log_dir = logger.LOG_DIR 38 | self.scale = scale 39 | self.clip = clip 40 | 41 | def _before_train(self): 42 | # TODO might not work for multiGPU?
43 | self.var = self.graph.get_tensor_by_name(self.var_name) 44 | 45 | def _trigger_epoch(self): 46 | val = self.trainer.sess.run(self.var) 47 | if self.func is not None: 48 | val = self.func(val) 49 | if isinstance(val, list): 50 | for idx, im in enumerate(val): 51 | self._dump_image(im, idx) 52 | else: 53 | self._dump_image(val) 54 | 55 | def _dump_image(self, im, idx=None): 56 | assert im.ndim in [2, 3], str(im.ndim) 57 | fname = os.path.join( 58 | self.log_dir, 59 | self.prefix + '-ep{:03d}{}.png'.format( 60 | self.epoch_num, '-' + str(idx) if idx else '')) 61 | res = im * self.scale 62 | if self.clip: 63 | res = np.clip(res, 0, 255) 64 | cv2.imwrite(fname, res.astype('uint8')) 65 | 66 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/callbacks/graph.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: graph.py 4 | # Author: Yuxin Wu 5 | 6 | """ Graph related callbacks""" 7 | 8 | from .base import Callback 9 | from ..utils import logger 10 | 11 | __all__ = ['RunOp'] 12 | 13 | class RunOp(Callback): 14 | """ Run an op periodically""" 15 | def __init__(self, setup_func, run_before=True, run_epoch=True): 16 | """ 17 | :param setup_func: a function that returns the op in the graph 18 | :param run_before: run the op before training 19 | :param run_epoch: run the op on every epoch trigger 20 | """ 21 | self.setup_func = setup_func 22 | self.run_before = run_before 23 | self.run_epoch = run_epoch 24 | 25 | def _setup_graph(self): 26 | self._op = self.setup_func() 27 | #self._op_name = self._op.name 28 | 29 | def _before_train(self): 30 | if self.run_before: 31 | self._op.run() 32 | 33 | def _trigger_epoch(self): 34 | if self.run_epoch: 35 | self._op.run() 36 | 37 | #def _log(self): 38 | #logger.info("Running op {} ...".format(self._op_name)) 39 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/callbacks/group.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: group.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | from contextlib import contextmanager 7 | import time 8 | 9 | from .base import Callback 10 | from .stat import * 11 | from ..utils import * 12 | 13 | __all__ = ['Callbacks'] 14 | 15 | class CallbackTimeLogger(object): 16 | def __init__(self): 17 | self.times = [] 18 | self.tot = 0 19 | 20 | def add(self, name, time): 21 | self.tot += time 22 | self.times.append((name, time)) 23 | 24 | @contextmanager 25 | def timed_callback(self, name): 26 | s = time.time() 27 | yield 28 | self.add(name, time.time() - s) 29 | 30 | def log(self): 31 | """ log the time of some heavy callbacks """ 32 | if self.tot < 3: 33 | return 34 | msgs = [] 35 | for name, t in self.times: 36 | if t / self.tot > 0.3 and t > 1: 37 | msgs.append("{}: {:.3f}sec".format(name, t)) 38 | logger.info( 39 | "Callbacks took {:.3f} sec in total. {}".format( 40 | self.tot, '; '.join(msgs))) 41 | 42 | class Callbacks(Callback): 43 | """ 44 | A container to hold all callbacks, and execute them in the right order and proper session. 
45 | """ 46 | def __init__(self, cbs): 47 | """ 48 | :param cbs: a list of `Callbacks` 49 | """ 50 | # check type 51 | for cb in cbs: 52 | assert isinstance(cb, Callback), cb.__class__ 53 | # move "StatPrinter" to the last 54 | for cb in cbs: 55 | if isinstance(cb, StatPrinter): 56 | sp = cb 57 | cbs.remove(sp) 58 | cbs.append(sp) 59 | break 60 | 61 | self.cbs = cbs 62 | 63 | def _setup_graph(self): 64 | with tf.name_scope(None): 65 | for cb in self.cbs: 66 | cb.setup_graph(self.trainer) 67 | 68 | def _before_train(self): 69 | for cb in self.cbs: 70 | cb.before_train() 71 | 72 | def _after_train(self): 73 | for cb in self.cbs: 74 | cb.after_train() 75 | 76 | def trigger_step(self): 77 | for cb in self.cbs: 78 | cb.trigger_step() 79 | 80 | def _trigger_epoch(self): 81 | tm = CallbackTimeLogger() 82 | 83 | test_sess_restored = False 84 | for cb in self.cbs: 85 | display_name = str(cb) 86 | with tm.timed_callback(display_name): 87 | cb.trigger_epoch() 88 | tm.log() 89 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/callbacks/stat.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File: stat.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | import re, os 7 | import operator 8 | import json 9 | 10 | from .base import Callback 11 | from ..utils import * 12 | 13 | __all__ = ['StatHolder', 'StatPrinter', 'SendStat'] 14 | 15 | class StatHolder(object): 16 | """ 17 | A holder to keep all statistics aside from tensorflow events. 18 | """ 19 | def __init__(self, log_dir): 20 | """ 21 | :param log_dir: directory to save the stats. 22 | """ 23 | self.set_print_tag([]) 24 | self.blacklist_tag = set() 25 | self.stat_now = {} 26 | 27 | self.log_dir = log_dir 28 | self.filename = os.path.join(log_dir, 'stat.json') 29 | if os.path.isfile(self.filename): 30 | logger.info("Loading stats from {}...".format(self.filename)) 31 | with open(self.filename) as f: 32 | self.stat_history = json.load(f) 33 | else: 34 | self.stat_history = [] 35 | 36 | def add_stat(self, k, v): 37 | """ 38 | Add a stat. 39 | :param k: name 40 | :param v: value 41 | """ 42 | self.stat_now[k] = float(v) 43 | 44 | def set_print_tag(self, print_tag): 45 | """ 46 | Set name of stats to print. 47 | """ 48 | self.print_tag = None if print_tag is None else set(print_tag) 49 | 50 | def add_blacklist_tag(self, blacklist_tag): 51 | self.blacklist_tag |= set(blacklist_tag) 52 | 53 | def get_stat_now(self, key): 54 | """ 55 | Return the value of a stat in the current epoch. 56 | """ 57 | return self.stat_now[key] 58 | 59 | def get_stat_history(self, key): 60 | ret = [] 61 | for h in self.stat_history: 62 | v = h.get(key, None) 63 | if v is not None: ret.append(v) 64 | v = self.stat_now.get(key, None) 65 | if v is not None: ret.append(v) 66 | return ret 67 | 68 | def finalize(self): 69 | """ 70 | Called after finishing adding stats. Will print and write stats to disk. 
71 | """ 72 | self._print_stat() 73 | self.stat_history.append(self.stat_now) 74 | self.stat_now = {} 75 | self._write_stat() 76 | 77 | def _print_stat(self): 78 | for k, v in sorted(self.stat_now.items(), key=operator.itemgetter(0)): 79 | if self.print_tag is None or k in self.print_tag: 80 | if k not in self.blacklist_tag: 81 | logger.info('{}: {:.5g}'.format(k, v)) 82 | 83 | def _write_stat(self): 84 | tmp_filename = self.filename + '.tmp' 85 | try: 86 | with open(tmp_filename, 'w') as f: 87 | json.dump(self.stat_history, f) 88 | os.rename(tmp_filename, self.filename) 89 | except IOError: # disk error sometimes.. 90 | logger.exception("Exception in StatHolder.finalize()!") 91 | 92 | class StatPrinter(Callback): 93 | """ 94 | Control what stats to print. 95 | """ 96 | def __init__(self, print_tag=None): 97 | """ 98 | :param print_tag: a list of regex to match scalar summary to print. 99 | If None, will print all scalar tags 100 | """ 101 | self.print_tag = print_tag 102 | 103 | def _before_train(self): 104 | self.trainer.stat_holder.set_print_tag(self.print_tag) 105 | 106 | def _trigger_epoch(self): 107 | self.trainer.stat_holder.add_stat('global_step', self.global_step) 108 | self.trainer.stat_holder.finalize() 109 | 110 | class SendStat(Callback): 111 | """ 112 | Execute a command with some specific stats. 113 | For example, send the stats to your phone through pushbullet: 114 | 115 | SendStat('curl -u your_id: https://api.pushbullet.com/v2/pushes \ 116 | -d type=note -d title="validation error" \ 117 | -d body={validation_error} > /dev/null 2>&1', 118 | 'validation_error') 119 | """ 120 | def __init__(self, command, stats): 121 | self.command = command 122 | if not isinstance(stats, list): 123 | stats = [stats] 124 | self.stats = stats 125 | 126 | def _trigger_epoch(self): 127 | holder = self.trainer.stat_holder 128 | v = {k: holder.get_stat_now(k) for k in self.stats} 129 | cmd = self.command.format(**v) 130 | ret = os.system(cmd) 131 | if ret != 0: 132 | logger.error("Command {} failed with ret={}!".format(cmd, ret)) 133 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import importlib 7 | import os 8 | import os.path 9 | 10 | from . import dataset 11 | from . 
import imgaug 12 | 13 | def _global_import(name): 14 | p = __import__(name, globals(), locals(), level=1) 15 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 16 | del globals()[name] 17 | for k in lst: 18 | globals()[k] = p.__dict__[k] 19 | 20 | __SKIP = ['dftools', 'dataset', 'imgaug'] 21 | for _, module_name, _ in walk_packages( 22 | [os.path.dirname(__file__)]): 23 | if not module_name.startswith('_') and \ 24 | module_name not in __SKIP: 25 | _global_import(module_name) 26 | 27 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/base.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: base.py 4 | # Author: Yuxin Wu 5 | 6 | 7 | from abc import abstractmethod, ABCMeta 8 | from ..utils import get_rng 9 | 10 | __all__ = ['DataFlow', 'ProxyDataFlow', 'RNGDataFlow'] 11 | 12 | class DataFlow(object): 13 | """ Base class for all DataFlow """ 14 | __metaclass__ = ABCMeta 15 | 16 | @abstractmethod 17 | def get_data(self): 18 | """ 19 | A generator to generate data as a list. 20 | Datapoint should be a mutable list. 21 | Each component should be assumed immutable. 22 | """ 23 | 24 | def size(self): 25 | """ 26 | Size of this data flow. 27 | """ 28 | raise NotImplementedError() 29 | 30 | def reset_state(self): 31 | """ 32 | Reset state of the dataflow. Will always be called before consuming data points. 33 | for example, RNG **HAS** to be reset here if used in the DataFlow. 34 | Otherwise it may not work well with prefetching, because different 35 | processes will have the same RNG state. 36 | """ 37 | pass 38 | 39 | 40 | class RNGDataFlow(DataFlow): 41 | """ A dataflow with rng""" 42 | def reset_state(self): 43 | self.rng = get_rng(self) 44 | 45 | class ProxyDataFlow(DataFlow): 46 | """ Base class for DataFlow that proxies another""" 47 | def __init__(self, ds): 48 | """ 49 | :param ds: a :mod:`DataFlow` instance to proxy 50 | """ 51 | self.ds = ds 52 | 53 | def reset_state(self): 54 | """ 55 | Will reset state of the proxied DataFlow 56 | """ 57 | self.ds.reset_state() 58 | 59 | def size(self): 60 | return self.ds.size() 61 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/dataset/.gitignore: -------------------------------------------------------------------------------- 1 | mnist_data 2 | cifar10_data 3 | cifar100_data 4 | svhn_data 5 | ilsvrc_metadata 6 | bsds500_data 7 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/dataset/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import os 7 | import os.path 8 | 9 | def global_import(name): 10 | p = __import__(name, globals(), locals(), level=1) 11 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 12 | for k in lst: 13 | globals()[k] = p.__dict__[k] 14 | 15 | for _, module_name, _ in walk_packages( 16 | [os.path.dirname(__file__)]): 17 | if not module_name.startswith('_'): 18 | global_import(module_name) 19 | 20 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/dataset/bsds500.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # 
File: bsds500.py 4 | # Author: Yuxin Wu 5 | 6 | import os, glob 7 | import cv2 8 | import numpy as np 9 | 10 | from ...utils import logger, get_rng, get_dataset_path 11 | from ...utils.fs import download 12 | from ..base import RNGDataFlow 13 | 14 | try: 15 | from scipy.io import loadmat 16 | __all__ = ['BSDS500'] 17 | except ImportError: 18 | logger.warn("Cannot import scipy. BSDS500 dataset won't be available!") 19 | __all__ = [] 20 | 21 | DATA_URL = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz" 22 | IMG_W, IMG_H = 481, 321 23 | 24 | class BSDS500(RNGDataFlow): 25 | """ 26 | `Berkeley Segmentation Data Set and Benchmarks 500 27 | `_. 28 | 29 | Produce (image, label) pair, where image has shape (321, 481, 3) and 30 | ranges in [0,255]. Label is binary and has shape (321, 481). 31 | Those pixels annotated as boundaries by <=2 annotators are set to 0. 32 | This is used in `Holistically-Nested Edge Detection 33 | `_. 34 | """ 35 | 36 | def __init__(self, name, data_dir=None, shuffle=True): 37 | """ 38 | :param name: 'train', 'test', 'val' 39 | :param data_dir: a directory containing the original 'BSR' directory. 40 | """ 41 | # check and download data 42 | if data_dir is None: 43 | data_dir = get_dataset_path('bsds500_data') 44 | if not os.path.isdir(os.path.join(data_dir, 'BSR')): 45 | download(DATA_URL, data_dir) 46 | filename = DATA_URL.split('/')[-1] 47 | filepath = os.path.join(data_dir, filename) 48 | import tarfile 49 | tarfile.open(filepath, 'r:gz').extractall(data_dir) 50 | self.data_root = os.path.join(data_dir, 'BSR', 'BSDS500', 'data') 51 | assert os.path.isdir(self.data_root) 52 | 53 | self.shuffle = shuffle 54 | assert name in ['train', 'test', 'val'] 55 | self._load(name) 56 | 57 | def _load(self, name): 58 | image_glob = os.path.join(self.data_root, 'images', name, '*.jpg') 59 | image_files = glob.glob(image_glob) 60 | gt_dir = os.path.join(self.data_root, 'groundTruth', name) 61 | self.data = np.zeros((len(image_files), IMG_H, IMG_W, 3), dtype='uint8') 62 | self.label = np.zeros((len(image_files), IMG_H, IMG_W), dtype='float32') 63 | 64 | for idx, f in enumerate(image_files): 65 | im = cv2.imread(f, cv2.IMREAD_COLOR) 66 | assert im is not None 67 | if im.shape[0] > im.shape[1]: 68 | im = np.transpose(im, (1,0,2)) 69 | assert im.shape[:2] == (IMG_H, IMG_W), "{} != {}".format(im.shape[:2], (IMG_H, IMG_W)) 70 | 71 | imgid = os.path.basename(f).split('.')[0] 72 | gt_file = os.path.join(gt_dir, imgid) 73 | gt = loadmat(gt_file)['groundTruth'][0] 74 | n_annot = gt.shape[0] 75 | gt = sum(gt[k]['Boundaries'][0][0] for k in range(n_annot)) 76 | gt[gt <= 2] = 0 77 | gt = gt.astype('float32') 78 | gt /= np.max(gt) 79 | if gt.shape[0] > gt.shape[1]: 80 | gt = gt.transpose() 81 | assert gt.shape == (IMG_H, IMG_W) 82 | 83 | self.data[idx] = im 84 | self.label[idx] = gt 85 | #self.label[self.label<0.9] = 0 86 | 87 | def size(self): 88 | return self.data.shape[0] 89 | 90 | def get_data(self): 91 | idxs = np.arange(self.data.shape[0]) 92 | if self.shuffle: 93 | self.rng.shuffle(idxs) 94 | for k in idxs: 95 | yield [self.data[k], self.label[k]] 96 | 97 | 98 | if __name__ == '__main__': 99 | a = BSDS500('val') 100 | for k in a.get_data(): 101 | cv2.imshow("haha", k[1].astype('uint8')*255) 102 | cv2.waitKey(1000) 103 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/dataset/svhn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env 
python2 2 | # -*- coding: UTF-8 -*- 3 | # File: svhn.py 4 | # Author: Yuxin Wu 5 | 6 | import os 7 | import random 8 | import numpy as np 9 | from six.moves import range 10 | 11 | from ...utils import logger, get_rng, get_dataset_path 12 | from ..base import RNGDataFlow 13 | 14 | try: 15 | import scipy.io 16 | __all__ = ['SVHNDigit'] 17 | except ImportError: 18 | logger.warn("Cannot import scipy. SVHNDigit dataset won't be available!") 19 | __all__ = [] 20 | 21 | SVHN_URL = "http://ufldl.stanford.edu/housenumbers/" 22 | 23 | class SVHNDigit(RNGDataFlow): 24 | """ 25 | SVHN Cropped Digit Dataset 26 | return img of 32x32x3, label of 0-9 27 | """ 28 | Cache = {} 29 | 30 | def __init__(self, name, data_dir=None, shuffle=True): 31 | """ 32 | :param name: 'train', 'test', or 'extra' 33 | :param data_dir: a directory containing the original {train,test,extra}_32x32.mat 34 | """ 35 | self.shuffle = shuffle 36 | 37 | if name in SVHNDigit.Cache: 38 | self.X, self.Y = SVHNDigit.Cache[name] 39 | return 40 | if data_dir is None: 41 | data_dir = get_dataset_path('svhn_data') 42 | assert name in ['train', 'test', 'extra'], name 43 | filename = os.path.join(data_dir, name + '_32x32.mat') 44 | assert os.path.isfile(filename), \ 45 | "File {} not found! Please download it from {}.".format(filename, SVHN_URL) 46 | logger.info("Loading {} ...".format(filename)) 47 | data = scipy.io.loadmat(filename) 48 | self.X = data['X'].transpose(3,0,1,2) 49 | self.Y = data['y'].reshape((-1)) 50 | self.Y[self.Y==10] = 0 51 | SVHNDigit.Cache[name] = (self.X, self.Y) 52 | 53 | def size(self): 54 | return self.X.shape[0] 55 | 56 | def get_data(self): 57 | n = self.X.shape[0] 58 | idxs = np.arange(n) 59 | if self.shuffle: 60 | self.rng.shuffle(idxs) 61 | for k in idxs: 62 | yield [self.X[k], self.Y[k]] 63 | 64 | @staticmethod 65 | def get_per_pixel_mean(): 66 | """ 67 | return 32x32x3 image 68 | """ 69 | a = SVHNDigit('train') 70 | b = SVHNDigit('test') 71 | c = SVHNDigit('extra') 72 | return np.concatenate((a.X, b.X, c.X)).mean(axis=0) 73 | 74 | if __name__ == '__main__': 75 | a = SVHNDigit('train') 76 | b = SVHNDigit.get_per_pixel_mean() 77 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/dataset/visualqa.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: visualqa.py 4 | # Author: Yuxin Wu 5 | 6 | from ..base import DataFlow 7 | from ...utils import * 8 | from ...utils.timer import * 9 | from six.moves import zip, map 10 | from collections import Counter 11 | import json 12 | 13 | __all__ = ['VisualQA'] 14 | 15 | def read_json(fname): 16 | f = open(fname) 17 | ret = json.load(f) 18 | f.close() 19 | return ret 20 | 21 | # TODO shuffle 22 | class VisualQA(DataFlow): 23 | """ 24 | Visual QA dataset. See http://visualqa.org/ 25 | Simply read q/a json file and produce q/a pairs in their original format. 
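A minimal usage sketch (the JSON paths are placeholders for illustration, not from the original):
    vqa = VisualQA('questions.json', 'annotations.json')
    for q, a in vqa.get_data():
        # q and a are the raw question / annotation dicts from the VQA JSON files
        ...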
26 | """ 27 | def __init__(self, question_file, annotation_file): 28 | with timed_operation('Reading VQA JSON file'): 29 | qobj, aobj = list(map(read_json, [question_file, annotation_file])) 30 | self.task_type = qobj['task_type'] 31 | self.questions = qobj['questions'] 32 | self._size = len(self.questions) 33 | 34 | self.anno = aobj['annotations'] 35 | assert len(self.anno) == len(self.questions), \ 36 | "{}!={}".format(len(self.anno), len(self.questions)) 37 | self._clean() 38 | 39 | def _clean(self): 40 | for a in self.anno: 41 | for aa in a['answers']: 42 | del aa['answer_id'] 43 | 44 | def size(self): 45 | return self._size 46 | 47 | def get_data(self): 48 | for q, a in zip(self.questions, self.anno): 49 | assert q['question_id'] == a['question_id'] 50 | yield [q, a] 51 | 52 | def get_common_answer(self, n): 53 | """ Get the n most common answers (could be phrases) 54 | n=3000 ~= thresh 4 55 | """ 56 | cnt = Counter() 57 | for anno in self.anno: 58 | cnt[anno['multiple_choice_answer'].lower()] += 1 59 | return [k[0] for k in cnt.most_common(n)] 60 | 61 | def get_common_question_words(self, n): 62 | """ Get the n most common words in questions 63 | n=4600 ~= thresh 6 64 | """ 65 | from nltk.tokenize import word_tokenize # will need to download 'punckt' 66 | cnt = Counter() 67 | for q in self.questions: 68 | cnt.update(word_tokenize(q['question'].lower())) 69 | del cnt['?'] # probably don't need this 70 | ret = cnt.most_common(n) 71 | return [k[0] for k in ret] 72 | 73 | if __name__ == '__main__': 74 | vqa = VisualQA('/home/wyx/data/VQA/MultipleChoice_mscoco_train2014_questions.json', 75 | '/home/wyx/data/VQA/mscoco_train2014_annotations.json') 76 | for k in vqa.get_data(): 77 | print(json.dumps(k)) 78 | break 79 | # vqa.get_common_question_words(100) 80 | vqa.get_common_answer(100) 81 | #from IPython import embed; embed() 82 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/dftools.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: dftools.py 3 | # Author: Yuxin Wu 4 | 5 | import sys, os 6 | import cv2 7 | import multiprocessing 8 | 9 | from ..utils.concurrency import DIE 10 | from ..utils.fs import mkdir_p 11 | 12 | __all__ = ['dump_dataset_images', 'dataflow_to_process_queue'] 13 | 14 | # TODO pass a name_func to write label as filename? 15 | def dump_dataset_images(ds, dirname, max_count=None, index=0): 16 | """ Dump images from a `DataFlow` to a directory. 17 | 18 | :param ds: a `DataFlow` instance. 19 | :param dirname: name of the directory. 20 | :param max_count: max number of images to dump 21 | :param index: the index of the image component in a data point. 22 | """ 23 | mkdir_p(dirname) 24 | if max_count is None: 25 | max_count = sys.maxint 26 | ds.reset_state() 27 | for i, dp in enumerate(ds.get_data()): 28 | if i % 100 == 0: 29 | print(i) 30 | if i > max_count: 31 | return 32 | img = dp[index] 33 | cv2.imwrite(os.path.join(dirname, "{}.jpg".format(i)), img) 34 | 35 | def dataflow_to_process_queue(ds, size, nr_consumer): 36 | """ 37 | Convert a `DataFlow` to a multiprocessing.Queue. 38 | The dataflow will only be reset in the spawned process. 39 | 40 | :param ds: a `DataFlow` 41 | :param size: size of the queue 42 | :param nr_consumer: number of consumer of the queue. 43 | will add this many of `DIE` sentinel to the end of the queue. 44 | :returns: (queue, process). 
The process will take data from `ds` to fill 45 | the queue once you start it. Each element is (task_id, dp). 46 | """ 47 | q = multiprocessing.Queue(size) 48 | class EnqueProc(multiprocessing.Process): 49 | def __init__(self, ds, q, nr_consumer): 50 | super(EnqueProc, self).__init__() 51 | self.ds = ds 52 | self.q = q 53 | 54 | def run(self): 55 | self.ds.reset_state() 56 | try: 57 | for idx, dp in enumerate(self.ds.get_data()): 58 | self.q.put((idx, dp)) 59 | finally: 60 | for _ in range(nr_consumer): 61 | self.q.put((DIE, None)) 62 | 63 | proc = EnqueProc(ds, q, nr_consumer) 64 | return q, proc 65 | 66 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/format.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File: format.py 3 | # Author: Yuxin Wu 4 | 5 | from ..utils import logger, get_rng 6 | from ..utils.timer import timed_operation 7 | from ..utils.loadcaffe import get_caffe_pb 8 | from .base import RNGDataFlow 9 | 10 | import random 11 | from tqdm import tqdm 12 | from six.moves import range 13 | 14 | try: 15 | import h5py 16 | except ImportError: 17 | logger.warn("Error in 'import h5py'. HDF5Data won't be available.") 18 | __all__ = [] 19 | else: 20 | __all__ = ['HDF5Data'] 21 | 22 | try: 23 | import lmdb 24 | except ImportError: 25 | logger.warn("Error in 'import lmdb'. LMDBData won't be available.") 26 | else: 27 | __all__.extend(['LMDBData', 'CaffeLMDB']) 28 | 29 | 30 | """ 31 | Adapters for different data format. 32 | """ 33 | 34 | class HDF5Data(RNGDataFlow): 35 | """ 36 | Zip data from different paths in an HDF5 file. Will load all data into memory. 37 | """ 38 | def __init__(self, filename, data_paths, shuffle=True): 39 | """ 40 | :param filename: h5 data file. 41 | :param data_paths: list of h5 paths to zipped. For example ['images', 'labels'] 42 | :param shuffle: shuffle the order of all data. 
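A minimal usage sketch (the file name and dataset paths are assumptions for illustration):
    ds = HDF5Data('train.h5', ['images', 'labels'])
    ds.reset_state()   # sets up the RNG used for shuffling
    for img, label in ds.get_data():
        ...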
43 | """ 44 | self.f = h5py.File(filename, 'r') 45 | logger.info("Loading {} to memory...".format(filename)) 46 | self.dps = [self.f[k].value for k in data_paths] 47 | lens = [len(k) for k in self.dps] 48 | assert all([k==lens[0] for k in lens]) 49 | self._size = lens[0] 50 | self.shuffle = shuffle 51 | 52 | def size(self): 53 | return self._size 54 | 55 | def get_data(self): 56 | idxs = list(range(self._size)) 57 | if self.shuffle: 58 | self.rng.shuffle(idxs) 59 | for k in idxs: 60 | yield [dp[k] for dp in self.dps] 61 | 62 | 63 | class LMDBData(RNGDataFlow): 64 | """ Read a lmdb and produce k,v pair """ 65 | def __init__(self, lmdb_dir, shuffle=True): 66 | self._lmdb = lmdb.open(lmdb_dir, readonly=True, lock=False, 67 | map_size=1099511627776 * 2, max_readers=100) 68 | self._txn = self._lmdb.begin() 69 | self._shuffle = shuffle 70 | self._size = self._txn.stat()['entries'] 71 | if shuffle: 72 | self.keys = self._txn.get('__keys__') 73 | if not self.keys: 74 | self.keys = [] 75 | with timed_operation("Loading LMDB keys ...", log_start=True), \ 76 | tqdm(total=self._size, ascii=True) as pbar: 77 | for k in self._txn.cursor(): 78 | if k != '__keys__': 79 | self.keys.append(k) 80 | pbar.update() 81 | 82 | def reset_state(self): 83 | super(LMDBData, self).reset_state() 84 | self._txn = self._lmdb.begin() 85 | 86 | def size(self): 87 | return self._size 88 | 89 | def get_data(self): 90 | if not self._shuffle: 91 | c = self._txn.cursor() 92 | while c.next(): 93 | k, v = c.item() 94 | if k != '__keys__': 95 | yield [k, v] 96 | else: 97 | s = self.size() 98 | self.rng.shuffle(self.keys) 99 | for k in self.keys: 100 | v = self._txn.get(k) 101 | yield [k, v] 102 | 103 | class LMDBDataDecoder(LMDBData): 104 | def __init__(self, lmdb_dir, decoder, shuffle=True): 105 | """ 106 | :param decoder: a function taking k, v and return a data point, 107 | or return None to skip 108 | """ 109 | super(LMDBDataDecoder, self).__init__(lmdb_dir, shuffle) 110 | self.decoder = decoder 111 | 112 | def get_data(self): 113 | for dp in super(LMDBDataDecoder, self).get_data(): 114 | v = self.decoder(dp[0], dp[1]) 115 | if v: yield v 116 | 117 | class CaffeLMDB(LMDBDataDecoder): 118 | """ Read a Caffe LMDB file where each value contains a caffe.Datum protobuf """ 119 | def __init__(self, lmdb_dir, shuffle=True): 120 | cpb = get_caffe_pb() 121 | def decoder(k, v): 122 | try: 123 | datum = cpb.Datum() 124 | datum.ParseFromString(v) 125 | img = np.fromstring(datum.data, dtype=np.uint8) 126 | img = img.reshape(datum.channels, datum.height, datum.width) 127 | except Exception: 128 | log_once("Cannot read key {}".format(k)) 129 | return None 130 | return [img.transpose(1, 2, 0), datum.label] 131 | 132 | super(CaffeLMDB, self).__init__( 133 | lmdb_dir, decoder=decoder, shuffle=shuffle) 134 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/image.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: image.py 3 | # Author: Yuxin Wu 4 | 5 | import numpy as np 6 | import cv2 7 | import copy 8 | from .base import DataFlow, ProxyDataFlow 9 | from .common import MapDataComponent, MapData 10 | from .imgaug import AugmentorList 11 | 12 | __all__ = ['ImageFromFile', 'AugmentImageComponent', 'AugmentImageComponents'] 13 | 14 | class ImageFromFile(DataFlow): 15 | """ Generate rgb images from list of files """ 16 | def __init__(self, files, channel=3, resize=None): 17 | """ :param files: list of file paths 
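A minimal usage sketch (the glob pattern is an assumption):
    ds = ImageFromFile(glob.glob('/some/dir/*.jpg'), channel=3, resize=(224, 224))
    # yields one [image] datapoint per file, as an RGB array resized to 224x224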
18 | :param channel: 1 or 3 channel 19 | :param resize: a (h, w) tuple. If given, will force a resize 20 | """ 21 | assert len(files) 22 | self.files = files 23 | self.channel = int(channel) 24 | self.resize = resize 25 | 26 | def size(self): 27 | return len(self.files) 28 | 29 | def get_data(self): 30 | for f in self.files: 31 | im = cv2.imread( 32 | f, cv2.IMREAD_GRAYSCALE if self.channel == 1 else cv2.IMREAD_COLOR) 33 | if self.channel == 3: 34 | im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) 35 | if self.resize is not None: 36 | im = cv2.resize(im, self.resize[::-1]) 37 | yield [im] 38 | 39 | 40 | class AugmentImageComponent(MapDataComponent): 41 | """ 42 | Augment the image component of datapoints 43 | """ 44 | def __init__(self, ds, augmentors, index=0): 45 | """ 46 | :param ds: a `DataFlow` instance. 47 | :param augmentors: a list of `ImageAugmentor` instance to be applied in order. 48 | :param index: the index (or list of indices) of the image component in the produced datapoints by `ds`. default to be 0 49 | """ 50 | self.augs = AugmentorList(augmentors) 51 | super(AugmentImageComponent, self).__init__( 52 | ds, lambda x: self.augs.augment(x), index) 53 | 54 | def reset_state(self): 55 | self.ds.reset_state() 56 | self.augs.reset_state() 57 | 58 | 59 | class AugmentImageComponents(MapData): 60 | """ Augment a list of images of the same shape, with the same parameters""" 61 | def __init__(self, ds, augmentors, index=(0,1)): 62 | """ 63 | :param ds: a `DataFlow` instance. 64 | :param augmentors: a list of `ImageAugmentor` instance to be applied in order. 65 | :param index: tuple of indices of the image components 66 | """ 67 | self.augs = AugmentorList(augmentors) 68 | self.ds = ds 69 | 70 | def func(dp): 71 | im = dp[index[0]] 72 | im, prms = self.augs._augment_return_params(im) 73 | dp[index[0]] = im 74 | for idx in index[1:]: 75 | dp[idx] = self.augs._augment(dp[idx], prms) 76 | return dp 77 | 78 | super(AugmentImageComponents, self).__init__(ds, func) 79 | 80 | def reset_state(self): 81 | self.ds.reset_state() 82 | self.augs.reset_state() 83 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | import os 6 | from pkgutil import walk_packages 7 | 8 | __all__ = [] 9 | 10 | def global_import(name): 11 | p = __import__(name, globals(), locals(), level=1) 12 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 13 | del globals()[name] 14 | for k in lst: 15 | globals()[k] = p.__dict__[k] 16 | 17 | for _, module_name, _ in walk_packages( 18 | [os.path.dirname(__file__)]): 19 | if not module_name.startswith('_'): 20 | global_import(module_name) 21 | 22 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: _test.py 4 | # Author: Yuxin Wu 5 | 6 | import sys 7 | import cv2 8 | from . 
import AugmentorList 9 | from .crop import * 10 | from .imgproc import * 11 | from .noname import * 12 | from .deform import * 13 | 14 | 15 | anchors = [(0.2, 0.2), (0.7, 0.2), (0.8, 0.8), (0.5, 0.5), (0.2, 0.5)] 16 | augmentors = AugmentorList([ 17 | Contrast((0.8,1.2)), 18 | Flip(horiz=True), 19 | GaussianDeform(anchors, (360,480), 0.2, randrange=20), 20 | #RandomCropRandomShape(0.3) 21 | ]) 22 | 23 | img = cv2.imread(sys.argv[1]) 24 | newimg, prms = augmentors._augment_return_params(img) 25 | cv2.imshow(" ", newimg.astype('uint8')) 26 | cv2.waitKey() 27 | 28 | newimg = augmentors._augment(img, prms) 29 | cv2.imshow(" ", newimg.astype('uint8')) 30 | cv2.waitKey() 31 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/base.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: base.py 3 | # Author: Yuxin Wu 4 | 5 | from abc import abstractmethod, ABCMeta 6 | from ...utils import get_rng 7 | from six.moves import zip 8 | 9 | __all__ = ['ImageAugmentor', 'AugmentorList'] 10 | 11 | class ImageAugmentor(object): 12 | """ Base class for an image augmentor""" 13 | __metaclass__ = ABCMeta 14 | 15 | def __init__(self): 16 | self.reset_state() 17 | 18 | def _init(self, params=None): 19 | if params: 20 | for k, v in params.items(): 21 | if k != 'self': 22 | setattr(self, k, v) 23 | 24 | def reset_state(self): 25 | self.rng = get_rng(self) 26 | 27 | def augment(self, img): 28 | """ 29 | Perform augmentation on the image in-place. 30 | :param img: an [h,w] or [h,w,c] image 31 | :returns: the augmented image, always of type 'float32' 32 | """ 33 | img, params = self._augment_return_params(img) 34 | return img 35 | 36 | def _augment_return_params(self, img): 37 | """ 38 | Augment the image and return both image and params 39 | """ 40 | prms = self._get_augment_params(img) 41 | return (self._augment(img, prms), prms) 42 | 43 | @abstractmethod 44 | def _augment(self, img, param): 45 | """ 46 | augment with the given param and return the new image 47 | """ 48 | 49 | def _get_augment_params(self, img): 50 | """ 51 | get the augmentor parameters 52 | """ 53 | return None 54 | 55 | def _fprop_coord(self, coord, param): 56 | return coord 57 | 58 | def _rand_range(self, low=1.0, high=None, size=None): 59 | if high is None: 60 | low, high = 0, low 61 | if size == None: 62 | size = [] 63 | return self.rng.uniform(low, high, size) 64 | 65 | class AugmentorList(ImageAugmentor): 66 | """ 67 | Augment by a list of augmentors 68 | """ 69 | def __init__(self, augmentors): 70 | """ 71 | :param augmentors: list of `ImageAugmentor` instance to be applied 72 | """ 73 | self.augs = augmentors 74 | super(AugmentorList, self).__init__() 75 | 76 | def _get_augment_params(self, img): 77 | # the next augmentor requires the previous one to finish 78 | raise RuntimeError("Cannot simply get parameters of a AugmentorList!") 79 | 80 | def _augment_return_params(self, img): 81 | assert img.ndim in [2, 3], img.ndim 82 | img = img.astype('float32') 83 | 84 | prms = [] 85 | for a in self.augs: 86 | img, prm = a._augment_return_params(img) 87 | prms.append(prm) 88 | return img, prms 89 | 90 | def _augment(self, img, param): 91 | assert img.ndim in [2, 3], img.ndim 92 | img = img.astype('float32') 93 | for aug, prm in zip(self.augs, param): 94 | img = aug._augment(img, prm) 95 | return img 96 | 97 | def reset_state(self): 98 | """ Will reset state of each augmentor """ 99 | for a in self.augs: 100 | 
a.reset_state() 101 | 102 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/deform.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: deform.py 3 | # Author: Yuxin Wu 4 | 5 | from .base import ImageAugmentor 6 | from ...utils import logger 7 | import numpy as np 8 | 9 | __all__ = ['GaussianDeform', 'GaussianMap'] 10 | 11 | # TODO really needs speedup 12 | 13 | class GaussianMap(object): 14 | """ Generate gaussian weighted deformation map""" 15 | def __init__(self, image_shape, sigma=0.5): 16 | assert len(image_shape) == 2 17 | self.shape = image_shape 18 | self.sigma = sigma 19 | 20 | def get_gaussian_weight(self, anchor): 21 | ret = np.zeros(self.shape, dtype='float32') 22 | 23 | y, x = np.mgrid[:self.shape[0], :self.shape[1]] 24 | y = y.astype('float32') / ret.shape[0] - anchor[0] 25 | x = x.astype('float32') / ret.shape[1] - anchor[1] 26 | g = np.exp(-(x**2 + y ** 2) / self.sigma) 27 | #cv2.imshow(" ", g) 28 | #cv2.waitKey() 29 | return g 30 | 31 | def np_sample(img, coords): 32 | # a numpy implementation of ImageSample layer 33 | coords = np.maximum(coords, 0) 34 | coords = np.minimum(coords, np.array([img.shape[0]-1, img.shape[1]-1])) 35 | 36 | lcoor = np.floor(coords).astype('int32') 37 | ucoor = lcoor + 1 38 | ucoor = np.minimum(ucoor, np.array([img.shape[0]-1, img.shape[1]-1])) 39 | diff = coords - lcoor 40 | neg_diff = 1.0 - diff 41 | 42 | lcoory, lcoorx = np.split(lcoor, 2, axis=2) 43 | ucoory, ucoorx = np.split(ucoor, 2, axis=2) 44 | diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3)) 45 | neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3)) 46 | diffy, diffx = np.split(diff, 2, axis=2) 47 | ndiffy, ndiffx = np.split(neg_diff, 2, axis=2) 48 | 49 | ret = img[lcoory,lcoorx,:] * ndiffx * ndiffy + \ 50 | img[ucoory, ucoorx,:] * diffx * diffy + \ 51 | img[lcoory, ucoorx,:] * ndiffy * diffx + \ 52 | img[ucoory,lcoorx,:] * diffy * ndiffx 53 | return ret[:,:,0,:] 54 | 55 | # TODO input/output with different shape 56 | class GaussianDeform(ImageAugmentor): 57 | """ 58 | Some kind of deformation. Quite slow. 59 | """ 60 | def __init__(self, anchors, shape, sigma=0.5, randrange=None): 61 | """ 62 | :param anchors: in [0,1] coordinate 63 | :param shape: image shape in [h, w] 64 | :param sigma: sigma for Gaussian weight 65 | :param randrange: default to shape[0] / 8 66 | """ 67 | logger.warn("GaussianDeform is slow. 
Consider using it with 4 or more prefetching processes.") 68 | super(GaussianDeform, self).__init__() 69 | self.anchors = anchors 70 | self.K = len(self.anchors) 71 | self.shape = shape 72 | self.grid = np.mgrid[0:self.shape[0], 0:self.shape[1]].transpose(1,2,0) 73 | self.grid = self.grid.astype('float32') # HxWx2 74 | 75 | gm = GaussianMap(self.shape, sigma=sigma) 76 | self.gws = np.array([gm.get_gaussian_weight(ank) 77 | for ank in self.anchors], dtype='float32') # KxHxW 78 | self.gws = self.gws.transpose(1, 2, 0) #HxWxK 79 | if randrange is None: 80 | self.randrange = self.shape[0] / 8 81 | else: 82 | self.randrange = randrange 83 | 84 | def _get_augment_params(self, img): 85 | v = self.rng.rand(self.K, 2).astype('float32') - 0.5 86 | v = v * 2 * self.randrange 87 | return v 88 | 89 | def _augment(self, img, v): 90 | grid = self.grid + np.dot(self.gws, v) 91 | return np_sample(img, grid) 92 | 93 | def _fprop_coord(self, coord, param): 94 | raise NotImplementedError() 95 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/geometry.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: geometry.py 4 | # Author: Yuxin Wu 5 | 6 | from .base import ImageAugmentor 7 | import math 8 | import cv2 9 | import numpy as np 10 | 11 | __all__ = ['Rotation', 'RotationAndCropValid'] 12 | 13 | class Rotation(ImageAugmentor): 14 | """ Random rotate the image w.r.t a random center""" 15 | def __init__(self, max_deg, center_range=(0,1), 16 | interp=cv2.INTER_CUBIC, 17 | border=cv2.BORDER_REPLICATE): 18 | """ 19 | :param max_deg: max abs value of the rotation degree 20 | :param center_range: the location of the rotation center 21 | """ 22 | self._init(locals()) 23 | 24 | def _get_augment_params(self, img): 25 | center = img.shape[1::-1] * self._rand_range( 26 | self.center_range[0], self.center_range[1], (2,)) 27 | deg = self._rand_range(-self.max_deg, self.max_deg) 28 | return cv2.getRotationMatrix2D(tuple(center), deg, 1) 29 | 30 | def _augment(self, img, rot_m): 31 | ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], 32 | flags=self.interp, borderMode=self.border) 33 | return ret 34 | 35 | class RotationAndCropValid(ImageAugmentor): 36 | """ Random rotate and crop the largest possible rect without the border 37 | This will produce images of different shapes. 
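For example, RotationAndCropValid(max_deg=10) rotates by a random angle in [-10, 10]
degrees and then keeps only the largest border-free rectangle, so downstream steps
must tolerate varying image sizes.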
38 | """ 39 | def __init__(self, max_deg, interp=cv2.INTER_CUBIC): 40 | self._init(locals()) 41 | 42 | def _get_augment_params(self, img): 43 | deg = self._rand_range(-self.max_deg, self.max_deg) 44 | return deg 45 | 46 | def _augment(self, img, deg): 47 | center = (img.shape[1]*0.5, img.shape[0]*0.5) 48 | rot_m = cv2.getRotationMatrix2D(center, deg, 1) 49 | ret = cv2.warpAffine(img, rot_m, img.shape[1::-1], 50 | flags=self.interp, borderMode=cv2.BORDER_CONSTANT) 51 | neww, newh = RotationAndCropValid.largest_rotated_rect(ret.shape[1], ret.shape[0], deg) 52 | neww = min(neww, ret.shape[1]) 53 | newh = min(newh, ret.shape[0]) 54 | newx = center[0] - neww * 0.5 55 | newy = center[1] - newh * 0.5 56 | #print(ret.shape, deg, newx, newy, neww, newh) 57 | return ret[newy:newy+newh,newx:newx+neww] 58 | 59 | @staticmethod 60 | def largest_rotated_rect(w, h, angle): 61 | """ http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders """ 62 | angle = angle / 180.0 * math.pi 63 | if w <= 0 or h <= 0: 64 | return 0,0 65 | 66 | width_is_longer = w >= h 67 | side_long, side_short = (w,h) if width_is_longer else (h,w) 68 | 69 | # since the solutions for angle, -angle and 180-angle are all the same, 70 | # if suffices to look at the first quadrant and the absolute values of sin,cos: 71 | sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle)) 72 | if side_short <= 2.*sin_a*cos_a*side_long: 73 | # half constrained case: two crop corners touch the longer side, 74 | # the other two corners are on the mid-line parallel to the longer line 75 | x = 0.5*side_short 76 | wr,hr = (x/sin_a,x/cos_a) if width_is_longer else (x/cos_a,x/sin_a) 77 | else: 78 | # fully constrained case: crop touches all 4 sides 79 | cos_2a = cos_a*cos_a - sin_a*sin_a 80 | wr,hr = (w*cos_a - h*sin_a)/cos_2a, (h*cos_a - w*sin_a)/cos_2a 81 | 82 | return wr,hr 83 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/imgproc.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: imgproc.py 3 | # Author: Yuxin Wu 4 | 5 | from .base import ImageAugmentor 6 | import numpy as np 7 | import cv2 8 | 9 | __all__ = ['Brightness', 'Contrast', 'MeanVarianceNormalize', 'GaussianBlur', 10 | 'Gamma', 'Clip'] 11 | 12 | class Brightness(ImageAugmentor): 13 | """ 14 | Random adjust brightness. 15 | """ 16 | def __init__(self, delta, clip=True): 17 | """ 18 | Randomly add a value within [-delta,delta], and clip in [0,255] if clip is True. 19 | """ 20 | assert delta > 0 21 | self._init(locals()) 22 | 23 | def _get_augment_params(self, img): 24 | v = self._rand_range(-self.delta, self.delta) 25 | return v 26 | 27 | def _augment(self, img, v): 28 | img += v 29 | if self.clip: 30 | img = np.clip(img, 0, 255) 31 | return img 32 | 33 | class Contrast(ImageAugmentor): 34 | """ 35 | Apply x = (x - mean) * contrast_factor + mean to each channel 36 | and clip to [0, 255] 37 | """ 38 | def __init__(self, factor_range, clip=True): 39 | """ 40 | :param factor_range: an interval to random sample the `contrast_factor`. 41 | :param clip: boolean. 
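For example, Contrast((0.8, 1.2)) scales each image's deviation from its per-channel
mean by a factor drawn uniformly from [0.8, 1.2].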
42 | """ 43 | self._init(locals()) 44 | 45 | def _get_augment_params(self, img): 46 | return self._rand_range(*self.factor_range) 47 | 48 | def _augment(self, img, r): 49 | mean = np.mean(img, axis=(0,1), keepdims=True) 50 | img = (img - mean) * r + mean 51 | if self.clip: 52 | img = np.clip(img, 0, 255) 53 | return img 54 | 55 | class MeanVarianceNormalize(ImageAugmentor): 56 | """ 57 | Linearly scales image to have zero mean and unit norm. 58 | x = (x - mean) / adjusted_stddev 59 | where adjusted_stddev = max(stddev, 1.0/sqrt(num_pixels * channels)) 60 | """ 61 | def __init__(self, all_channel=True): 62 | """ 63 | :param all_channel: if True, normalize all channels together. else separately. 64 | """ 65 | self.all_channel = all_channel 66 | 67 | def _augment(self, img, _): 68 | if self.all_channel: 69 | mean = np.mean(img) 70 | std = np.std(img) 71 | else: 72 | mean = np.mean(img, axis=(0,1), keepdims=True) 73 | std = np.std(img, axis=(0,1), keepdims=True) 74 | std = np.maximum(std, 1.0 / np.sqrt(np.prod(img.shape))) 75 | img = (img - mean) / std 76 | return img 77 | 78 | 79 | class GaussianBlur(ImageAugmentor): 80 | def __init__(self, max_size=3): 81 | """:params max_size: (maximum kernel size-1)/2""" 82 | self._init(locals()) 83 | 84 | def _get_augment_params(self, img): 85 | sx, sy = self.rng.randint(self.max_size, size=(2,)) 86 | sx = sx * 2 + 1 87 | sy = sy * 2 + 1 88 | return sx, sy 89 | 90 | def _augment(self, img, s): 91 | return cv2.GaussianBlur(img, s, sigmaX=0, sigmaY=0, 92 | borderType=cv2.BORDER_REPLICATE) 93 | 94 | 95 | class Gamma(ImageAugmentor): 96 | def __init__(self, range=(-0.5, 0.5)): 97 | self._init(locals()) 98 | def _get_augment_params(self, _): 99 | return self._rand_range(*self.range) 100 | def _augment(self, img, gamma): 101 | lut = ((np.arange(256, dtype='float32') / 255) ** (1. / (1. + gamma)) * 255).astype('uint8') 102 | img = np.clip(img, 0, 255).astype('uint8') 103 | img = cv2.LUT(img, lut).astype('float32') 104 | return img 105 | 106 | class Clip(ImageAugmentor): 107 | def __init__(self, min=0, max=255): 108 | assert delta > 0 109 | self._init(locals()) 110 | 111 | def _augment(self, img, _): 112 | img = np.clip(img, self.min, self.max) 113 | return img 114 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/meta.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: meta.py 4 | # Author: Yuxin Wu 5 | 6 | 7 | from .base import ImageAugmentor 8 | 9 | __all__ = ['RandomChooseAug', 'MapImage', 'Identity', 'RandomApplyAug'] 10 | 11 | class Identity(ImageAugmentor): 12 | def _augment(self, img, _): 13 | return img 14 | 15 | class RandomApplyAug(ImageAugmentor): 16 | """ Randomly apply the augmentor with a prob. 
Otherwise do nothing""" 17 | def __init__(self, aug, prob): 18 | self._init(locals()) 19 | 20 | def _get_augment_params(self, img): 21 | p = self.rng.rand() 22 | if p < self.prob: 23 | prm = self.aug._get_augment_params(img) 24 | return (True, prm) 25 | else: 26 | return (False, None) 27 | 28 | def reset_state(self): 29 | super(RandomApplyAug, self).reset_state() 30 | self.aug.reset_state() 31 | 32 | def _augment(self, img, prm): 33 | if not prm[0]: 34 | return img 35 | else: 36 | return self.aug._augment(img, prm[1]) 37 | 38 | class RandomChooseAug(ImageAugmentor): 39 | def __init__(self, aug_lists): 40 | """ 41 | :param aug_lists: list of augmentor, or list of (augmentor, probability) tuple 42 | """ 43 | if isinstance(aug_lists[0], (tuple, list)): 44 | prob = [k[1] for k in aug_lists] 45 | aug_lists = [k[0] for k in aug_lists] 46 | self._init(locals()) 47 | else: 48 | prob = 1.0 / len(aug_lists) 49 | self._init(locals()) 50 | 51 | def reset_state(self): 52 | super(RandomChooseAug, self).reset_state() 53 | for a in self.aug_lists: 54 | a.reset_state() 55 | 56 | def _get_augment_params(self, img): 57 | aug_idx = self.rng.choice(len(self.aug_lists), p=self.prob) 58 | aug_prm = self.aug_lists[aug_idx]._get_augment_params(img) 59 | return aug_idx, aug_prm 60 | 61 | def _augment(self, img, prm): 62 | idx, prm = prm 63 | return self.aug_lists[idx]._augment(img, prm) 64 | 65 | class MapImage(ImageAugmentor): 66 | """ 67 | Map the image array by a function. 68 | """ 69 | def __init__(self, func): 70 | """ 71 | :param func: a function which takes a image array and return a augmented one 72 | """ 73 | self.func = func 74 | 75 | def _augment(self, img, _): 76 | return self.func(img) 77 | 78 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/noise.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: noise.py 4 | # Author: Yuxin Wu 5 | 6 | from .base import ImageAugmentor 7 | import numpy as np 8 | import cv2 9 | 10 | __all__ = ['JpegNoise', 'GaussianNoise'] 11 | 12 | class JpegNoise(ImageAugmentor): 13 | def __init__(self, quality_range=(40, 100)): 14 | self._init(locals()) 15 | 16 | def _get_augment_params(self, img): 17 | return self.rng.randint(*self.quality_range) 18 | 19 | def _augment(self, img, q): 20 | enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1] 21 | return cv2.imdecode(enc, 1) 22 | 23 | 24 | class GaussianNoise(ImageAugmentor): 25 | def __init__(self, scale=10, clip=True): 26 | self._init(locals()) 27 | 28 | def _get_augment_params(self, img): 29 | return self.rng.randn(*img.shape) 30 | 31 | def _augment(self, img, noise): 32 | ret = img + noise 33 | if self.clip: 34 | ret = np.clip(ret, 0, 255) 35 | return ret 36 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/noname.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: noname.py 3 | # Author: Yuxin Wu 4 | 5 | from .base import ImageAugmentor 6 | from ...utils import logger 7 | import numpy as np 8 | import cv2 9 | 10 | __all__ = ['Flip', 'Resize', 'RandomResize'] 11 | 12 | class Flip(ImageAugmentor): 13 | """ 14 | Random flip. 15 | """ 16 | def __init__(self, horiz=False, vert=False, prob=0.5): 17 | """ 18 | Only one of horiz, vert can be set. 19 | 20 | :param horiz: whether or not apply horizontal flip. 
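For example, Flip(horiz=True, prob=0.5) mirrors the image left-right half of the time.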
21 | :param vert: whether or not apply vertical flip. 22 | :param prob: probability of flip. 23 | """ 24 | if horiz and vert: 25 | raise ValueError("Please use two Flip instead.") 26 | elif horiz: 27 | self.code = 1 28 | elif vert: 29 | self.code = 0 30 | else: 31 | raise ValueError("Are you kidding?") 32 | self.prob = prob 33 | self._init() 34 | 35 | def _get_augment_params(self, img): 36 | return self._rand_range() < self.prob 37 | 38 | def _augment(self, img, do): 39 | if do: 40 | img = cv2.flip(img, self.code) 41 | return img 42 | 43 | def _fprop_coord(self, coord, param): 44 | raise NotImplementedError() 45 | 46 | 47 | class Resize(ImageAugmentor): 48 | """ Resize image to a target size""" 49 | def __init__(self, shape): 50 | """ 51 | :param shape: shape in (h, w) 52 | """ 53 | self._init(locals()) 54 | 55 | def _augment(self, img, _): 56 | return cv2.resize( 57 | img, self.shape[::-1], 58 | interpolation=cv2.INTER_CUBIC) 59 | 60 | class RandomResize(ImageAugmentor): 61 | """ randomly rescale w and h of the image""" 62 | def __init__(self, xrange, yrange, minimum=(0,0), aspect_ratio_thres=0.15): 63 | """ 64 | :param xrange: (min, max) scaling ratio 65 | :param yrange: (min, max) scaling ratio 66 | :param minimum: (xmin, ymin). Avoid scaling down too much. 67 | :param aspect_ratio_thres: at most change k=20% aspect ratio 68 | """ 69 | self._init(locals()) 70 | 71 | def _get_augment_params(self, img): 72 | cnt = 0 73 | while True: 74 | sx = self._rand_range(*self.xrange) 75 | sy = self._rand_range(*self.yrange) 76 | destX = int(max(sx * img.shape[1], self.minimum[0])) 77 | destY = int(max(sy * img.shape[0], self.minimum[1])) 78 | oldr = img.shape[1] * 1.0 / img.shape[0] 79 | newr = destX * 1.0 / destY 80 | diff = abs(newr - oldr) / oldr 81 | if diff <= self.aspect_ratio_thres: 82 | return (destX, destY) 83 | cnt += 1 84 | if cnt > 50: 85 | logger.warn("RandomResize failed to augment an image") 86 | 87 | def _augment(self, img, dsize): 88 | return cv2.resize(img, dsize, interpolation=cv2.INTER_CUBIC) 89 | 90 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/imgaug/paste.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: paste.py 4 | # Author: Yuxin Wu 5 | 6 | from .base import ImageAugmentor 7 | 8 | from abc import abstractmethod 9 | import numpy as np 10 | 11 | __all__ = [ 'CenterPaste', 'BackgroundFiller', 'ConstantBackgroundFiller'] 12 | 13 | 14 | class BackgroundFiller(object): 15 | """ Base class for all BackgroundFiller""" 16 | def fill(self, background_shape, img): 17 | """ 18 | Return a proper background image of background_shape, given img 19 | 20 | :param background_shape: a shape of [h, w] 21 | :param img: an image 22 | :returns: a background image 23 | """ 24 | return self._fill(background_shape, img) 25 | 26 | @abstractmethod 27 | def _fill(self, background_shape, img): 28 | pass 29 | 30 | class ConstantBackgroundFiller(BackgroundFiller): 31 | """ Fill the background by a constant """ 32 | def __init__(self, value): 33 | """ 34 | :param value: the value to fill the background. 
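For example, CenterPaste((40, 40), ConstantBackgroundFiller(255)) below pastes a
smaller input image onto the center of a white 40x40 canvas.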
35 | """ 36 | self.value = value 37 | 38 | def _fill(self, background_shape, img): 39 | assert img.ndim in [3, 1] 40 | if img.ndim == 3: 41 | return_shape = background_shape + (3,) 42 | else: 43 | return_shape = background_shape 44 | return np.zeros(return_shape) + self.value 45 | 46 | class CenterPaste(ImageAugmentor): 47 | """ 48 | Paste the image onto the center of a background canvas. 49 | """ 50 | def __init__(self, background_shape, background_filler=None): 51 | """ 52 | :param background_shape: shape of the background canvas. 53 | :param background_filler: a `BackgroundFiller` instance. Default to zero-filler. 54 | """ 55 | if background_filler is None: 56 | background_filler = ConstantBackgroundFiller(0) 57 | 58 | self._init(locals()) 59 | 60 | def _augment(self, img, _): 61 | img_shape = img.shape[:2] 62 | assert self.background_shape[0] > img_shape[0] and self.background_shape[1] > img_shape[1] 63 | 64 | background = self.background_filler.fill( 65 | self.background_shape, img) 66 | h0 = int((self.background_shape[0] - img_shape[0]) * 0.5) 67 | w0 = int((self.background_shape[1] - img_shape[1]) * 0.5) 68 | background[h0:h0+img_shape[0], w0:w0+img_shape[1]] = img 69 | img = background 70 | return img 71 | 72 | def _fprop_coord(self, coord, param): 73 | raise NotImplementedError() 74 | 75 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/raw.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: raw.py 4 | # Author: Yuxin Wu 5 | 6 | import numpy as np 7 | from six.moves import range 8 | from .base import DataFlow, RNGDataFlow 9 | from ..utils.serialize import loads 10 | 11 | __all__ = ['FakeData', 'DataFromQueue', 'DataFromList'] 12 | try: 13 | import zmq 14 | except: 15 | pass 16 | else: 17 | __all__.append('DataFromSocket') 18 | 19 | class FakeData(RNGDataFlow): 20 | """ Generate fake fixed data of given shapes""" 21 | def __init__(self, shapes, size, random=True, dtype='float32'): 22 | """ 23 | :param shapes: a list of lists/tuples 24 | :param size: size of this DataFlow 25 | """ 26 | super(FakeData, self).__init__() 27 | self.shapes = shapes 28 | self._size = int(size) 29 | self.random = random 30 | self.dtype = dtype 31 | 32 | def size(self): 33 | return self._size 34 | 35 | def get_data(self): 36 | if self.random: 37 | for _ in range(self._size): 38 | yield [self.rng.rand(*k).astype(self.dtype) for k in self.shapes] 39 | else: 40 | v = [self.rng.rand(*k).astype(self.dtype) for k in self.shapes] 41 | for _ in range(self._size): 42 | yield v 43 | 44 | class DataFromQueue(DataFlow): 45 | """ Produce data from a queue """ 46 | def __init__(self, queue): 47 | self.queue = queue 48 | 49 | def get_data(self): 50 | while True: 51 | yield self.queue.get() 52 | 53 | class DataFromList(RNGDataFlow): 54 | """ Produce data from a list""" 55 | def __init__(self, lst, shuffle=True): 56 | super(DataFromList, self).__init__() 57 | self.lst = lst 58 | self.shuffle = shuffle 59 | 60 | def size(self): 61 | return len(self.lst) 62 | 63 | def get_data(self): 64 | if not self.shuffle: 65 | for k in self.lst: 66 | yield k 67 | else: 68 | idxs = self.rng.shuffle(np.arange(len(self.lst))) 69 | for k in idxs: 70 | yield self.lst[k] 71 | 72 | class DataFromSocket(DataFlow): 73 | """ Produce data from a zmq socket""" 74 | def __init__(self, socket_name): 75 | self._name = socket_name 76 | 77 | def get_data(self): 78 | try: 79 | ctx = 
zmq.Context() 80 | socket = ctx.socket(zmq.PULL) 81 | socket.bind(self._name) 82 | 83 | while True: 84 | dp = loads(socket.recv(copy=False)) 85 | yield dp 86 | finally: 87 | ctx.destroy(linger=0) 88 | 89 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/remote.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: remote.py 4 | # Author: Yuxin Wu 5 | 6 | from ..utils import logger 7 | try: 8 | import zmq 9 | except ImportError: 10 | logger.warn("Error in 'import zmq'. remote feature won't be available") 11 | __all__ = [] 12 | else: 13 | __all__ = ['serve_data', 'RemoteData'] 14 | 15 | from .base import DataFlow 16 | from .common import RepeatedData 17 | from ..utils import logger 18 | from ..utils.serialize import dumps, loads 19 | 20 | def serve_data(ds, addr): 21 | ctx = zmq.Context() 22 | socket = ctx.socket(zmq.PUSH) 23 | socket.set_hwm(10) 24 | socket.bind(addr) 25 | ds = RepeatedData(ds, -1) 26 | try: 27 | ds.reset_state() 28 | logger.info("Serving data at {}".format(addr)) 29 | while True: 30 | for dp in ds.get_data(): 31 | socket.send(dumps(dp), copy=False) 32 | finally: 33 | socket.setsockopt(zmq.LINGER, 0) 34 | socket.close() 35 | if not ctx.closed: 36 | ctx.destroy(0) 37 | 38 | class RemoteData(DataFlow): 39 | def __init__(self, addr): 40 | self.ctx = zmq.Context() 41 | self.socket = self.ctx.socket(zmq.PULL) 42 | self.socket.set_hwm(10) 43 | self.socket.connect(addr) 44 | 45 | def get_data(self): 46 | while True: 47 | dp = loads(self.socket.recv(copy=False)) 48 | yield dp 49 | 50 | if __name__ == '__main__': 51 | import sys 52 | from tqdm import tqdm 53 | from .raw import FakeData 54 | addr = "tcp://127.0.0.1:8877" 55 | if sys.argv[1] == 'serve': 56 | ds = FakeData([(128,244,244,3)], 1000) 57 | serve_data(ds, addr) 58 | else: 59 | ds = RemoteData(addr) 60 | logger.info("Each DP is 73.5MB") 61 | with tqdm(total=10000) as pbar: 62 | for k in ds.get_data(): 63 | pbar.update() 64 | 65 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/dataflow/tf_func.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: tf_func.py 4 | # Author: Yuxin Wu 5 | 6 | from .base import ProxyDataFlow 7 | from ..utils import logger 8 | 9 | try: 10 | import tensorflow as tf 11 | except ImportError: 12 | logger.warn("Cannot import tensorflow. 
TFFuncMapper won't be available.") 13 | __all__ = [] 14 | else: 15 | __all__ = ['TFFuncMapper'] 16 | 17 | class TFFuncMapper(ProxyDataFlow): 18 | def __init__(self, ds, 19 | get_placeholders, symbf, apply_symbf_on_dp, device='/cpu:0'): 20 | """ 21 | :param get_placeholders: a function returning the placeholders 22 | :param symbf: a symbolic function taking the placeholders 23 | :param apply_symbf_on_dp: apply the above function to datapoint 24 | """ 25 | super(TFFuncMapper, self).__init__(ds) 26 | self.get_placeholders = get_placeholders 27 | self.symbf = symbf 28 | self.apply_symbf_on_dp = apply_symbf_on_dp 29 | self.device = device 30 | 31 | def reset_state(self): 32 | super(TFFuncMapper, self).reset_state() 33 | self.graph = tf.Graph() 34 | with self.graph.as_default(), \ 35 | tf.device(self.device): 36 | self.placeholders = self.get_placeholders() 37 | self.output_vars = self.symbf(self.placeholders) 38 | self.sess = tf.Session() 39 | 40 | def run_func(vals): 41 | return self.sess.run(self.output_vars, 42 | feed_dict=dict(zip(self.placeholders, vals))) 43 | self.run_func = run_func 44 | 45 | def get_data(self): 46 | for dp in self.ds.get_data(): 47 | dp = self.apply_symbf_on_dp(dp, self.run_func) 48 | if dp: 49 | yield dp 50 | 51 | 52 | if __name__ == '__main__': 53 | from .raw import FakeData 54 | from .prefetch import PrefetchDataZMQ 55 | from .image import AugmentImageComponent 56 | from . import imgaug 57 | ds = FakeData([[224, 224, 3]], 100000, random=False) 58 | 59 | def tf_aug(v): 60 | v = v[0] 61 | v = tf.image.random_brightness(v, 0.1) 62 | v = tf.image.random_contrast(v, 0.8, 1.2) 63 | v = tf.image.random_flip_left_right(v) 64 | return v 65 | ds = TFFuncMapper(ds, 66 | lambda: [tf.placeholder(tf.float32, [224, 224, 3], name='img')], 67 | tf_aug, 68 | lambda dp, f: [f([dp[0]])[0]] 69 | ) 70 | #ds = AugmentImageComponent(ds, 71 | #[imgaug.Brightness(0.1, clip=False), 72 | #imgaug.Contrast((0.8, 1.2), clip=False), 73 | #imgaug.Flip(horiz=True) 74 | #]) 75 | #ds = PrefetchDataZMQ(ds, 4) 76 | ds.reset_state() 77 | 78 | import tqdm 79 | itr = ds.get_data() 80 | for k in tqdm.trange(100000): 81 | next(itr) 82 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | from types import ModuleType 7 | import tensorflow as tf 8 | import os 9 | import os.path 10 | from ..utils import logger 11 | 12 | def _global_import(name): 13 | p = __import__(name, globals(), locals(), level=1) 14 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 15 | for k in lst: 16 | globals()[k] = p.__dict__[k] 17 | 18 | for _, module_name, _ in walk_packages( 19 | [os.path.dirname(__file__)]): 20 | if not module_name.startswith('_'): 21 | _global_import(module_name) 22 | 23 | 24 | class LinearWrap(object): 25 | """ A simple wrapper to easily create linear graph, 26 | for layers with one input&output, or tf function with one input&output 27 | """ 28 | 29 | class TFModuleFunc(object): 30 | def __init__(self, mod, tensor): 31 | self._mod = mod 32 | self._t = tensor 33 | 34 | def __getattr__(self, name): 35 | ret = getattr(self._mod, name) 36 | if isinstance(ret, ModuleType): 37 | return LinearWrap.TFModuleFunc(ret, self._t) 38 | else: 39 | # assume to be a tf function 40 | def f(*args, **kwargs): 41 | o = ret(self._t, *args, **kwargs) 42 | return 
LinearWrap(o) 43 | return f 44 | 45 | def __init__(self, tensor): 46 | self._t = tensor 47 | 48 | def __getattr__(self, layer_name): 49 | layer = eval(layer_name) 50 | if hasattr(layer, 'f'): 51 | # this is a registered tensorpack layer 52 | if layer.use_scope: 53 | def f(name, *args, **kwargs): 54 | ret = layer(name, self._t, *args, **kwargs) 55 | return LinearWrap(ret) 56 | else: 57 | def f(*args, **kwargs): 58 | ret = layer(self._t, *args, **kwargs) 59 | return LinearWrap(ret) 60 | return f 61 | else: 62 | if layer_name != 'tf': 63 | logger.warn("You're calling LinearWrap.__getattr__ with something neither a layer nor 'tf'!") 64 | assert isinstance(layer, ModuleType) 65 | return LinearWrap.TFModuleFunc(layer, self._t) 66 | 67 | def apply(self, func, *args, **kwargs): 68 | """ send tensor to the first argument of a simple func""" 69 | ret = func(self._t, *args, **kwargs) 70 | return LinearWrap(ret) 71 | 72 | def __call__(self): 73 | return self._t 74 | 75 | def tensor(self): 76 | return self._t 77 | 78 | def print_tensor(self): 79 | print(self._t) 80 | return self 81 | 82 | 83 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/_common.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: _common.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | from functools import wraps 7 | import six 8 | import copy, os 9 | 10 | from ..tfutils import * 11 | from ..tfutils.modelutils import * 12 | from ..tfutils.summary import * 13 | from ..utils import logger 14 | 15 | # make sure each layer is only logged once 16 | _layer_logged = set() 17 | 18 | def disable_layer_logging(): 19 | class ContainEverything: 20 | def __contains__(self, x): 21 | return True 22 | # can use nonlocal in python3, but how 23 | globals()['_layer_logged'] = ContainEverything() 24 | 25 | def layer_register( 26 | summary_activation=False, 27 | log_shape=True, 28 | use_scope=True): 29 | """ 30 | Register a layer. 31 | :param summary_activation: Define the default behavior of whether to 32 | summary the output(activation) of this layer. 33 | Can be overriden when creating the layer. 34 | :param log_shape: log input/output shape of this layer 35 | """ 36 | 37 | def wrapper(func): 38 | @wraps(func) 39 | def wrapped_func(*args, **kwargs): 40 | if use_scope: 41 | name, inputs = args[0], args[1] 42 | args = args[1:] # actual positional args used to call func 43 | assert isinstance(name, six.string_types), name 44 | else: 45 | assert not log_shape and not summary_activation 46 | inputs = args[0] 47 | name = None 48 | do_summary = kwargs.pop( 49 | 'summary_activation', summary_activation) 50 | 51 | # TODO use inspect.getcallargs to enhance? 
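# kwargs given explicitly at the call site take precedence over values registered
# for this layer through argscope: the scope values are copied first, then
# overwritten by the kwargs update below.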
52 | # update from current argument scope 53 | actual_args = copy.copy(get_arg_scope()[func.__name__]) 54 | actual_args.update(kwargs) 55 | 56 | if name is not None: 57 | with tf.variable_scope(name) as scope: 58 | do_log_shape = log_shape and scope.name not in _layer_logged 59 | do_summary = do_summary and scope.name not in _layer_logged 60 | if do_log_shape: 61 | logger.info("{} input: {}".format(scope.name, get_shape_str(inputs))) 62 | 63 | # run the actual function 64 | outputs = func(*args, **actual_args) 65 | 66 | if do_log_shape: 67 | # log shape info and add activation 68 | logger.info("{} output: {}".format( 69 | scope.name, get_shape_str(outputs))) 70 | _layer_logged.add(scope.name) 71 | 72 | if do_summary: 73 | if isinstance(outputs, list): 74 | for x in outputs: 75 | add_activation_summary(x, scope.name) 76 | else: 77 | add_activation_summary(outputs, scope.name) 78 | else: 79 | # run the actual function 80 | outputs = func(*args, **actual_args) 81 | return outputs 82 | 83 | wrapped_func.f = func # attribute to access the underlining function object 84 | wrapped_func.use_scope = use_scope 85 | return wrapped_func 86 | 87 | # need some special handling for sphinx to work with the arguments 88 | on_doc = os.environ.get('READTHEDOCS') == 'True' \ 89 | or os.environ.get('TENSORPACK_DOC_BUILDING') 90 | if on_doc: 91 | from decorator import decorator 92 | wrapper = decorator(wrapper) 93 | 94 | return wrapper 95 | 96 | def shape2d(a): 97 | """ 98 | a: a int or tuple/list of length 2 99 | """ 100 | if type(a) == int: 101 | return [a, a] 102 | if isinstance(a, (list, tuple)): 103 | assert len(a) == 2 104 | return list(a) 105 | raise RuntimeError("Illegal shape: {}".format(a)) 106 | 107 | def shape4d(a): 108 | # for use with tensorflow NHWC ops 109 | return [1] + shape2d(a) + [1] 110 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: _test.py 4 | # Author: Yuxin Wu 5 | 6 | import tensorflow as tf 7 | import numpy as np 8 | from . 
import * 9 | import unittest 10 | 11 | class TestModel(unittest.TestCase): 12 | def run_variable(self, var): 13 | sess = tf.Session() 14 | sess.run(tf.initialize_all_variables()) 15 | if isinstance(var, list): 16 | return sess.run(var) 17 | else: 18 | return sess.run([var])[0] 19 | 20 | def make_variable(self, *args): 21 | if len(args) > 1: 22 | return [tf.Variable(k) for k in args] 23 | else: 24 | return tf.Variable(args[0]) 25 | 26 | def run_test_case(case): 27 | suite = unittest.TestLoader().loadTestsFromTestCase(case) 28 | unittest.TextTestRunner(verbosity=2).run(suite) 29 | 30 | if __name__ == '__main__': 31 | import tensorpack 32 | from tensorpack.utils import logger 33 | logger.disable_logger() 34 | subs = tensorpack.models._test.TestModel.__subclasses__() 35 | for cls in subs: 36 | run_test_case(cls) 37 | 38 | 39 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/batch_norm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: batch_norm.py 4 | # Author: Yuxin Wu 5 | 6 | import tensorflow as tf 7 | from copy import copy 8 | import re 9 | 10 | from .model_desc import get_current_tower_context 11 | from ..utils import logger, EXTRA_SAVE_VARS_KEY 12 | from ._common import layer_register 13 | 14 | __all__ = ['BatchNorm'] 15 | 16 | # http://stackoverflow.com/questions/33949786/how-could-i-use-batch-normalization-in-tensorflow 17 | # TF batch_norm only works for 4D tensor right now: #804 18 | # decay: being too close to 1 leads to slow start-up. torch use 0.9. 19 | # eps: torch: 1e-5. Lasagne: 1e-4 20 | @layer_register(log_shape=False) 21 | def BatchNorm(x, use_local_stat=None, decay=0.9, epsilon=1e-5): 22 | """ 23 | Batch normalization layer as described in: 24 | 25 | `Batch Normalization: Accelerating Deep Network Training by 26 | Reducing Internal Covariance Shift `_. 27 | 28 | Notes: 29 | 30 | * Whole-population mean/variance is calculated by a running-average mean/variance. 31 | * Epsilon for variance is set to 1e-5, as is `torch/nn `_. 32 | 33 | :param input: a NHWC or NC tensor 34 | :param use_local_stat: bool. whether to use mean/var of this batch or the moving average. 35 | Default to True in training and False in predicting. 36 | :param decay: decay rate. default to 0.999. 37 | :param epsilon: default to 1e-5. 38 | """ 39 | 40 | shape = x.get_shape().as_list() 41 | assert len(shape) in [2, 4] 42 | 43 | n_out = shape[-1] # channel 44 | assert n_out is not None 45 | beta = tf.get_variable('beta', [n_out]) 46 | gamma = tf.get_variable('gamma', [n_out], 47 | initializer=tf.ones_initializer) 48 | 49 | if len(shape) == 2: 50 | batch_mean, batch_var = tf.nn.moments(x, [0], keep_dims=False) 51 | else: 52 | batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], keep_dims=False) 53 | # just to make a clear name. 
54 | batch_mean = tf.identity(batch_mean, 'mean') 55 | batch_var = tf.identity(batch_var, 'variance') 56 | 57 | emaname = 'EMA' 58 | ctx = get_current_tower_context() 59 | if use_local_stat is None: 60 | use_local_stat = ctx.is_training 61 | assert use_local_stat == ctx.is_training 62 | 63 | if ctx.is_training: 64 | # training tower 65 | with tf.name_scope(None): # https://github.com/tensorflow/tensorflow/issues/2740 66 | ema = tf.train.ExponentialMovingAverage(decay=decay, name=emaname) 67 | ema_apply_op = ema.apply([batch_mean, batch_var]) 68 | ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var) 69 | if ctx.is_main_training_tower: 70 | # inside main training tower 71 | tf.add_to_collection(EXTRA_SAVE_VARS_KEY, ema_mean) 72 | tf.add_to_collection(EXTRA_SAVE_VARS_KEY, ema_var) 73 | else: 74 | assert not use_local_stat 75 | if ctx.is_main_tower: 76 | # not training, but main tower. need to create the vars 77 | with tf.name_scope(None): 78 | ema = tf.train.ExponentialMovingAverage(decay=decay, name=emaname) 79 | ema_apply_op = ema.apply([batch_mean, batch_var]) 80 | ema_mean, ema_var = ema.average(batch_mean), ema.average(batch_var) 81 | else: 82 | # use statistics in another tower 83 | G = tf.get_default_graph() 84 | # figure out the var name 85 | with tf.name_scope(None): 86 | ema = tf.train.ExponentialMovingAverage(decay=decay, name=emaname) 87 | mean_var_name = ema.average_name(batch_mean) + ':0' 88 | var_var_name = ema.average_name(batch_var) + ':0' 89 | ema_mean = ctx.find_tensor_in_main_tower(G, mean_var_name) 90 | ema_var = ctx.find_tensor_in_main_tower(G, var_var_name) 91 | #logger.info("In prediction, using {} instead of {} for {}".format( 92 | #mean_name, ema_mean.name, batch_mean.name)) 93 | 94 | if use_local_stat: 95 | with tf.control_dependencies([ema_apply_op]): 96 | batch = tf.cast(tf.shape(x)[0], tf.float32) 97 | mul = tf.select(tf.equal(batch, 1.0), 1.0, batch / (batch - 1)) 98 | batch_var = batch_var * mul # use unbiased variance estimator in training 99 | return tf.nn.batch_normalization( 100 | x, batch_mean, batch_var, beta, gamma, epsilon, 'bn') 101 | else: 102 | return tf.nn.batch_normalization( 103 | x, ema_mean, ema_var, beta, gamma, epsilon, 'bn') 104 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/conv2d.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: conv2d.py 4 | # Author: Yuxin Wu 5 | 6 | import numpy as np 7 | import tensorflow as tf 8 | import math 9 | from ._common import * 10 | from ..utils import map_arg 11 | 12 | __all__ = ['Conv2D'] 13 | 14 | @layer_register() 15 | def Conv2D(x, out_channel, kernel_shape, 16 | padding='SAME', stride=1, 17 | W_init=None, b_init=None, 18 | nl=tf.nn.relu, split=1, use_bias=True): 19 | """ 20 | 2D convolution on 4D inputs. 21 | 22 | :param input: a tensor of shape NHWC 23 | :param kernel_shape: (h, w) or a int 24 | :param stride: (h, w) or a int. default to 1 25 | :param padding: 'valid' or 'same'. default to 'same' 26 | :param split: split channels as used in Alexnet. Default to 1 (no split) 27 | :param W_init: initializer for W. default to `xavier_initializer_conv2d`. 28 | :param b_init: initializer for b. default to zero initializer. 29 | :param nl: nonlinearity. default to `relu`. 30 | :param use_bias: whether to use bias. 
a boolean default to True 31 | :returns: a NHWC tensor 32 | """ 33 | in_shape = x.get_shape().as_list() 34 | in_channel = in_shape[-1] 35 | assert in_channel is not None, "Input to Conv2D cannot have unknown channel!" 36 | assert in_channel % split == 0 37 | assert out_channel % split == 0 38 | 39 | kernel_shape = shape2d(kernel_shape) 40 | padding = padding.upper() 41 | filter_shape = kernel_shape + [in_channel / split, out_channel] 42 | stride = shape4d(stride) 43 | 44 | if W_init is None: 45 | #W_init = tf.truncated_normal_initializer(stddev=3e-2) 46 | W_init = tf.contrib.layers.xavier_initializer_conv2d() 47 | if b_init is None: 48 | b_init = tf.constant_initializer() 49 | 50 | W = tf.get_variable('W', filter_shape, initializer=W_init) 51 | if use_bias: 52 | b = tf.get_variable('b', [out_channel], initializer=b_init) 53 | 54 | if split == 1: 55 | conv = tf.nn.conv2d(x, W, stride, padding) 56 | else: 57 | inputs = tf.split(3, split, x) 58 | kernels = tf.split(3, split, W) 59 | outputs = [tf.nn.conv2d(i, k, stride, padding) 60 | for i, k in zip(inputs, kernels)] 61 | conv = tf.concat(3, outputs) 62 | return nl(tf.nn.bias_add(conv, b) if use_bias else conv, name='output') 63 | 64 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/fc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: fc.py 4 | # Author: Yuxin Wu 5 | 6 | import tensorflow as tf 7 | import math 8 | 9 | from ._common import layer_register 10 | from ..tfutils.symbolic_functions import * 11 | 12 | __all__ = ['FullyConnected'] 13 | 14 | @layer_register() 15 | def FullyConnected(x, out_dim, 16 | W_init=None, b_init=None, 17 | nl=tf.nn.relu, use_bias=True): 18 | """ 19 | Fully-Connected layer. 20 | 21 | :param input: a tensor to be flattened except the first dimension. 22 | :param out_dim: output dimension 23 | :param W_init: initializer for W. default to `xavier_initializer_conv2d`. 24 | :param b_init: initializer for b. default to zero initializer. 25 | :param nl: nonlinearity. default to `relu`. 26 | :param use_bias: whether to use bias. a boolean default to True 27 | :returns: a 2D tensor 28 | """ 29 | x = batch_flatten(x) 30 | in_dim = x.get_shape().as_list()[1] 31 | 32 | if W_init is None: 33 | #W_init = tf.truncated_normal_initializer(stddev=1 / math.sqrt(float(in_dim))) 34 | W_init = tf.uniform_unit_scaling_initializer(factor=1.43) 35 | if b_init is None: 36 | b_init = tf.constant_initializer() 37 | 38 | W = tf.get_variable('W', [in_dim, out_dim], initializer=W_init) 39 | if use_bias: 40 | b = tf.get_variable('b', [out_dim], initializer=b_init) 41 | prod = tf.nn.xw_plus_b(x, W, b) if use_bias else tf.matmul(x, W) 42 | return nl(prod, name='output') 43 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/nonlin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: nonlin.py 4 | # Author: Yuxin Wu 5 | 6 | import tensorflow as tf 7 | from copy import copy 8 | 9 | from ._common import * 10 | from .batch_norm import BatchNorm 11 | 12 | __all__ = ['Maxout', 'PReLU', 'LeakyReLU', 'BNReLU'] 13 | 14 | @layer_register() 15 | def Maxout(x, num_unit): 16 | """ 17 | Maxout as in `Maxout Networks `_. 18 | 19 | :param input: a NHWC or NC tensor. 20 | :param num_unit: a int. must be divisible by C. 
21 | :returns: a NHW(C/num_unit) tensor 22 | """ 23 | input_shape = x.get_shape().as_list() 24 | ndim = len(input_shape) 25 | assert ndim == 4 or ndim == 2 26 | ch = input_shape[-1] 27 | assert ch is not None and ch % num_unit == 0 28 | if ndim == 4: 29 | x = tf.reshape(x, [-1, input_shape[1], input_shape[2], ch / num_unit, num_unit]) 30 | else: 31 | x = tf.reshape(x, [-1, ch / num_unit, num_unit]) 32 | return tf.reduce_max(x, ndim, name='output') 33 | 34 | @layer_register(log_shape=False) 35 | def PReLU(x, init=tf.constant_initializer(0.001), name=None): 36 | """ 37 | Parameterized relu as in `Delving Deep into Rectifiers: Surpassing 38 | Human-Level Performance on ImageNet Classification 39 | `_. 40 | 41 | :param input: any tensor. 42 | :param init: initializer for the p. default to 0.001. 43 | """ 44 | alpha = tf.get_variable('alpha', [], initializer=init) 45 | x = ((1 + alpha) * x + (1 - alpha) * tf.abs(x)) 46 | if name is None: 47 | name = 'output' 48 | return tf.mul(x, 0.5, name=name) 49 | 50 | @layer_register(log_shape=False) 51 | def LeakyReLU(x, alpha, name=None): 52 | """ 53 | Leaky relu as in `Rectifier Nonlinearities Improve Neural Network Acoustic 54 | Models 55 | `_. 56 | 57 | :param input: any tensor. 58 | :param alpha: the negative slope. 59 | """ 60 | alpha = float(alpha) 61 | x = ((1 + alpha) * x + (1 - alpha) * tf.abs(x)) 62 | if name is None: 63 | name = 'output' 64 | return tf.mul(x, 0.5, name=name) 65 | 66 | # I'm not a layer, but I return a nonlinearity. 67 | def BNReLU(is_training=None, **kwargs): 68 | """ 69 | :param is_traning: boolean 70 | :param kwargs: args for BatchNorm 71 | :returns: an activation function that performs BN + ReLU (a too common combination) 72 | """ 73 | def BNReLU(x, name=None): 74 | x = BatchNorm('bn', x, use_local_stat=is_training, **kwargs) 75 | x = tf.nn.relu(x, name=name) 76 | return x 77 | return BNReLU 78 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/regularize.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: regularize.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | import re 7 | 8 | from ..utils import logger 9 | from ..utils.utils import * 10 | from .model_desc import get_current_tower_context 11 | from ._common import layer_register 12 | 13 | __all__ = ['regularize_cost', 'l2_regularizer', 'l1_regularizer', 'Dropout'] 14 | 15 | @memoized 16 | def _log_regularizer(name): 17 | logger.info("Apply regularizer for {}".format(name)) 18 | 19 | l2_regularizer = tf.contrib.layers.l2_regularizer 20 | l1_regularizer = tf.contrib.layers.l1_regularizer 21 | 22 | def regularize_cost(regex, func, name=None): 23 | """ 24 | Apply a regularizer on every trainable variable matching the regex. 25 | 26 | :param func: a function that takes a tensor and return a scalar. 
27 | """ 28 | G = tf.get_default_graph() 29 | params = G.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) 30 | 31 | costs = [] 32 | for p in params: 33 | para_name = p.name 34 | if re.search(regex, para_name): 35 | costs.append(func(p)) 36 | _log_regularizer(para_name) 37 | if not costs: 38 | return 0 39 | return tf.add_n(costs, name=name) 40 | 41 | 42 | @layer_register(log_shape=False) 43 | def Dropout(x, prob=0.5): 44 | is_training = get_current_tower_context().is_training 45 | keep_prob = tf.constant(prob if is_training else 1.0) 46 | return tf.nn.dropout(x, keep_prob) 47 | 48 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/models/softmax.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: softmax.py 4 | # Author: Yuxin Wu 5 | 6 | import tensorflow as tf 7 | from ._common import layer_register 8 | 9 | __all__ = ['SoftMax'] 10 | 11 | @layer_register() 12 | def SoftMax(x, use_temperature=False, temperature_init=1.0): 13 | """ 14 | A SoftMax layer (no linear projection) with optional temperature 15 | :param x: a 2D tensor 16 | """ 17 | if use_temperature: 18 | t = tf.get_variable('invtemp', [], 19 | initializer=tf.constant_initializer(1.0 / float(temperature_init))) 20 | x = x * t 21 | return tf.nn.softmax(x, name='output') 22 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/predict/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import os 7 | import os.path 8 | 9 | def global_import(name): 10 | p = __import__(name, globals(), locals(), level=1) 11 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 12 | del globals()[name] 13 | for k in lst: 14 | globals()[k] = p.__dict__[k] 15 | 16 | for _, module_name, _ in walk_packages( 17 | [os.path.dirname(__file__)]): 18 | if not module_name.startswith('_'): 19 | global_import(module_name) 20 | 21 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/predict/common.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: common.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | from collections import namedtuple 7 | import six 8 | from six.moves import zip 9 | 10 | from tensorpack.models import ModelDesc 11 | from ..utils import logger 12 | from ..tfutils import * 13 | from .base import OfflinePredictor 14 | 15 | import multiprocessing 16 | 17 | __all__ = ['PredictConfig', 'get_predict_func', 'PredictResult' ] 18 | 19 | PredictResult = namedtuple('PredictResult', ['input', 'output']) 20 | 21 | class PredictConfig(object): 22 | def __init__(self, **kwargs): 23 | """ 24 | The config used by `get_predict_func`. 25 | 26 | :param session_init: a `utils.sessinit.SessionInit` instance to 27 | initialize variables of a session. 28 | :param input_var_names: a list of input variable names. 29 | :param input_data_mapping: deprecated. used to select `input_var_names` from the `InputVars` of the model. 30 | :param model: a `ModelDesc` instance 31 | :param output_var_names: a list of names of the output tensors to predict, the 32 | variables can be any computable tensor in the graph. 33 | Predict specific output might not require all input variables. 
34 | :param return_input: whether to return (input, output) pair or just output. default to False. 35 | """ 36 | def assert_type(v, tp): 37 | assert isinstance(v, tp), v.__class__ 38 | # XXX does it work? start with minimal memory, but allow growth. 39 | # allow_growth doesn't seem to work very well in TF. 40 | self.session_config = kwargs.pop('session_config', get_default_sess_config(0.4)) 41 | self.session_init = kwargs.pop('session_init', JustCurrentSession()) 42 | assert_type(self.session_init, SessionInit) 43 | self.model = kwargs.pop('model') 44 | assert_type(self.model, ModelDesc) 45 | 46 | # inputs & outputs 47 | self.input_var_names = kwargs.pop('input_var_names', None) 48 | input_mapping = kwargs.pop('input_data_mapping', None) 49 | if input_mapping: 50 | raw_vars = self.model.get_input_vars_desc() 51 | self.input_var_names = [raw_vars[k].name for k in input_mapping] 52 | logger.warn('The option `input_data_mapping` was deprecated. \ 53 | Use \'input_var_names=[{}]\' instead'.format(', '.join(self.input_var_names))) 54 | elif self.input_var_names is None: 55 | # neither options is set, assume all inputs 56 | raw_vars = self.model.get_input_vars_desc() 57 | self.input_var_names = [k.name for k in raw_vars] 58 | self.output_var_names = kwargs.pop('output_var_names') 59 | assert len(self.input_var_names), self.input_var_names 60 | for v in self.input_var_names: assert_type(v, six.string_types) 61 | assert len(self.output_var_names), self.output_var_names 62 | 63 | self.return_input = kwargs.pop('return_input', False) 64 | assert len(kwargs) == 0, 'Unknown arguments: {}'.format(str(kwargs.keys())) 65 | 66 | def get_predict_func(config): 67 | """ 68 | Produce a offline predictor run inside a new session. 69 | 70 | :param config: a `PredictConfig` instance. 71 | :returns: A callable predictor that takes a list of input values, and return 72 | a list of output values defined in ``config.output_var_names``. 
73 | """ 74 | return OfflinePredictor(config) 75 | 76 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import os 7 | 8 | def _global_import(name): 9 | p = __import__(name, globals(), None, level=1) 10 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 11 | if name in ['common', 'argscope']: 12 | del globals()[name] 13 | for k in lst: 14 | globals()[k] = p.__dict__[k] 15 | 16 | _global_import('sessinit') 17 | _global_import('common') 18 | _global_import('gradproc') 19 | _global_import('argscope') 20 | 21 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/argscope.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: argscope.py 4 | # Author: Yuxin Wu 5 | from contextlib import contextmanager 6 | from collections import defaultdict 7 | import inspect 8 | import copy 9 | import six 10 | 11 | __all__ = ['argscope', 'get_arg_scope'] 12 | 13 | _ArgScopeStack = [] 14 | 15 | @contextmanager 16 | def argscope(layers, **param): 17 | if not isinstance(layers, list): 18 | layers = [layers] 19 | 20 | def _check_args_exist(l): 21 | args = inspect.getargspec(l).args 22 | for k, v in six.iteritems(param): 23 | assert k in args, "No argument {} in {}".format(k, l.__name__) 24 | 25 | for l in layers: 26 | assert hasattr(l, 'f'), "{} is not a registered layer".format(l.__name__) 27 | _check_args_exist(l.f) 28 | 29 | new_scope = copy.copy(get_arg_scope()) 30 | for l in layers: 31 | new_scope[l.__name__].update(param) 32 | _ArgScopeStack.append(new_scope) 33 | yield 34 | del _ArgScopeStack[-1] 35 | 36 | def get_arg_scope(): 37 | """ 38 | :returns: the current argscope. 39 | An argscope is a dict of dict: dict[layername] = {arg: val} 40 | """ 41 | if len(_ArgScopeStack) > 0: 42 | return _ArgScopeStack[-1] 43 | else: 44 | return defaultdict(dict) 45 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/common.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: common.py 4 | # Author: Yuxin Wu 5 | 6 | from ..utils.naming import * 7 | import tensorflow as tf 8 | from copy import copy 9 | import six 10 | from contextlib import contextmanager 11 | 12 | __all__ = ['get_default_sess_config', 13 | 'get_global_step', 14 | 'get_global_step_var', 15 | 'get_op_var_name', 16 | 'get_vars_by_names', 17 | 'backup_collection', 18 | 'restore_collection', 19 | 'clear_collection', 20 | 'freeze_collection'] 21 | 22 | def get_default_sess_config(mem_fraction=0.9): 23 | """ 24 | Return a better session config to use as default. 25 | Tensorflow default session config consume too much resources. 26 | 27 | :param mem_fraction: fraction of memory to use. 28 | :returns: a `tf.ConfigProto` object. 
29 | """ 30 | conf = tf.ConfigProto() 31 | conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction 32 | conf.gpu_options.allocator_type = 'BFC' 33 | conf.gpu_options.allow_growth = True 34 | conf.allow_soft_placement = True 35 | #conf.log_device_placement = True 36 | return conf 37 | 38 | def get_global_step_var(): 39 | """ :returns: the global_step variable in the current graph. create if not existed""" 40 | try: 41 | return tf.get_default_graph().get_tensor_by_name(GLOBAL_STEP_VAR_NAME) 42 | except KeyError: 43 | scope = tf.get_variable_scope() 44 | assert scope.name == '', \ 45 | "Creating global_step_var under a variable scope would cause problems!" 46 | var = tf.Variable( 47 | 0, trainable=False, name=GLOBAL_STEP_OP_NAME) 48 | return var 49 | 50 | def get_global_step(): 51 | """ :returns: global_step value in current graph and session""" 52 | return tf.train.global_step( 53 | tf.get_default_session(), 54 | get_global_step_var()) 55 | 56 | def get_op_var_name(name): 57 | """ 58 | Variable name is assumed to be ``op_name + ':0'`` 59 | 60 | :param name: an op or a variable name 61 | :returns: (op_name, variable_name) 62 | """ 63 | if name.endswith(':0'): 64 | return name[:-2], name 65 | else: 66 | return name, name + ':0' 67 | 68 | def get_vars_by_names(names): 69 | """ 70 | Get a list of variables in the default graph by a list of names 71 | """ 72 | ret = [] 73 | G = tf.get_default_graph() 74 | for n in names: 75 | opn, varn = get_op_var_name(n) 76 | ret.append(G.get_tensor_by_name(varn)) 77 | return ret 78 | 79 | def backup_collection(keys): 80 | ret = {} 81 | for k in keys: 82 | ret[k] = copy(tf.get_collection(k)) 83 | return ret 84 | 85 | def restore_collection(backup): 86 | for k, v in six.iteritems(backup): 87 | del tf.get_collection_ref(k)[:] 88 | tf.get_collection_ref(k).extend(v) 89 | 90 | def clear_collection(keys): 91 | for k in keys: 92 | del tf.get_collection_ref(k)[:] 93 | 94 | @contextmanager 95 | def freeze_collection(keys): 96 | backup = backup_collection(keys) 97 | yield 98 | restore_collection(backup) 99 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/gradproc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: UTF-8 -*- 3 | # File: gradproc.py 4 | # Author: Yuxin Wu 5 | 6 | import tensorflow as tf 7 | from abc import ABCMeta, abstractmethod 8 | import re 9 | import inspect 10 | from ..utils import logger 11 | from .symbolic_functions import rms 12 | from .summary import add_moving_summary 13 | 14 | __all__ = ['GradientProcessor', 'SummaryGradient', 'CheckGradient', 15 | 'ScaleGradient', 'MapGradient'] 16 | 17 | class GradientProcessor(object): 18 | __metaclass__ = ABCMeta 19 | 20 | def process(self, grads): 21 | """ 22 | Process the symbolic gradients. 23 | 24 | :param grads: list of (grad, var) 25 | :returns: symbolic gradients with the same type as input 26 | """ 27 | with tf.name_scope(type(self).__name__): 28 | return self._process(grads) 29 | 30 | @abstractmethod 31 | def _process(self, grads): 32 | pass 33 | 34 | class MapGradient(GradientProcessor): 35 | """ 36 | Apply a function on all gradient if the name matches regex. 37 | Keep the other gradients unchanged. 38 | """ 39 | def __init__(self, func, regex='.*'): 40 | """ 41 | :param func: takes a grad or (grad, var) pair and returns a grad. If return None, the 42 | gradient is discarded. 43 | :param regex: used to match variables. 
default to match all variables. 44 | """ 45 | args = inspect.getargspec(func).args 46 | arg_num = len(args) - inspect.ismethod(func) 47 | assert arg_num in [1, 2], \ 48 | "The function must take 1 or 2 arguments! ({})".format(args) 49 | if arg_num == 1: 50 | self.func = lambda grad, var: func(grad) 51 | else: 52 | self.func = func 53 | 54 | if not regex.endswith('$'): 55 | regex = regex + '$' 56 | self.regex = regex 57 | 58 | def _process(self, grads): 59 | ret = [] 60 | for grad, var in grads: 61 | if re.match(self.regex, var.op.name): 62 | grad = self.func(grad, var) 63 | if grad is not None: 64 | ret.append((grad, var)) 65 | else: 66 | ret.append((grad, var)) 67 | return ret 68 | 69 | _summaried_gradient = set() 70 | 71 | class SummaryGradient(MapGradient): 72 | """ 73 | Summary history and RMS for each graident variable 74 | """ 75 | def __init__(self): 76 | super(SummaryGradient, self).__init__(self._mapper) 77 | 78 | def _mapper(self, grad, var): 79 | name = var.op.name 80 | if name not in _summaried_gradient: 81 | _summaried_gradient.add(name) 82 | tf.histogram_summary(name + '/grad', grad) 83 | add_moving_summary(rms(grad, name=name + '/rms')) 84 | return grad 85 | 86 | class CheckGradient(MapGradient): 87 | """ 88 | Check for numeric issue. 89 | """ 90 | def __init__(self): 91 | super(CheckGradient, self).__init__(self._mapper) 92 | 93 | def _mapper(self, grad, var): 94 | # this is very slow... 95 | #op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100) 96 | grad = tf.check_numerics(grad, 'CheckGradient') 97 | return grad 98 | 99 | class ScaleGradient(MapGradient): 100 | """ 101 | Scale gradient by a multiplier 102 | """ 103 | def __init__(self, multipliers): 104 | """ 105 | :param multipliers: list of (regex, float) 106 | """ 107 | self.multipliers = multipliers 108 | super(ScaleGradient, self).__init__(self._mapper) 109 | 110 | def _mapper(self, grad, var): 111 | varname = var.op.name 112 | for regex, val in self.multipliers: 113 | # always match against the whole name 114 | if not regex.endswith('$'): 115 | regex = regex + '$' 116 | 117 | if re.match(regex, varname): 118 | logger.info("Apply lr multiplier {} for {}".format(val, varname)) 119 | if val != 0: # skip zero to speed up 120 | return grad * val 121 | else: 122 | return None 123 | return grad 124 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/modelutils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: modelutils.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | 7 | from ..utils import logger 8 | 9 | def describe_model(): 10 | """ print a description of the current model parameters """ 11 | train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) 12 | msg = [""] 13 | total = 0 14 | for v in train_vars: 15 | shape = v.get_shape() 16 | ele = shape.num_elements() 17 | total += ele 18 | msg.append("{}: shape={}, dim={}".format( 19 | v.name, shape.as_list(), ele)) 20 | size_mb = total * 4 / 1024.0**2 21 | msg.append("Total param={} ({:01f} MB assuming all float32)".format(total, size_mb)) 22 | logger.info("Model Parameters: {}".format('\n'.join(msg))) 23 | 24 | 25 | def get_shape_str(tensors): 26 | """ 27 | :param tensors: a tensor or a list of tensors 28 | :returns: a string to describe the shape 29 | """ 30 | if isinstance(tensors, (list, tuple)): 31 | for v in tensors: 32 | assert isinstance(v, (tf.Tensor, tf.Variable)), "Not a tensor: 
{}".format(type(v)) 33 | shape_str = ",".join( 34 | map(lambda x: str(x.get_shape().as_list()), tensors)) 35 | else: 36 | assert isinstance(tensors, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(tensors)) 37 | shape_str = str(tensors.get_shape().as_list()) 38 | return shape_str 39 | 40 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/summary.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: summary.py 3 | # Author: Yuxin Wu 4 | 5 | import six 6 | import tensorflow as tf 7 | import re 8 | 9 | from ..utils import * 10 | from . import get_global_step_var 11 | from .symbolic_functions import rms 12 | 13 | __all__ = ['create_summary', 'add_param_summary', 'add_activation_summary', 14 | 'add_moving_summary', 'summary_moving_average'] 15 | 16 | def create_summary(name, v): 17 | """ 18 | Return a tf.Summary object with name and simple scalar value v 19 | """ 20 | assert isinstance(name, six.string_types), type(name) 21 | v = float(v) 22 | s = tf.Summary() 23 | s.value.add(tag=name, simple_value=v) 24 | return s 25 | 26 | def add_activation_summary(x, name=None): 27 | """ 28 | Add summary to graph for an activation tensor x. 29 | If name is None, use x.name. 30 | """ 31 | ndim = x.get_shape().ndims 32 | assert ndim >= 2, \ 33 | "Summary a scalar with histogram? Maybe use scalar instead. FIXME!" 34 | if name is None: 35 | name = x.name 36 | with tf.name_scope('act_summary'): 37 | tf.histogram_summary(name + '/activation', x) 38 | tf.scalar_summary(name + '/activation_sparsity', tf.nn.zero_fraction(x)) 39 | tf.scalar_summary( 40 | name + '/activation_rms', rms(x)) 41 | 42 | def add_param_summary(summary_lists): 43 | """ 44 | Add summary for all trainable variables matching the regex 45 | 46 | :param summary_lists: list of (regex, [list of summary type to perform]). 47 | Type can be 'mean', 'scalar', 'histogram', 'sparsity', 'rms' 48 | """ 49 | def perform(var, action): 50 | ndim = var.get_shape().ndims 51 | name = var.name.replace(':0', '') 52 | if action == 'scalar': 53 | assert ndim == 0, "Scalar summary on high-dimension data. Maybe you want 'mean'?" 54 | tf.scalar_summary(name, var) 55 | return 56 | assert ndim > 0, "Cannot perform {} summary on scalar data".format(action) 57 | if action == 'histogram': 58 | tf.histogram_summary(name, var) 59 | return 60 | if action == 'sparsity': 61 | tf.scalar_summary(name + '/sparsity', tf.nn.zero_fraction(var)) 62 | return 63 | if action == 'mean': 64 | tf.scalar_summary(name + '/mean', tf.reduce_mean(var)) 65 | return 66 | if action == 'rms': 67 | tf.scalar_summary(name + '/rms', rms(var)) 68 | return 69 | raise RuntimeError("Unknown summary type: {}".format(action)) 70 | 71 | params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) 72 | with tf.name_scope('param_summary'): 73 | for p in params: 74 | name = p.name 75 | for rgx, actions in summary_lists: 76 | if not rgx.endswith('$'): 77 | rgx = rgx + '(:0)?$' 78 | if re.match(rgx, name): 79 | for act in actions: 80 | perform(p, act) 81 | 82 | def add_moving_summary(v, *args): 83 | """ 84 | :param v: tensor or list of tensor to summary 85 | :param args: tensors to summary 86 | """ 87 | if not isinstance(v, list): 88 | v = [v] 89 | v.extend(args) 90 | for x in v: 91 | tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, x) 92 | 93 | def summary_moving_average(): 94 | """ Create a MovingAverage op and summary for all variables in 95 | MOVING_SUMMARY_VARS_KEY. 
96 | :returns: a op to maintain these average. 97 | """ 98 | with tf.name_scope('EMA_summary'): 99 | global_step_var = get_global_step_var() 100 | with tf.name_scope(None): 101 | averager = tf.train.ExponentialMovingAverage( 102 | 0.99, num_updates=global_step_var, name='EMA') 103 | vars_to_summary = tf.get_collection(MOVING_SUMMARY_VARS_KEY) 104 | avg_maintain_op = averager.apply(vars_to_summary) 105 | for idx, c in enumerate(vars_to_summary): 106 | # TODO assert scalar 107 | name = re.sub('tower[p0-9]+/', '', c.op.name) 108 | tf.scalar_summary(name, averager.average(c)) 109 | return avg_maintain_op 110 | 111 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/symbolic_functions.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: symbolic_functions.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | import numpy as np 7 | from ..utils import logger 8 | 9 | def prediction_incorrect(logits, label, topk=1): 10 | """ 11 | :param logits: NxC 12 | :param label: N 13 | :returns: a float32 vector of length N with 0/1 values, 1 meaning incorrect prediction 14 | """ 15 | return tf.cast(tf.logical_not(tf.nn.in_top_k(logits, label, topk)), tf.float32) 16 | 17 | def flatten(x): 18 | """ 19 | Flatten the tensor. 20 | """ 21 | return tf.reshape(x, [-1]) 22 | 23 | def batch_flatten(x): 24 | """ 25 | Flatten the tensor except the first dimension. 26 | """ 27 | shape = x.get_shape().as_list()[1:] 28 | if None not in shape: 29 | return tf.reshape(x, [-1, np.prod(shape)]) 30 | return tf.reshape(x, tf.pack([tf.shape(x)[0], -1])) 31 | 32 | def logSoftmax(x): 33 | """ 34 | Batch log softmax. 35 | :param x: NxC tensor. 36 | :returns: NxC tensor. 37 | """ 38 | logger.warn("symbf.logSoftmax is deprecated in favor of tf.nn.log_softmax") 39 | return tf.nn.log_softmax(x) 40 | 41 | def class_balanced_binary_class_cross_entropy(pred, label, name='cross_entropy_loss'): 42 | """ 43 | The class-balanced cross entropy loss for binary classification, 44 | as in `Holistically-Nested Edge Detection 45 | `_. 46 | 47 | :param pred: size: b x ANYTHING. the predictions in [0,1]. 48 | :param label: size: b x ANYTHING. the ground truth in {0,1}. 49 | :returns: class-balanced binary classification cross entropy loss 50 | """ 51 | z = batch_flatten(pred) 52 | y = tf.cast(batch_flatten(label), tf.float32) 53 | 54 | count_neg = tf.reduce_sum(1. - y) 55 | count_pos = tf.reduce_sum(y) 56 | beta = count_neg / (count_neg + count_pos) 57 | 58 | eps = 1e-8 59 | loss_pos = -beta * tf.reduce_mean(y * tf.log(tf.abs(z) + eps), 1) 60 | loss_neg = (1. - beta) * tf.reduce_mean((1. - y) * tf.log(tf.abs(1. - z) + eps), 1) 61 | cost = tf.sub(loss_pos, loss_neg) 62 | cost = tf.reduce_mean(cost, name=name) 63 | return cost 64 | 65 | def print_stat(x, message=None): 66 | """ a simple print op. 
67 | Use it like: x = print_stat(x) 68 | """ 69 | if message is None: 70 | message = x.op.name 71 | return tf.Print(x, [tf.reduce_mean(x), x], summarize=20, 72 | message=message, name='print_' + x.op.name) 73 | 74 | def rms(x, name=None): 75 | if name is None: 76 | name = x.op.name + '/rms' 77 | with tf.name_scope(None): # name already contains the scope 78 | return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name) 79 | return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name) 80 | 81 | def huber_loss(x, delta=1, name=None): 82 | if name is None: 83 | name = 'huber_loss' 84 | sqrcost = tf.square(x) 85 | abscost = tf.abs(x) 86 | return tf.reduce_sum( 87 | tf.select(abscost < delta, 88 | sqrcost * 0.5, 89 | abscost * delta - 0.5 * delta ** 2), 90 | name=name) 91 | 92 | def get_scalar_var(name, init_value): 93 | return tf.get_variable(name, shape=[], 94 | initializer=tf.constant_initializer(init_value), 95 | trainable=False) 96 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/tfutils/varmanip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: varmanip.py 4 | # Author: Yuxin Wu 5 | 6 | import six 7 | import tensorflow as tf 8 | from collections import defaultdict 9 | import re 10 | import numpy as np 11 | from ..utils import logger 12 | from ..utils.naming import * 13 | 14 | __all__ = ['SessionUpdate', 'dump_session_params', 'dump_chkpt_vars', 15 | 'get_savename_from_varname'] 16 | 17 | def get_savename_from_varname( 18 | varname, varname_prefix=None, 19 | savename_prefix=None): 20 | """ 21 | :param varname: a variable name in the graph 22 | :param varname_prefix: an optional prefix that may need to be removed in varname 23 | :param savename_prefix: an optional prefix to append to all savename 24 | :returns: the name used to save the variable 25 | """ 26 | name = varname 27 | if 'towerp' in name: 28 | logger.error("No variable should be under 'towerp' name scope".format(v.name)) 29 | # don't overwrite anything in the current prediction graph 30 | return None 31 | if 'tower' in name: 32 | name = re.sub('tower[p0-9]+/', '', name) 33 | if varname_prefix is not None \ 34 | and name.startswith(varname_prefix): 35 | name = name[len(varname_prefix)+1:] 36 | if savename_prefix is not None: 37 | name = savename_prefix + '/' + name 38 | return name 39 | 40 | class SessionUpdate(object): 41 | """ Update the variables in a session """ 42 | def __init__(self, sess, vars_to_update): 43 | """ 44 | :param vars_to_update: a collection of variables to update 45 | """ 46 | self.sess = sess 47 | self.assign_ops = defaultdict(list) 48 | for v in vars_to_update: 49 | #p = tf.placeholder(v.dtype, shape=v.get_shape()) 50 | with tf.device('/cpu:0'): 51 | p = tf.placeholder(v.dtype) 52 | savename = get_savename_from_varname(v.name) 53 | # multiple vars might share one savename 54 | self.assign_ops[savename].append((p, v, v.assign(p))) 55 | 56 | def update(self, prms): 57 | """ 58 | :param prms: dict of {variable name: value} 59 | Any name in prms must be in the graph and in vars_to_update. 
60 | """ 61 | for name, value in six.iteritems(prms): 62 | assert name in self.assign_ops 63 | for p, v, op in self.assign_ops[name]: 64 | varshape = tuple(v.get_shape().as_list()) 65 | if varshape != value.shape: 66 | # TODO only allow reshape when shape different by empty axis 67 | assert np.prod(varshape) == np.prod(value.shape), \ 68 | "{}: {}!={}".format(name, varshape, value.shape) 69 | logger.warn("Param {} is reshaped during assigning".format(name)) 70 | value = value.reshape(varshape) 71 | self.sess.run(op, feed_dict={p: value}) 72 | 73 | def dump_session_params(path): 74 | """ Dump value of all trainable + to_save variables to a dict and save to `path` as 75 | npy format, loadable by ParamRestore 76 | """ 77 | var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) 78 | var.extend(tf.get_collection(EXTRA_SAVE_VARS_KEY)) 79 | result = {} 80 | for v in var: 81 | name = get_savename_from_varname(v.name) 82 | if name in result: 83 | logger.info("Variable {} would be stored instead of another with \ 84 | the same name".format(v.name)) 85 | result[name] = v.eval() 86 | logger.info("Variables to save to {}:".format(path)) 87 | logger.info(str(result.keys())) 88 | np.save(path, result) 89 | 90 | def dump_chkpt_vars(model_path, output): 91 | """ Dump all variables from a checkpoint """ 92 | reader = tf.train.NewCheckpointReader(model_path) 93 | var_names = reader.get_variable_to_shape_map().keys() 94 | result = {} 95 | for n in var_names: 96 | result[n] = reader.get_tensor(n) 97 | logger.info("Variables to save to {}:".format(output)) 98 | logger.info(str(result.keys())) 99 | np.save(output, result) 100 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/train/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import os 7 | import os.path 8 | 9 | def global_import(name): 10 | p = __import__(name, globals(), locals(), level=1) 11 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 12 | for k in lst: 13 | globals()[k] = p.__dict__[k] 14 | del globals()[name] 15 | 16 | for _, module_name, _ in walk_packages( 17 | [os.path.dirname(__file__)]): 18 | if not module_name.startswith('_'): 19 | global_import(module_name) 20 | 21 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/train/config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # File: config.py 3 | # Author: Yuxin Wu 4 | 5 | import tensorflow as tf 6 | 7 | from ..callbacks import Callbacks 8 | from ..models import ModelDesc 9 | from ..utils import * 10 | from ..tfutils import * 11 | from ..dataflow import DataFlow 12 | 13 | __all__ = ['TrainConfig'] 14 | 15 | class TrainConfig(object): 16 | """ 17 | Config for training a model with a single loss 18 | """ 19 | def __init__(self, **kwargs): 20 | """ 21 | :param dataset: the dataset to train. a `DataFlow` instance. 22 | :param optimizer: a `tf.train.Optimizer` instance defining the optimizer for trainig. 23 | :param callbacks: a `callback.Callbacks` instance. Define 24 | the callbacks to perform during training. 25 | :param session_config: a `tf.ConfigProto` instance to instantiate the 26 | session. default to a session running 1 GPU. 27 | :param session_init: a `sessinit.SessionInit` instance to 28 | initialize variables of a session. 
default to a new session. 29 | :param model: a `ModelDesc` instance. 30 | :param starting_epoch: int. default to be 1. 31 | :param step_per_epoch: the number of steps (SGD updates) to perform in each epoch. 32 | :param max_epoch: maximum number of epoch to run training. default to inf 33 | :param nr_tower: int. number of training towers. default to 1. 34 | :param tower: list of training towers in relative id. default to `range(nr_tower)` if nr_tower is given. 35 | :param extra_threads_procs: list of `Startable` threads or processes 36 | """ 37 | def assert_type(v, tp): 38 | assert isinstance(v, tp), v.__class__ 39 | self.dataset = kwargs.pop('dataset') 40 | assert_type(self.dataset, DataFlow) 41 | self.optimizer = kwargs.pop('optimizer') 42 | assert_type(self.optimizer, tf.train.Optimizer) 43 | self.callbacks = kwargs.pop('callbacks') 44 | assert_type(self.callbacks, Callbacks) 45 | self.model = kwargs.pop('model') 46 | assert_type(self.model, ModelDesc) 47 | 48 | self.session_config = kwargs.pop('session_config', get_default_sess_config()) 49 | assert_type(self.session_config, tf.ConfigProto) 50 | self.session_init = kwargs.pop('session_init', JustCurrentSession()) 51 | assert_type(self.session_init, SessionInit) 52 | self.step_per_epoch = int(kwargs.pop('step_per_epoch')) 53 | self.starting_epoch = int(kwargs.pop('starting_epoch', 1)) 54 | self.max_epoch = int(kwargs.pop('max_epoch', 99999)) 55 | assert self.step_per_epoch > 0 and self.max_epoch > 0 56 | 57 | if 'nr_tower' in kwargs or 'tower' in kwargs: 58 | self.set_tower(**kwargs) 59 | else: 60 | self.tower = [0] 61 | 62 | self.extra_threads_procs = kwargs.pop('extra_threads_procs', []) 63 | assert len(kwargs) == 0, 'Unknown arguments: {}'.format(str(kwargs.keys())) 64 | 65 | def set_tower(self, nr_tower=None, tower=None): 66 | assert nr_tower is None or tower is None, "Cannot set both nr_tower and tower!" 67 | if nr_tower: 68 | tower = list(range(nr_tower)) 69 | else: 70 | if isinstance(tower, int): 71 | tower = list(range(tower)) 72 | self.tower = tower 73 | assert isinstance(self.tower, list) 74 | 75 | @property 76 | def nr_tower(self): 77 | return len(self.tower) 78 | 79 | @nr_tower.setter 80 | def nr_tower(self, value): 81 | self.tower = list(range(value)) 82 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: __init__.py 3 | # Author: Yuxin Wu 4 | 5 | from pkgutil import walk_packages 6 | import os 7 | 8 | """ 9 | Common utils. 10 | These utils should be irrelevant to tensorflow. 11 | """ 12 | 13 | def _global_import(name): 14 | p = __import__(name, globals(), None, level=1) 15 | lst = p.__all__ if '__all__' in dir(p) else dir(p) 16 | del globals()[name] 17 | for k in lst: 18 | globals()[k] = p.__dict__[k] 19 | _global_import('naming') 20 | _global_import('utils') 21 | _global_import('gpu') 22 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/discretize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: discretize.py 4 | # Author: Yuxin Wu 5 | 6 | from . 
import logger, memoized 7 | from abc import abstractmethod, ABCMeta 8 | import numpy as np 9 | from six.moves import range 10 | 11 | __all__ = ['UniformDiscretizer1D', 'UniformDiscretizerND'] 12 | 13 | @memoized 14 | def log_once(s): 15 | logger.warn(s) 16 | 17 | # just a placeholder 18 | class Discretizer(object): 19 | __metaclass__ = ABCMeta 20 | 21 | @abstractmethod 22 | def get_nr_bin(self): 23 | pass 24 | 25 | @abstractmethod 26 | def get_bin(self, v): 27 | pass 28 | 29 | class Discretizer1D(Discretizer): 30 | pass 31 | 32 | class UniformDiscretizer1D(Discretizer1D): 33 | def __init__(self, minv, maxv, spacing): 34 | """ 35 | :params minv: minimum value of the first bin 36 | :params maxv: maximum value of the last bin 37 | :param spacing: width of a bin 38 | """ 39 | self.minv = float(minv) 40 | self.maxv = float(maxv) 41 | self.spacing = float(spacing) 42 | self.nr_bin = int(np.ceil((self.maxv - self.minv) / self.spacing)) 43 | 44 | def get_nr_bin(self): 45 | return self.nr_bin 46 | 47 | def get_bin(self, v): 48 | if v < self.minv: 49 | log_once("UniformDiscretizer1D: value smaller than min!") 50 | return 0 51 | if v > self.maxv: 52 | log_once("UniformDiscretizer1D: value larger than max!") 53 | return self.nr_bin - 1 54 | return int(np.clip( 55 | (v - self.minv) / self.spacing, 56 | 0, self.nr_bin - 1)) 57 | 58 | def get_bin_center(self, bin_id): 59 | return self.minv + self.spacing * (bin_id + 0.5) 60 | 61 | def get_distribution(self, v, smooth_factor=0.05, smooth_radius=2): 62 | """ return a smoothed one-hot distribution of the sample v. 63 | """ 64 | b = self.get_bin(v) 65 | ret = np.zeros((self.nr_bin, ), dtype='float32') 66 | ret[b] = 1.0 67 | if v >= self.maxv or v <= self.minv: 68 | return ret 69 | try: 70 | for k in range(1, smooth_radius+1): 71 | ret[b+k] = smooth_factor ** k 72 | except IndexError: 73 | pass 74 | for k in range(1, min(smooth_radius+1, b+1)): 75 | ret[b-k] = smooth_factor ** k 76 | ret /= ret.sum() 77 | return ret 78 | 79 | 80 | class UniformDiscretizerND(Discretizer): 81 | def __init__(self, *min_max_spacing): 82 | """ 83 | :params min_max_spacing: (minv, maxv, spacing) for each dimension 84 | """ 85 | self.n = len(min_max_spacing) 86 | self.discretizers = [UniformDiscretizer1D(*k) for k in min_max_spacing] 87 | self.nr_bins = [k.get_nr_bin() for k in self.discretizers] 88 | 89 | def get_nr_bin(self): 90 | return np.prod(self.nr_bins) 91 | 92 | def get_bin(self, v): 93 | assert len(v) == self.n 94 | bin_id = [self.discretizers[k].get_bin(v[k]) for k in range(self.n)] 95 | return self.get_bin_from_nd_bin_ids(bin_id) 96 | 97 | def get_nd_bin_ids(self, bin_id): 98 | ret = [] 99 | for k in reversed(list(range(self.n))): 100 | nr = self.nr_bins[k] 101 | v = bin_id % nr 102 | bin_id = bin_id / nr 103 | ret.append(v) 104 | return list(reversed(ret)) 105 | 106 | def get_bin_from_nd_bin_ids(self, bin_ids): 107 | acc, res = 1, 0 108 | for k in reversed(list(range(self.n))): 109 | res += bin_ids[k] * acc 110 | acc *= self.nr_bins[k] 111 | return res 112 | 113 | def get_nr_bin_nd(self): 114 | return self.nr_bins 115 | 116 | def get_bin_center(self, bin_id): 117 | bin_id_nd = self.get_nd_bin_ids(bin_id) 118 | return [self.discretizers[k].get_bin_center(bin_id_nd[k]) for k in range(self.n)] 119 | 120 | if __name__ == '__main__': 121 | #u = UniformDiscretizer1D(-10, 10, 0.12) 122 | u = UniformDiscretizerND((0, 100, 1), (0, 100, 1), (0, 100, 1)) 123 | import IPython as IP; 124 | IP.embed(config=IP.terminal.ipapp.load_default_config()) 125 | 126 | 
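The `__main__` block above only drops into an IPython shell, so as a quick illustration here is a minimal usage sketch (not part of the repository) of the 1D discretizer, assuming the `F1_track1/tensorpack` directory from this submission is importable:

```python
# Hedged example: exercises UniformDiscretizer1D exactly as defined in
# tensorpack/utils/discretize.py above; the import path assumes the
# F1_track1/tensorpack directory is on sys.path.
import numpy as np
from tensorpack.utils.discretize import UniformDiscretizer1D

disc = UniformDiscretizer1D(-10, 10, 1.0)   # 20 bins of width 1.0 covering [-10, 10]
assert disc.get_nr_bin() == 20

b = disc.get_bin(3.2)               # -> 13, the bin containing 3.2
center = disc.get_bin_center(b)     # -> 3.5, the center of that bin
dist = disc.get_distribution(3.2)   # smoothed one-hot over the 20 bins, sums to 1
print(b, center, np.argmax(dist))   # argmax of the distribution is the same bin index
```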
-------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/fs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: fs.py 4 | # Author: Yuxin Wu 5 | 6 | import os, sys 7 | from six.moves import urllib 8 | from . import logger 9 | 10 | __all__ = ['mkdir_p', 'download'] 11 | 12 | def mkdir_p(dirname): 13 | """ make a dir recursively, but do nothing if the dir exists""" 14 | assert dirname is not None 15 | if dirname == '' or os.path.isdir(dirname): 16 | return 17 | try: 18 | os.makedirs(dirname) 19 | except OSError as e: 20 | if e.errno != 17: 21 | raise e 22 | 23 | 24 | def download(url, dir): 25 | mkdir_p(dir) 26 | fname = url.split('/')[-1] 27 | fpath = os.path.join(dir, fname) 28 | 29 | def _progress(count, block_size, total_size): 30 | sys.stdout.write('\r>> Downloading %s %.1f%%' % 31 | (fname, 32 | min(float(count * block_size)/ total_size, 33 | 1.0) * 100.0)) 34 | sys.stdout.flush() 35 | try: 36 | fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=_progress) 37 | statinfo = os.stat(fpath) 38 | size = statinfo.st_size 39 | except: 40 | logger.error("Failed to download {}".format(url)) 41 | raise 42 | assert size > 0, "Download an empty file!" 43 | sys.stdout.write('\n') 44 | print('Succesfully downloaded ' + fname + " " + str(size) + ' bytes.') 45 | return fpath 46 | 47 | if __name__ == '__main__': 48 | download('http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz', '.') 49 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/gpu.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: gpu.py 4 | # Author: Yuxin Wu 5 | 6 | import os 7 | from .utils import change_env 8 | 9 | __all__ = ['change_gpu', 'get_nr_gpu', 'get_gpus'] 10 | 11 | def change_gpu(val): 12 | return change_env('CUDA_VISIBLE_DEVICES', str(val)) 13 | 14 | def get_nr_gpu(): 15 | env = os.environ.get('CUDA_VISIBLE_DEVICES', None) 16 | assert env is not None # TODO 17 | return len(env.split(',')) 18 | 19 | def get_gpus(): 20 | """ return a list of GPU physical id""" 21 | env = os.environ.get('CUDA_VISIBLE_DEVICES', None) 22 | assert env is not None # TODO 23 | return map(int, env.strip().split(',')) 24 | 25 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/loadcaffe.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: loadcaffe.py 4 | # Author: Yuxin Wu 5 | 6 | from collections import namedtuple, defaultdict 7 | from abc import abstractmethod 8 | import numpy as np 9 | import copy 10 | import os 11 | 12 | from six.moves import zip 13 | 14 | from .utils import change_env, get_dataset_path 15 | from .fs import download 16 | from . 
import logger 17 | 18 | __all__ = ['load_caffe', 'get_caffe_pb'] 19 | 20 | CAFFE_PROTO_URL = "https://github.com/BVLC/caffe/raw/master/src/caffe/proto/caffe.proto" 21 | 22 | def get_processor(): 23 | ret = {} 24 | def process_conv(layer_name, param, input_data_shape): 25 | assert len(param) == 2 26 | # caffe: ch_out, ch_in, h, w 27 | return {layer_name + '/W': param[0].data.transpose(2,3,1,0), 28 | layer_name + '/b': param[1].data} 29 | ret['Convolution'] = process_conv 30 | 31 | # TODO caffe has an 'transpose' option for fc/W 32 | def process_fc(layer_name, param, input_data_shape): 33 | assert len(param) == 2 34 | if len(input_data_shape) == 3: 35 | logger.info("{} is right after spatial data.".format(layer_name)) 36 | W = param[0].data 37 | # original: outx(CxHxW) 38 | W = W.reshape((-1,) + input_data_shape).transpose(2,3,1,0) 39 | # become: (HxWxC)xout 40 | else: 41 | W = param[0].data.transpose() 42 | return {layer_name + '/W': W, 43 | layer_name + '/b': param[1].data} 44 | ret['InnerProduct'] = process_fc 45 | 46 | return ret 47 | 48 | def load_caffe(model_desc, model_file): 49 | """ 50 | return a dict of params 51 | """ 52 | param_dict = {} 53 | param_processors = get_processor() 54 | 55 | with change_env('GLOG_minloglevel', '2'): 56 | import caffe 57 | caffe.set_mode_cpu() 58 | net = caffe.Net(model_desc, model_file, caffe.TEST) 59 | layer_names = net._layer_names 60 | blob_names = net.blobs.keys() 61 | for layername, layer in zip(layer_names, net.layers): 62 | try: 63 | prev_blob_name = blob_names[blob_names.index(layername)-1] 64 | prev_data_shape = net.blobs[prev_blob_name].data.shape[1:] 65 | except ValueError: 66 | prev_data_shape = None 67 | logger.info("Processing layer {} of type {}".format( 68 | layername, layer.type)) 69 | if layer.type in param_processors: 70 | param_dict.update(param_processors[layer.type]( 71 | layername, layer.blobs, prev_data_shape)) 72 | else: 73 | if len(layer.blobs) != 0: 74 | logger.warn("Layer type {} not supported!".format(layer.type)) 75 | logger.info("Model loaded from caffe. Params: " + \ 76 | " ".join(sorted(param_dict.keys()))) 77 | return param_dict 78 | 79 | def get_caffe_pb(): 80 | dir = get_dataset_path('caffe') 81 | caffe_pb_file = os.path.join(dir, 'caffe_pb2.py') 82 | if not os.path.isfile(caffe_pb_file): 83 | proto_path = download(CAFFE_PROTO_URL, dir) 84 | ret = os.system('cd {} && protoc caffe.proto --python_out .'.format(dir)) 85 | assert ret == 0, \ 86 | "caffe proto compilation failed! Did you install protoc?" 
87 | import imp 88 | return imp.load_source('caffepb', caffe_pb_file) 89 | 90 | if __name__ == '__main__': 91 | import argparse 92 | parser = argparse.ArgumentParser() 93 | parser.add_argument('model') 94 | parser.add_argument('weights') 95 | parser.add_argument('output') 96 | args = parser.parse_args() 97 | ret = load_caffe(args.model, args.weights) 98 | 99 | import numpy as np 100 | np.save(args.output, ret) 101 | 102 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/logger.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: logger.py 3 | # Author: Yuxin Wu 4 | 5 | import logging 6 | import os, shutil 7 | import os.path 8 | from termcolor import colored 9 | from datetime import datetime 10 | from six.moves import input 11 | import sys 12 | 13 | from .utils import memoized 14 | 15 | __all__ = [] 16 | 17 | class _MyFormatter(logging.Formatter): 18 | def format(self, record): 19 | date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green') 20 | msg = '%(message)s' 21 | if record.levelno == logging.WARNING: 22 | fmt = date + ' ' + colored('WRN', 'red', attrs=['blink']) + ' ' + msg 23 | elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL: 24 | fmt = date + ' ' + colored('ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg 25 | else: 26 | fmt = date + ' ' + msg 27 | if hasattr(self, '_style'): 28 | # Python3 compatibilty 29 | self._style._fmt = fmt 30 | self._fmt = fmt 31 | return super(_MyFormatter, self).format(record) 32 | 33 | def _getlogger(): 34 | logger = logging.getLogger('tensorpack') 35 | logger.propagate = False 36 | logger.setLevel(logging.INFO) 37 | handler = logging.StreamHandler(sys.stdout) 38 | handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S')) 39 | logger.addHandler(handler) 40 | return logger 41 | _logger = _getlogger() 42 | 43 | 44 | def get_time_str(): 45 | return datetime.now().strftime('%m%d-%H%M%S') 46 | # logger file and directory: 47 | global LOG_FILE, LOG_DIR 48 | LOG_DIR = None 49 | def _set_file(path): 50 | if os.path.isfile(path): 51 | backup_name = path + '.' + get_time_str() 52 | shutil.move(path, backup_name) 53 | info("Log file '{}' backuped to '{}'".format(path, backup_name)) 54 | hdl = logging.FileHandler( 55 | filename=path, encoding='utf-8', mode='w') 56 | hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S')) 57 | _logger.addHandler(hdl) 58 | 59 | def set_logger_dir(dirname, action=None): 60 | """ 61 | Set the directory for global logging. 62 | :param dirname: log directory 63 | :param action: an action (k/b/d/n) to be performed. Will ask user by default. 64 | """ 65 | global LOG_FILE, LOG_DIR 66 | if os.path.isdir(dirname): 67 | if not action: 68 | _logger.warn("""\ 69 | Directory {} exists! 
Please either backup/delete it, or use a new directory.""".format(dirname)) 70 | _logger.warn("""\ 71 | If you're resuming from a previous run you can choose to keep it.""") 72 | _logger.info("Select Action: k (keep) / b (backup) / d (delete) / n (new):") 73 | while not action: 74 | action = input().lower().strip() 75 | act = action 76 | if act == 'b': 77 | backup_name = dirname + get_time_str() 78 | shutil.move(dirname, backup_name) 79 | info("Directory '{}' backuped to '{}'".format(dirname, backup_name)) 80 | elif act == 'd': 81 | shutil.rmtree(dirname) 82 | elif act == 'n': 83 | dirname = dirname + get_time_str() 84 | info("Use a new log directory {}".format(dirname)) 85 | elif act == 'k': 86 | pass 87 | else: 88 | raise ValueError("Unknown action: {}".format(act)) 89 | LOG_DIR = dirname 90 | from .fs import mkdir_p 91 | mkdir_p(dirname) 92 | LOG_FILE = os.path.join(dirname, 'log.log') 93 | _set_file(LOG_FILE) 94 | 95 | # export logger functions 96 | for func in ['info', 'warning', 'error', 'critical', 'warn', 'exception', 'debug']: 97 | locals()[func] = getattr(_logger, func) 98 | 99 | def disable_logger(): 100 | """ disable all logging ability from this moment""" 101 | for func in ['info', 'warning', 'error', 'critical', 'warn', 'exception', 'debug']: 102 | globals()[func] = lambda x: None 103 | 104 | def auto_set_dir(action=None, overwrite_setting=False): 105 | """ set log directory to a subdir inside 'train_log', with the name being 106 | the main python file currently running""" 107 | if LOG_DIR is not None and not overwrite_setting: 108 | return 109 | mod = sys.modules['__main__'] 110 | basename = os.path.basename(mod.__file__) 111 | set_logger_dir( 112 | os.path.join('train_log', 113 | basename[:basename.rfind('.')]), 114 | action=action) 115 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/lut.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: lut.py 4 | # Author: Yuxin Wu 5 | 6 | import six 7 | 8 | __all__ = ['LookUpTable'] 9 | 10 | class LookUpTable(object): 11 | def __init__(self, objlist): 12 | self.idx2obj = dict(enumerate(objlist)) 13 | self.obj2idx = {v : k for k, v in six.iteritems(self.idx2obj)} 14 | 15 | def size(self): 16 | return len(self.idx2obj) 17 | 18 | def get_obj(self, idx): 19 | return self.idx2obj[idx] 20 | 21 | def get_idx(self, obj): 22 | return self.obj2idx[obj] 23 | 24 | def __str__(self): 25 | return self.idx2obj.__str__() 26 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/naming.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: naming.py 3 | # Author: Yuxin Wu 4 | 5 | GLOBAL_STEP_OP_NAME = 'global_step' 6 | GLOBAL_STEP_VAR_NAME = 'global_step:0' 7 | 8 | # extra variables to summarize during training in a moving-average way 9 | MOVING_SUMMARY_VARS_KEY = 'MOVING_SUMMARY_VARIABLES' 10 | 11 | # placeholders for input variables 12 | INPUT_VARS_KEY = 'INPUT_VARIABLES' 13 | 14 | # variables that need to be saved for inference, apart from trainable variables 15 | EXTRA_SAVE_VARS_KEY = 'EXTRA_SAVE_VARIABLES' 16 | 17 | import tensorflow as tf 18 | SUMMARY_BACKUP_KEYS = [tf.GraphKeys.SUMMARIES, MOVING_SUMMARY_VARS_KEY] 19 | 20 | # export all upper case variables 21 | all_local_names = locals().keys() 22 | __all__ = [x for x in all_local_names if x.isupper()] 23 | 
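`naming.py` only defines the graph-collection keys shared across the code base. The sketch below (not from the repository, written against the TF 0.x API these files target) shows how `add_moving_summary` and `summary_moving_average` from `tfutils/summary.py` above communicate through `MOVING_SUMMARY_VARS_KEY`:

```python
# Hedged example: ties naming.py's MOVING_SUMMARY_VARS_KEY to the helpers in
# tensorpack/tfutils/summary.py shown earlier. Assumes TensorFlow 0.x and that
# the F1_track1/tensorpack directory is on sys.path.
import tensorflow as tf
from tensorpack.utils.naming import MOVING_SUMMARY_VARS_KEY
from tensorpack.tfutils.summary import add_moving_summary, summary_moving_average

cost = tf.Variable(0.0, trainable=False, name='cost')  # stand-in for a training cost
add_moving_summary(cost)                                # appends it to the collection

print(tf.get_collection(MOVING_SUMMARY_VARS_KEY))       # -> [the 'cost' variable]

# Builds an ExponentialMovingAverage over everything in the collection plus one
# scalar summary per tensor; the returned op is meant to run with each train step.
maintain_op = summary_moving_average()
```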
-------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/rect.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: rect.py 4 | # Author: Yuxin Wu 5 | 6 | import numpy as np 7 | 8 | class Rect(object): 9 | """ 10 | A Rectangle. 11 | Note that x1 = x+w, not x+w-1 or something 12 | """ 13 | __slots__ = ['x', 'y', 'w', 'h'] 14 | 15 | def __init__(self, x=0, y=0, w=0, h=0, allow_neg=False): 16 | self.x = x 17 | self.y = y 18 | self.w = w 19 | self.h = h 20 | if not allow_neg: 21 | assert min(self.x, self.y, self.w, self.h) >= 0 22 | 23 | @property 24 | def x0(self): 25 | return self.x 26 | 27 | @property 28 | def y0(self): 29 | return self.y 30 | 31 | @property 32 | def x1(self): 33 | return self.x + self.w 34 | 35 | @property 36 | def y1(self): 37 | return self.y + self.h 38 | 39 | def copy(self): 40 | new = type(self)() 41 | for i in self.__slots__: 42 | setattr(new, i, getattr(self, i)) 43 | return new 44 | 45 | def __str__(self): 46 | return 'Rect(x={}, y={}, w={}, h={})'.format(self.x, self.y, self.w, self.h) 47 | 48 | def area(self): 49 | return self.w * self.h 50 | 51 | def validate(self, shape=None): 52 | """ 53 | Is a valid bounding box within this shape 54 | :param shape: [h, w] 55 | :returns: boolean 56 | """ 57 | if min(self.x, self.y) < 0: 58 | return False 59 | if min(self.w, self.h) <= 0: 60 | return False 61 | if shape is None: 62 | return True 63 | if self.x1 > shape[1] - 1: 64 | return False 65 | if self.y1 > shape[0] - 1: 66 | return False 67 | return True 68 | 69 | def roi(self, img): 70 | assert self.validate(img.shape[:2]), "{} vs {}".format(self, img.shape[:2]) 71 | return img[self.y0:self.y1+1, self.x0:self.x1+1] 72 | 73 | def expand(self, frac): 74 | assert frac > 1.0, frac 75 | neww = self.w * frac 76 | newh = self.h * frac 77 | newx = self.x - (neww - self.w) * 0.5 78 | newy = self.y - (newh - self.h) * 0.5 79 | return Rect(*(map(int, [newx, newy, neww, newh])), allow_neg=True) 80 | 81 | def roi_zeropad(self, img): 82 | shp = list(img.shape) 83 | shp[0] = self.h 84 | shp[1] = self.w 85 | ret = np.zeros(tuple(shp), dtype=img.dtype) 86 | 87 | xstart = 0 if self.x >= 0 else -self.x 88 | ystart = 0 if self.y >= 0 else -self.y 89 | 90 | xmin = max(self.x0, 0) 91 | ymin = max(self.y0, 0) 92 | xmax = min(self.x1, img.shape[1]) 93 | ymax = min(self.y1, img.shape[0]) 94 | patch = img[ymin:ymax, xmin:xmax] 95 | ret[ystart:ystart+patch.shape[0],xstart:xstart+patch.shape[1]] = patch 96 | return ret 97 | 98 | __repr__ = __str__ 99 | 100 | 101 | if __name__ == '__main__': 102 | x = Rect(2, 1, 3, 3, allow_neg=True) 103 | 104 | img = np.random.rand(3,3) 105 | print(img) 106 | print(x.roi_zeropad(img)) 107 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/serialize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: serialize.py 4 | # Author: Yuxin Wu 5 | 6 | import msgpack 7 | import msgpack_numpy 8 | msgpack_numpy.patch() 9 | #import dill 10 | 11 | __all__ = ['loads', 'dumps'] 12 | 13 | def dumps(obj): 14 | #return dill.dumps(obj) 15 | return msgpack.dumps(obj, use_bin_type=True) 16 | 17 | def loads(buf): 18 | #return dill.loads(buf) 19 | return msgpack.loads(buf) 20 | -------------------------------------------------------------------------------- 
/f1/F1_track1/tensorpack/utils/stat.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: stat.py 3 | # Author: Yuxin Wu 4 | import numpy as np 5 | 6 | __all__ = ['StatCounter', 'Accuracy', 'BinaryStatistics', 'RatioCounter'] 7 | 8 | class StatCounter(object): 9 | """ A simple counter""" 10 | def __init__(self): 11 | self.reset() 12 | 13 | def feed(self, v): 14 | self._values.append(v) 15 | 16 | def reset(self): 17 | self._values = [] 18 | 19 | @property 20 | def count(self): 21 | return len(self._values) 22 | 23 | @property 24 | def average(self): 25 | assert len(self._values) 26 | return np.mean(self._values) 27 | 28 | @property 29 | def sum(self): 30 | assert len(self._values) 31 | return np.sum(self._values) 32 | 33 | @property 34 | def max(self): 35 | assert len(self._values) 36 | return max(self._values) 37 | 38 | class RatioCounter(object): 39 | """ A counter to count ratio of something""" 40 | def __init__(self): 41 | self.reset() 42 | 43 | def reset(self): 44 | self._tot = 0 45 | self._cnt = 0 46 | 47 | def feed(self, cnt, tot=1): 48 | self._tot += tot 49 | self._cnt += cnt 50 | 51 | @property 52 | def ratio(self): 53 | if self._tot == 0: 54 | return 0 55 | return self._cnt * 1.0 / self._tot 56 | 57 | @property 58 | def count(self): 59 | return self._tot 60 | 61 | class Accuracy(RatioCounter): 62 | """ A RatioCounter with a fancy name """ 63 | @property 64 | def accuracy(self): 65 | return self.ratio 66 | 67 | class BinaryStatistics(object): 68 | """ 69 | Statistics for binary decision, 70 | including precision, recall, false positive, false negative 71 | """ 72 | def __init__(self): 73 | self.reset() 74 | 75 | def reset(self): 76 | self.nr_pos = 0 # positive label 77 | self.nr_neg = 0 # negative label 78 | self.nr_pred_pos = 0 79 | self.nr_pred_neg = 0 80 | self.corr_pos = 0 # correct predict positive 81 | self.corr_neg = 0 # correct predict negative 82 | 83 | def feed(self, pred, label): 84 | """ 85 | :param pred: 0/1 np array 86 | :param label: 0/1 np array of the same size 87 | """ 88 | assert pred.shape == label.shape 89 | self.nr_pos += (label == 1).sum() 90 | self.nr_neg += (label == 0).sum() 91 | self.nr_pred_pos += (pred == 1).sum() 92 | self.nr_pred_neg += (pred == 0).sum() 93 | self.corr_pos += ((pred == 1) & (pred == label)).sum() 94 | self.corr_neg += ((pred == 0) & (pred == label)).sum() 95 | 96 | @property 97 | def precision(self): 98 | if self.nr_pred_pos == 0: 99 | return 0 100 | return self.corr_pos * 1. / self.nr_pred_pos 101 | 102 | @property 103 | def recall(self): 104 | if self.nr_pos == 0: 105 | return 0 106 | return self.corr_pos * 1. / self.nr_pos 107 | 108 | @property 109 | def false_positive(self): 110 | if self.nr_pred_pos == 0: 111 | return 0 112 | return 1 - self.precision 113 | 114 | @property 115 | def false_negative(self): 116 | if self.nr_pos == 0: 117 | return 0 118 | return 1 - self.recall 119 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/timer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # File: timer.py 4 | # Author: Yuxin Wu 5 | 6 | 7 | from contextlib import contextmanager 8 | import time 9 | from collections import defaultdict 10 | import six 11 | import atexit 12 | 13 | from .stat import StatCounter 14 | from . 
import logger 15 | 16 | __all__ = ['total_timer', 'timed_operation', 17 | 'print_total_timer', 'IterSpeedCounter'] 18 | 19 | class IterSpeedCounter(object): 20 | """ To count how often some code gets reached""" 21 | def __init__(self, print_every, name=None): 22 | self.cnt = 0 23 | self.print_every = int(print_every) 24 | self.name = name if name else 'IterSpeed' 25 | 26 | def reset(self): 27 | self.start = time.time() 28 | 29 | def __call__(self): 30 | if self.cnt == 0: 31 | self.reset() 32 | self.cnt += 1 33 | if self.cnt % self.print_every != 0: 34 | return 35 | t = time.time() - self.start 36 | logger.info("{}: {:.2f} sec, {} times, {:.3g} sec/time".format( 37 | self.name, t, self.cnt, t / self.cnt)) 38 | 39 | @contextmanager 40 | def timed_operation(msg, log_start=False): 41 | if log_start: 42 | logger.info('Start {} ...'.format(msg)) 43 | start = time.time() 44 | yield 45 | logger.info('{} finished, time={:.2f}sec.'.format( 46 | msg, time.time() - start)) 47 | 48 | _TOTAL_TIMER_DATA = defaultdict(StatCounter) 49 | 50 | @contextmanager 51 | def total_timer(msg): 52 | start = time.time() 53 | yield 54 | t = time.time() - start 55 | _TOTAL_TIMER_DATA[msg].feed(t) 56 | 57 | def print_total_timer(): 58 | if len(_TOTAL_TIMER_DATA) == 0: 59 | return 60 | for k, v in six.iteritems(_TOTAL_TIMER_DATA): 61 | logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format( 62 | k, v.sum, v.count, v.average)) 63 | 64 | atexit.register(print_total_timer) 65 | -------------------------------------------------------------------------------- /f1/F1_track1/tensorpack/utils/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: UTF-8 -*- 2 | # File: utils.py 3 | # Author: Yuxin Wu 4 | 5 | import os, sys 6 | from contextlib import contextmanager 7 | import inspect, functools 8 | from datetime import datetime 9 | import time 10 | import collections 11 | import numpy as np 12 | import six 13 | 14 | __all__ = ['change_env', 15 | 'map_arg', 16 | 'get_rng', 'memoized', 17 | 'get_dataset_path', 18 | 'get_tqdm_kwargs' 19 | ] 20 | 21 | #def expand_dim_if_necessary(var, dp): 22 | # """ 23 | # Args: 24 | # var: a tensor 25 | # dp: a numpy array 26 | # Return a reshaped version of dp, if that makes it match the valid dimension of var 27 | # """ 28 | # shape = var.get_shape().as_list() 29 | # valid_shape = [k for k in shape if k] 30 | # if dp.shape == tuple(valid_shape): 31 | # new_shape = [k if k else 1 for k in shape] 32 | # dp = dp.reshape(new_shape) 33 | # return dp 34 | 35 | @contextmanager 36 | def change_env(name, val): 37 | oldval = os.environ.get(name, None) 38 | os.environ[name] = val 39 | yield 40 | if oldval is None: 41 | del os.environ[name] 42 | else: 43 | os.environ[name] = oldval 44 | 45 | class memoized(object): 46 | '''Decorator. Caches a function's return value each time it is called. 47 | If called later with the same arguments, the cached value is returned 48 | (not reevaluated). 49 | ''' 50 | def __init__(self, func): 51 | self.func = func 52 | self.cache = {} 53 | 54 | def __call__(self, *args): 55 | if not isinstance(args, collections.Hashable): 56 | # uncacheable. a list, for instance. 57 | # better to not cache than blow up. 
58 | return self.func(*args) 59 | if args in self.cache: 60 | return self.cache[args] 61 | else: 62 | value = self.func(*args) 63 | self.cache[args] = value 64 | return value 65 | 66 | def __repr__(self): 67 | '''Return the function's docstring.''' 68 | return self.func.__doc__ 69 | 70 | def __get__(self, obj, objtype): 71 | '''Support instance methods.''' 72 | return functools.partial(self.__call__, obj) 73 | 74 | 75 | #_GLOBAL_MEMOIZED_CACHE = dict() 76 | #def global_memoized(func): 77 | #""" Make sure that the same `memoized` object is returned on different 78 | #calls to global_memoized(func) 79 | #""" 80 | #ret = _GLOBAL_MEMOIZED_CACHE.get(func, None) 81 | #if ret is None: 82 | #ret = _GLOBAL_MEMOIZED_CACHE[func] = memoized(func) 83 | #return ret 84 | 85 | def map_arg(**maps): 86 | """ 87 | Apply a mapping on certains argument before calling original function. 88 | maps: {key: map_func} 89 | """ 90 | def deco(func): 91 | @functools.wraps(func) 92 | def wrapper(*args, **kwargs): 93 | argmap = inspect.getcallargs(func, *args, **kwargs) 94 | for k, map_func in six.iteritems(maps): 95 | if k in argmap: 96 | argmap[k] = map_func(argmap[k]) 97 | return func(**argmap) 98 | return wrapper 99 | return deco 100 | 101 | def get_rng(obj=None): 102 | """ obj: some object to use to generate random seed""" 103 | seed = (id(obj) + os.getpid() + 104 | int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295 105 | return np.random.RandomState(seed) 106 | 107 | def get_dataset_path(*args): 108 | from . import logger 109 | d = os.environ.get('TENSORPACK_DATASET', None) 110 | if d is None: 111 | d = os.path.abspath(os.path.join( 112 | os.path.dirname(__file__), '..', 'dataflow', 'dataset')) 113 | logger.info("TENSORPACK_DATASET not set, using {} for dataset.".format(d)) 114 | assert os.path.isdir(d), d 115 | return os.path.join(d, *args) 116 | 117 | 118 | def get_tqdm_kwargs(**kwargs): 119 | default = dict( 120 | smoothing=0.5, 121 | dynamic_ncols=True, 122 | ascii=True, 123 | bar_format='{l_bar}{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_noinv_fmt}]' 124 | ) 125 | f = kwargs.get('file', sys.stderr) 126 | if f.isatty(): 127 | default['mininterval'] = 0.5 128 | else: 129 | default['mininterval'] = 60 130 | default.update(kwargs) 131 | return default 132 | -------------------------------------------------------------------------------- /host/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # ViZdoom dependencies 4 | RUN apt-get update && apt-get install -y \ 5 | build-essential \ 6 | bzip2 \ 7 | cmake \ 8 | curl \ 9 | git \ 10 | libboost-all-dev \ 11 | libbz2-dev \ 12 | libfluidsynth-dev \ 13 | libfreetype6-dev \ 14 | libgme-dev \ 15 | libgtk2.0-dev \ 16 | libjpeg-dev \ 17 | libopenal-dev \ 18 | libpng12-dev \ 19 | libsdl2-dev \ 20 | libwildmidi-dev \ 21 | libzmq3-dev \ 22 | nano \ 23 | nasm \ 24 | pkg-config \ 25 | rsync \ 26 | software-properties-common \ 27 | sudo \ 28 | tar \ 29 | timidity \ 30 | unzip \ 31 | wget \ 32 | locales \ 33 | zlib1g-dev \ 34 | python3-dev \ 35 | python3 \ 36 | python3-pip 37 | 38 | # Python3 39 | RUN pip3 install pip --upgrade 40 | 41 | RUN pip3 --no-cache-dir install \ 42 | git+https://github.com/mwydmuch/ViZDoom \ 43 | tabulate 44 | 45 | # Unicode support: 46 | RUN locale-gen en_US.UTF-8 47 | ENV LANG en_US.UTF-8 48 | ENV LANGUAGE en_US:en 49 | ENV LC_ALL en_US.UTF-8 50 | 51 | # Enables X11 sharing and creates user home directory 52 | ENV USER_NAME cig2017 53 | ENV HOME_DIR 
/home/$USER_NAME 54 | # 55 | # Replace HOST_UID/HOST_GUID with your user / group id (needed for X11) 56 | ENV HOST_UID 1000 57 | ENV HOST_GID 1000 58 | 59 | RUN export uid=${HOST_UID} gid=${HOST_GID} && \ 60 | mkdir -p ${HOME_DIR} && \ 61 | echo "$USER_NAME:x:${uid}:${gid}:$USER_NAME,,,:$HOME_DIR:/bin/bash" >> /etc/passwd && \ 62 | echo "$USER_NAME:x:${uid}:" >> /etc/group && \ 63 | echo "$USER_NAME ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/$USER_NAME && \ 64 | chmod 0440 /etc/sudoers.d/$USER_NAME && \ 65 | chown ${uid}:${gid} -R ${HOME_DIR} 66 | 67 | USER ${USER_NAME} 68 | WORKDIR ${HOME_DIR} 69 | 70 | 71 | 72 | COPY host.py . 73 | COPY cig2017.wad . 74 | COPY _vizdoom.ini . 75 | RUN sudo chown ${HOST_UID}:${HOST_GID} -R * 76 | 77 | 78 | # Uncomment to use doom2.wad: 79 | #COPY doom2.wad /usr/local/lib/python3.5/dist-packages/vizdoom 80 | 81 | 82 | ENTRYPOINT ["./host.py"] 83 | -------------------------------------------------------------------------------- /intelact/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # Cuda 7.5 with cudnn 4.0.7 4 | FROM nvidia/cuda:7.5-devel 5 | 6 | RUN echo "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1404/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list 7 | ENV CUDNN_VERSION 4 8 | RUN apt-get update && apt-get install -y --no-install-recommends \ 9 | libcudnn4=4.0.7 libcudnn4-dev=4.0.7 10 | 11 | # ViZdoom dependencies 12 | RUN apt-get update && apt-get install -y \ 13 | build-essential \ 14 | bzip2 \ 15 | cmake \ 16 | curl \ 17 | git \ 18 | libboost-all-dev \ 19 | libbz2-dev \ 20 | libfluidsynth-dev \ 21 | libfreetype6-dev \ 22 | libgme-dev \ 23 | libgtk2.0-dev \ 24 | libjpeg-dev \ 25 | libopenal-dev \ 26 | libpng12-dev \ 27 | libsdl2-dev \ 28 | libwildmidi-dev \ 29 | libzmq3-dev \ 30 | nano \ 31 | nasm \ 32 | pkg-config \ 33 | rsync \ 34 | software-properties-common \ 35 | sudo \ 36 | tar \ 37 | timidity \ 38 | unzip \ 39 | wget \ 40 | zlib1g-dev 41 | 42 | RUN apt-get update && apt-get install -y dbus 43 | 44 | 45 | 46 | # Python with pip 47 | RUN apt-get update && apt-get install -y python-dev python python-pip 48 | RUN pip install pip --upgrade 49 | 50 | 51 | 52 | 53 | # Vizdoom and other pip packages if needed 54 | RUN pip --no-cache-dir install \ 55 | git+https://github.com/mwydmuch/ViZDoom 56 | 57 | RUN pip --no-cache-dir install \ 58 | https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0-cp27-none-linux_x86_64.whl 59 | RUN pip --no-cache-dir install opencv-python termcolor tqdm subprocess32 msgpack-python msgpack-numpy 60 | 61 | 62 | # Enables X11 sharing and creates user home directory 63 | ENV USER_NAME cig2017 64 | ENV HOME_DIR /home/$USER_NAME 65 | 66 | # Replace HOST_UID/HOST_GUID with your user / group id (needed for X11) 67 | ENV HOST_UID 1000 68 | ENV HOST_GID 1000 69 | 70 | RUN export uid=${HOST_UID} gid=${HOST_GID} && \ 71 | mkdir -p ${HOME_DIR} && \ 72 | echo "$USER_NAME:x:${uid}:${gid}:$USER_NAME,,,:$HOME_DIR:/bin/bash" >> /etc/passwd && \ 73 | echo "$USER_NAME:x:${uid}:" >> /etc/group && \ 74 | echo "$USER_NAME ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/$USER_NAME && \ 75 | chmod 0440 /etc/sudoers.d/$USER_NAME && \ 76 | chown ${uid}:${gid} -R ${HOME_DIR} 77 | 78 | USER ${USER_NAME} 79 | WORKDIR ${HOME_DIR} 80 | 81 | 82 | # Copy agent files inside Docker image: 83 | COPY IntelAct_track2 . 84 | 85 | 86 | ### Do not change this ### 87 | COPY cig2017.wad . 88 | COPY _vizdoom.cfg . 
89 | ########################## 90 | RUN sudo chown ${HOST_UID}:${HOST_GID} -R * 91 | 92 | 93 | CMD taskset -c 1 python run_agent.py 94 | -------------------------------------------------------------------------------- /intelact/IntelAct_track2/agent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/intelact/IntelAct_track2/agent/__init__.py -------------------------------------------------------------------------------- /intelact/IntelAct_track2/agent/doom_simulator.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import sys 3 | 4 | sys.path.append('./bin/python') 5 | import vizdoom 6 | import random 7 | import time 8 | import numpy as np 9 | import re 10 | 11 | 12 | class DoomSimulator: 13 | def __init__(self, args): 14 | 15 | self.config = args['config'] 16 | self.resolution = args['resolution'] 17 | self.frame_skip = args['frame_skip'] 18 | self.color_mode = args['color_mode'] 19 | self.game_args = args['game_args'] 20 | 21 | self._game = vizdoom.DoomGame() 22 | self._game.load_config(self.config) 23 | self._game.add_game_args(self.game_args) 24 | 25 | if 'ticrate' in args: 26 | self._game.set_ticrate(args['ticrate']) 27 | 28 | # set resolution 29 | try: 30 | self._game.set_screen_resolution(getattr(vizdoom.ScreenResolution, 'RES_%dX%d' % self.resolution)) 31 | except: 32 | print("Requested resolution not supported:", sys.exc_info()[0]) 33 | raise 34 | 35 | # set color mode 36 | if self.color_mode == 'RGB': 37 | self._game.set_screen_format(vizdoom.ScreenFormat.CRCGCB) 38 | self.num_channels = 3 39 | elif self.color_mode == 'GRAY': 40 | self._game.set_screen_format(vizdoom.ScreenFormat.GRAY8) 41 | self.num_channels = 1 42 | else: 43 | print("Unknown color mode") 44 | raise 45 | 46 | self.available_controls, self.continuous_controls, self.discrete_controls = self.analyze_controls(self.config) 47 | self.num_buttons = self._game.get_available_buttons_size() 48 | assert (self.num_buttons == len(self.discrete_controls) + len(self.continuous_controls)) 49 | assert (len(self.continuous_controls) == 0) # only discrete for now 50 | self.num_meas = self._game.get_available_game_variables_size() 51 | 52 | self.game_initialized = False 53 | 54 | def close_game(self): 55 | self._game.close() 56 | 57 | def analyze_controls(self, config_file): 58 | with open(config_file, 'r') as myfile: 59 | config = myfile.read() 60 | m = re.search('available_buttons[\s]*\=[\s]*\{([^\}]*)\}', config) 61 | avail_controls = m.group(1).split() 62 | cont_controls = np.array([bool(re.match('.*_DELTA', c)) for c in avail_controls]) 63 | discr_controls = np.invert(cont_controls) 64 | return avail_controls, np.squeeze(np.nonzero(cont_controls)), np.squeeze(np.nonzero(discr_controls)) 65 | 66 | def step(self, action): 67 | 68 | if not self.game_initialized: 69 | self._game.init() 70 | self.game_initialized = True 71 | 72 | rwrd = self._game.make_action(action, self.frame_skip) 73 | if self._game.is_episode_finished(): 74 | return None, None, rwrd, True 75 | state = self._game.get_state() 76 | 77 | img = state.screen_buffer 78 | meas = state.game_variables # this is a numpy array of game variables specified by the scenario 79 | term = self._game.is_episode_finished() 80 | 81 | return img, meas, rwrd, term 82 | 83 | def get_random_action(self): 84 | return [(random.random() >= .5) for i in 
range(self.num_buttons)] 85 | 86 | def is_new_episode(self): 87 | return self._game.is_new_episode() 88 | -------------------------------------------------------------------------------- /intelact/IntelAct_track2/agent/ops.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | get_stddev = lambda x, k_h, k_w: 1/np.sqrt(0.5*k_w*k_h*x.get_shape().as_list()[-1]) 5 | 6 | def conv2d(input_, output_dim, 7 | k_h=3, k_w=3, d_h=2, d_w=2, msra_coeff=1, 8 | name="conv2d"): 9 | with tf.variable_scope(name): 10 | w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], 11 | initializer=tf.truncated_normal_initializer(stddev=msra_coeff * get_stddev(input_, k_h, k_w))) 12 | b = tf.get_variable('b', [output_dim], initializer=tf.constant_initializer(0.0)) 13 | 14 | return tf.nn.bias_add(tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME'), b) 15 | 16 | def lrelu(x, leak=0.2, name="lrelu"): 17 | with tf.variable_scope(name): 18 | f1 = 0.5 * (1 + leak) 19 | f2 = 0.5 * (1 - leak) 20 | return f1 * x + f2 * abs(x) 21 | 22 | def linear(input_, output_size, name='linear', msra_coeff=1): 23 | shape = input_.get_shape().as_list() 24 | 25 | with tf.variable_scope(name): 26 | w = tf.get_variable("w", [shape[1], output_size], tf.float32, 27 | tf.random_normal_initializer(stddev=msra_coeff * get_stddev(input_, 1, 1))) 28 | b = tf.get_variable("b", [output_size], initializer=tf.constant_initializer(0.0)) 29 | return tf.matmul(input_, w) + b 30 | 31 | def conv_encoder(data, params, name, msra_coeff=1): 32 | layers = [] 33 | for nl, param in enumerate(params): 34 | if len(layers) == 0: 35 | curr_inp = data 36 | else: 37 | curr_inp = layers[-1] 38 | 39 | layers.append(lrelu(conv2d(curr_inp, param['out_channels'], k_h=param['kernel'], k_w=param['kernel'], d_h=param['stride'], d_w=param['stride'], name=name + str(nl), msra_coeff=msra_coeff))) 40 | 41 | return layers[-1] 42 | 43 | def fc_net(data, params, name, last_linear = False, return_layers = [-1], msra_coeff=1): 44 | layers = [] 45 | for nl, param in enumerate(params): 46 | if len(layers) == 0: 47 | curr_inp = data 48 | else: 49 | curr_inp = layers[-1] 50 | 51 | if nl == len(params) - 1 and last_linear: 52 | layers.append(linear(curr_inp, param['out_dims'], name=name + str(nl), msra_coeff=msra_coeff)) 53 | else: 54 | layers.append(lrelu(linear(curr_inp, param['out_dims'], name=name + str(nl), msra_coeff=msra_coeff))) 55 | 56 | if len(return_layers) == 1: 57 | return layers[return_layers[0]] 58 | else: 59 | return [layers[nl] for nl in return_layers] 60 | 61 | def flatten(data): 62 | return tf.reshape(data, [-1, np.prod(data.get_shape().as_list()[1:])]) -------------------------------------------------------------------------------- /intelact/IntelAct_track2/checkpoints/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "net" 2 | all_model_checkpoint_paths: "net" 3 | -------------------------------------------------------------------------------- /intelact/IntelAct_track2/checkpoints/net: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/intelact/IntelAct_track2/checkpoints/net -------------------------------------------------------------------------------- /intelact/IntelAct_track2/checkpoints/net.meta: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/intelact/IntelAct_track2/checkpoints/net.meta -------------------------------------------------------------------------------- /intelact/IntelAct_track2/config/config.cfg: -------------------------------------------------------------------------------- 1 | 2 | # Change if needed 3 | screen_resolution = RES_160X120 4 | screen_format = GRAY8 5 | render_hud = false 6 | render_crosshair = true 7 | render_weapon = false 8 | render_decals = false 9 | render_particles = false 10 | window_visible = false 11 | 12 | # Add more if needed 13 | available_buttons = 14 | { 15 | MOVE_FORWARD 16 | MOVE_BACKWARD 17 | TURN_LEFT 18 | TURN_RIGHT 19 | ATTACK 20 | SPEED 21 | SELECT_WEAPON2 22 | SELECT_WEAPON3 23 | SELECT_WEAPON4 24 | SELECT_WEAPON5 25 | SELECT_WEAPON6 26 | SELECT_WEAPON7 27 | } 28 | 29 | # Add more if needed 30 | available_game_variables = 31 | { 32 | SELECTED_WEAPON_AMMO 33 | HEALTH 34 | FRAGCOUNT 35 | AMMO2 36 | AMMO3 37 | AMMO4 38 | AMMO5 39 | AMMO6 40 | AMMO7 41 | WEAPON2 42 | WEAPON3 43 | WEAPON4 44 | WEAPON5 45 | WEAPON6 46 | WEAPON7 47 | SELECTED_WEAPON 48 | } -------------------------------------------------------------------------------- /no_host/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # Cuda 7.5 with cudnn 5 4 | #FROM nvidia/cuda:7.5-cudnn5-devel 5 | # Cuda 8 with cudnn 5 6 | FROM nvidia/cuda:8.0-cudnn5-devel 7 | 8 | # ViZdoom dependencies 9 | RUN apt-get update && apt-get install -y \ 10 | build-essential \ 11 | bzip2 \ 12 | cmake \ 13 | curl \ 14 | git \ 15 | libboost-all-dev \ 16 | libbz2-dev \ 17 | libfluidsynth-dev \ 18 | libfreetype6-dev \ 19 | libgme-dev \ 20 | libgtk2.0-dev \ 21 | libjpeg-dev \ 22 | libopenal-dev \ 23 | libpng12-dev \ 24 | libsdl2-dev \ 25 | libwildmidi-dev \ 26 | libzmq3-dev \ 27 | nano \ 28 | nasm \ 29 | pkg-config \ 30 | rsync \ 31 | software-properties-common \ 32 | sudo \ 33 | tar \ 34 | timidity \ 35 | unzip \ 36 | wget \ 37 | zlib1g-dev \ 38 | python3-dev \ 39 | python3 \ 40 | python3-pip 41 | 42 | 43 | 44 | # Python with pip 45 | #RUN apt-get update && apt-get install -y python-dev python python-pip 46 | #RUN pip install pip --upgrade 47 | 48 | 49 | 50 | # Vizdoom and other pip packages if needed 51 | #RUN pip --no-cache-dir install \ 52 | # git+https://github.com/mwydmuch/ViZDoom \ 53 | # numpy \ 54 | #RUN pip --no-cache-dir install \ 55 | # https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl 56 | 57 | 58 | # Vizdoom and other pip3 packages if needed 59 | RUN pip3 --no-cache-dir install \ 60 | git+https://github.com/mwydmuch/ViZDoom \ 61 | opencv-python 62 | 63 | RUN pip3 --no-cache-dir install \ 64 | https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp35-cp35m-linux_x86_64.whl 65 | 66 | 67 | # Enables X11 sharing and creates user home directory 68 | ENV USER_NAME cig2017 69 | ENV HOME_DIR /home/$USER_NAME 70 | 71 | # Replace HOST_UID/HOST_GUID with your user / group id (needed for X11) 72 | ENV HOST_UID 1000 73 | ENV HOST_GID 1000 74 | 75 | RUN export uid=${HOST_UID} gid=${HOST_GID} && \ 76 | mkdir -p ${HOME_DIR} && \ 77 | echo "$USER_NAME:x:${uid}:${gid}:$USER_NAME,,,:$HOME_DIR:/bin/bash" >> /etc/passwd && \ 78 | echo "$USER_NAME:x:${uid}:" >> /etc/group && \ 79 | echo "$USER_NAME ALL=(ALL) NOPASSWD: ALL" > 
/etc/sudoers.d/$USER_NAME && \ 80 | chmod 0440 /etc/sudoers.d/$USER_NAME && \ 81 | chown ${uid}:${gid} -R ${HOME_DIR} 82 | 83 | USER ${USER_NAME} 84 | WORKDIR ${HOME_DIR} 85 | 86 | 87 | # Copy agent files inside Docker image: 88 | COPY config config 89 | COPY no_host.py . 90 | 91 | 92 | ### Do not change this ### 93 | COPY cig2017.wad . 94 | COPY _nohost_vizdoom.cfg _vizdoom.cfg 95 | ########################## 96 | # Uncomment to use doom2.wad: 97 | #COPY doom2.wad /usr/local/lib/python3.5/dist-packages/vizdoom 98 | 99 | CMD ./no_host.py -------------------------------------------------------------------------------- /no_host/_nohost_vizdoom.cfg: -------------------------------------------------------------------------------- 1 | 2 | game_args += -deathmatch +viz_nocheat 1 +viz_debug 0 +viz_respawn_delay 10 3 | game_args += -host 1 +timelimit 10 4 | game_args += +sv_forcerespawn 1 +sv_noautoaim 1 +sv_respawnprotect 1 +sv_spawnfarthest 1 +sv_crouch 1 5 | doom_map = map01 6 | doom_scenario_path = cig2017.wad 7 | window_visible = False 8 | mode = PLAYER 9 | 10 | -------------------------------------------------------------------------------- /no_host/config/custom_config.cfg: -------------------------------------------------------------------------------- 1 | # Change if needed 2 | screen_resolution = RES_640X480 3 | screen_format = CRCGCB 4 | render_hud = true 5 | render_crosshair = true 6 | render_weapon = true 7 | render_decals = false 8 | render_particles = false 9 | 10 | # Add more if needed 11 | available_buttons = 12 | { 13 | TURN_LEFT 14 | TURN_RIGHT 15 | ATTACK 16 | 17 | MOVE_RIGHT 18 | MOVE_LEFT 19 | 20 | MOVE_FORWARD 21 | MOVE_BACKWARD 22 | TURN_LEFT_RIGHT_DELTA 23 | LOOK_UP_DOWN_DELTA 24 | } 25 | 26 | 27 | # Add more if needed 28 | available_game_variables = 29 | { 30 | HEALTH 31 | } -------------------------------------------------------------------------------- /no_host/no_host.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import vizdoom as vzd 4 | from random import choice 5 | 6 | bots_num = 10 7 | map = 1 8 | 9 | 10 | game = vzd.DoomGame() 11 | game.load_config("config/custom_config.cfg") 12 | 13 | # Name your agent and select color 14 | # colors: 0 - green, 1 - gray, 2 - brown, 3 - red, 4 - light gray, 5 - light brown, 6 - light red, 7 - light blue 15 | name = "RandomAgent" 16 | color = 0 17 | game.add_game_args("+name {} +colorset {}".format(name, color)) 18 | game.init() 19 | 20 | # Three sample actions: turn left/right and shoot 21 | actions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] 22 | 23 | 24 | # Add bots 25 | for i in range(bots_num): 26 | game.send_game_command("addbot") 27 | 28 | # Play until the game (episode) is over. 29 | 30 | while not game.is_episode_finished(): 31 | 32 | if game.is_player_dead(): 33 | # Use this to respawn immediately after death, new state will be available. 34 | game.respawn_player() 35 | 36 | # Or observe the game until automatic respawn. 37 | # game.advance_action(); 38 | # continue; 39 | 40 | # Analyze the state ... or not 41 | s = game.get_state() 42 | 43 | # Make your action. 
44 | game.make_action(choice(actions)) 45 | 46 | # Log your frags every ~5 seconds 47 | if s.number % 175 == 0: 48 | print("Frags:", game.get_game_variable(vzd.GameVariable.FRAGCOUNT)) 49 | 50 | game.close() 51 | -------------------------------------------------------------------------------- /random/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # Cuda 7.5 with cudnn 5 4 | #FROM nvidia/cuda:7.5-cudnn5-devel 5 | # Cuda 8 with cudnn 5 6 | FROM nvidia/cuda:8.0-cudnn5-devel 7 | 8 | # ViZdoom dependencies 9 | RUN apt-get update && apt-get install -y \ 10 | build-essential \ 11 | bzip2 \ 12 | cmake \ 13 | curl \ 14 | git \ 15 | libboost-all-dev \ 16 | libbz2-dev \ 17 | libfluidsynth-dev \ 18 | libfreetype6-dev \ 19 | libgme-dev \ 20 | libgtk2.0-dev \ 21 | libjpeg-dev \ 22 | libopenal-dev \ 23 | libpng12-dev \ 24 | libsdl2-dev \ 25 | libwildmidi-dev \ 26 | libzmq3-dev \ 27 | nano \ 28 | nasm \ 29 | pkg-config \ 30 | rsync \ 31 | software-properties-common \ 32 | sudo \ 33 | tar \ 34 | timidity \ 35 | unzip \ 36 | wget \ 37 | zlib1g-dev \ 38 | python3-dev \ 39 | python3 \ 40 | python3-pip 41 | 42 | 43 | 44 | # Python with pip 45 | #RUN apt-get install -y python-dev python python-pip 46 | #RUN pip install pip --upgrade 47 | 48 | # Python3 with pip3 49 | RUN pip3 install pip --upgrade 50 | 51 | 52 | 53 | # Vizdoom and other pip packages if needed 54 | #RUN pip --no-cache-dir install \ 55 | # git+https://github.com/mwydmuch/ViZDoom \ 56 | # numpy \ 57 | #RUN pip --no-cache-dir install \ 58 | # https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.10.0-cp27-none-linux_x86_64.whl 59 | 60 | 61 | # Vizdoom and other pip3 packages if needed 62 | RUN pip3 --no-cache-dir install \ 63 | git+https://github.com/mwydmuch/ViZDoom \ 64 | opencv-python 65 | 66 | RUN pip3 --no-cache-dir install \ 67 | https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow_gpu-1.0.1-cp35-cp35m-linux_x86_64.whl 68 | 69 | 70 | # Enables X11 sharing and creates user home directory 71 | ENV USER_NAME cig2017 72 | ENV HOME_DIR /home/$USER_NAME 73 | 74 | # Replace HOST_UID/HOST_GUID with your user / group id (needed for X11) 75 | ENV HOST_UID 1000 76 | ENV HOST_GID 1000 77 | 78 | RUN export uid=${HOST_UID} gid=${HOST_GID} && \ 79 | mkdir -p ${HOME_DIR} && \ 80 | echo "$USER_NAME:x:${uid}:${gid}:$USER_NAME,,,:$HOME_DIR:/bin/bash" >> /etc/passwd && \ 81 | echo "$USER_NAME:x:${uid}:" >> /etc/group && \ 82 | echo "$USER_NAME ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/$USER_NAME && \ 83 | chmod 0440 /etc/sudoers.d/$USER_NAME && \ 84 | chown ${uid}:${gid} -R ${HOME_DIR} 85 | 86 | USER ${USER_NAME} 87 | WORKDIR ${HOME_DIR} 88 | 89 | 90 | # Copy agent files inside Docker image: 91 | COPY config config 92 | COPY sample_random_agent.py . 93 | 94 | 95 | ### Do not change this ### 96 | COPY cig2017.wad . 97 | COPY _vizdoom.cfg . 
98 | ########################## 99 | # Uncomment to use doom2.wad: 100 | #COPY doom2.wad /usr/local/lib/python3.5/dist-packages/vizdoom 101 | 102 | CMD ./sample_random_agent.py 103 | -------------------------------------------------------------------------------- /random/config/custom_config.cfg: -------------------------------------------------------------------------------- 1 | # Change if needed 2 | screen_resolution = RES_640X480 3 | screen_format = CRCGCB 4 | render_hud = true 5 | render_crosshair = true 6 | render_weapon = true 7 | render_decals = false 8 | render_particles = false 9 | 10 | # Add more if needed 11 | available_buttons = 12 | { 13 | TURN_LEFT 14 | TURN_RIGHT 15 | ATTACK 16 | 17 | MOVE_RIGHT 18 | MOVE_LEFT 19 | 20 | MOVE_FORWARD 21 | MOVE_BACKWARD 22 | TURN_LEFT_RIGHT_DELTA 23 | LOOK_UP_DOWN_DELTA 24 | } 25 | 26 | 27 | # Add more if needed 28 | available_game_variables = 29 | { 30 | HEALTH 31 | } -------------------------------------------------------------------------------- /random/sample_random_agent.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import vizdoom as vzd 4 | from random import choice 5 | 6 | game = vzd.DoomGame() 7 | game.load_config("config/custom_config.cfg") 8 | 9 | # Name your agent and select color 10 | # colors: 0 - green, 1 - gray, 2 - brown, 3 - red, 4 - light gray, 5 - light brown, 6 - light red, 7 - light blue 11 | name = "SampleRandomAgent" 12 | color = 0 13 | game.add_game_args("+name {} +colorset {}".format(name, color)) 14 | game.init() 15 | 16 | # Three sample actions: turn left/right and shoot 17 | actions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] 18 | 19 | # Play until the game (episode) is over. 20 | while not game.is_episode_finished(): 21 | 22 | if game.is_player_dead(): 23 | # Use this to respawn immediately after death, new state will be available. 24 | game.respawn_player() 25 | 26 | # Or observe the game until automatic respawn. 27 | # game.advance_action(); 28 | # continue; 29 | 30 | # Analyze the state ... or not 31 | s = game.get_state() 32 | 33 | # Make your action. 34 | game.make_action(choice(actions)) 35 | 36 | # Log your frags every ~5 seconds 37 | if s.number % 175 == 0: 38 | print("Frags:", game.get_game_variable(vzd.GameVariable.FRAGCOUNT)) 39 | 40 | game.close() 41 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ERROR_PREFIX="ERROR:" 4 | if [[ ! -z `which nvidia-docker` ]] 5 | then 6 | DOCKER_CMD=nvidia-docker 7 | elif [[ ! -z `which docker` ]] 8 | then 9 | echo "WARNING: nvidia-docker not found. Nvidia drivers may not work." >&2 10 | DOCKER_CMD=docker 11 | else 12 | echo "${ERROR_PREFIX} docker or nvidia-docker not found. Aborting." >&2 13 | exit 1 14 | fi 15 | 16 | 17 | 18 | DIRECTORY=$1 19 | if [ ! -d "$DIRECTORY" ]; then 20 | echo "${ERROR_PREFIX} Directory '${DIRECTORY}' doesn't exist. Aborting." >&2 21 | exit 2 22 | fi 23 | 24 | image_tag="cig2017_`basename $DIRECTORY`" 25 | container_name=${image_tag} 26 | 27 | run_version=0 28 | while [[ ! -z `docker ps --format '{{.Names}}'|grep "^${container_name}$"` ]] 29 | do 30 | echo "WARNING: '${container_name}' is already a running docker container. Trying to run '${image_tag}_${run_version}'." 
31 | run_version=$(($run_version+1)) 32 | container_name=${image_tag}_${run_version} 33 | done 34 | 35 | 36 | if [ "`uname`" != "Linux" ]; then 37 | echo "WARNING: GUI forwarding in Docker was tested only on a linux host." 38 | fi 39 | 40 | $DOCKER_CMD run --net=host -ti --rm --name ${container_name} \ 41 | --env="DISPLAY" --privileged \ 42 | ${image_tag} "${@:2}" 43 | -------------------------------------------------------------------------------- /run_i.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ERROR_PREFIX="ERROR:" 4 | if [[ ! -z `which nvidia-docker` ]] 5 | then 6 | DOCKER_CMD=nvidia-docker 7 | elif [[ ! -z `which docker` ]] 8 | then 9 | echo "WARNING: nvidia-docker not found. Nvidia drivers may not work." >&2 10 | DOCKER_CMD=docker 11 | else 12 | echo "${ERROR_PREFIX} docker or nvidia-docker not found. Aborting." >&2 13 | exit 1 14 | fi 15 | 16 | 17 | 18 | DIRECTORY=$1 19 | if [ ! -d "$DIRECTORY" ]; then 20 | echo "${ERROR_PREFIX} Directory '${DIRECTORY}' doesn't exist. Aborting." >&2 21 | exit 2 22 | fi 23 | 24 | image_tag="cig2017_`basename $DIRECTORY`" 25 | container_name=${image_tag} 26 | 27 | run_version=0 28 | while [[ ! -z `docker ps --format '{{.Names}}'|grep "^${container_name}$"` ]] 29 | do 30 | echo "WARNING: '${container_name}' is already a running docker container. Trying to run '${image_tag}_${run_version}'." 31 | run_version=$(($run_version+1)) 32 | container_name=${image_tag}_${run_version} 33 | done 34 | 35 | 36 | if [ "`uname`" != "Linux" ]; then 37 | echo "WARNING: GUI forwarding in Docker was tested only on a linux host." 38 | fi 39 | 40 | $DOCKER_CMD run --net=host -ti --name ${container_name} \ 41 | --env="DISPLAY" --privileged \ 42 | --entrypoint /bin/bash \ 43 | ${image_tag} -------------------------------------------------------------------------------- /wads/brit11.wad: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/wads/brit11.wad -------------------------------------------------------------------------------- /wads/cig2017.wad: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/wads/cig2017.wad -------------------------------------------------------------------------------- /wads/exec.wad: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/wads/exec.wad -------------------------------------------------------------------------------- /wads/greenwar.wad: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/wads/greenwar.wad -------------------------------------------------------------------------------- /wads/vex.wad: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mihahauke/VDAIC2017/42baffa7c6ee43db618605838ea6f9e0547001d1/wads/vex.wad --------------------------------------------------------------------------------
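The sample agents above (no_host.py and sample_random_agent.py) pick from a hard-coded list of three actions, while their config/custom_config.cfg exposes seven discrete buttons plus two continuous *_DELTA buttons. Below is a hedged sketch, not part of the repository, of how the full discrete action space could be enumerated instead; the button count and the zero padding for the delta buttons are assumptions read off that config file:

import itertools
from random import choice

# custom_config.cfg lists 7 discrete buttons followed by 2 *_DELTA
# (continuous) buttons; keep the deltas at 0 and enumerate the rest.
NUM_DISCRETE_BUTTONS = 7

# Every on/off combination of the discrete buttons: 2**7 = 128 candidate actions.
actions = [list(combo) + [0, 0]
           for combo in itertools.product([0, 1], repeat=NUM_DISCRETE_BUTTONS)]

# Inside the game loop the agent would then act exactly as the samples do:
#     game.make_action(choice(actions))

Whether all 128 combinations are worth exploring is a design choice; a small hand-picked subset, as in the sample agents, is usually easier for a learned policy to handle.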