├── LICENSE ├── README.md ├── custom_criteria.py ├── custom_transforms.py ├── data ├── kitti_raw_loader.py └── prepare_train_data.py ├── dataloader └── __init__.py ├── datasets ├── __init__.py ├── sequence_folders.py ├── validation_flow.py └── validation_folders.py ├── flowutils ├── __init__.py ├── flow_io.py ├── flow_viz.py ├── flowlib.py └── pfm.py ├── install_flownet2_deps.sh ├── logger.py ├── losses.py ├── main.py ├── models ├── FlowNet2S.py ├── FlowNetC.py ├── FlowNetFusion.py ├── FlowNetS.py ├── FlowNetSD.py ├── PWCNet.py ├── __init__.py ├── back2future.py ├── channelnorm_package │ ├── __init__.py │ ├── channelnorm.py │ ├── channelnorm_cuda.cc │ ├── channelnorm_cuda.egg-info │ │ ├── PKG-INFO │ │ ├── SOURCES.txt │ │ ├── dependency_links.txt │ │ └── top_level.txt │ ├── channelnorm_kernel.cu │ ├── channelnorm_kernel.cuh │ └── setup.py ├── flownet2 │ ├── FlowNetC.py │ ├── FlowNetFusion.py │ ├── FlowNetS.py │ ├── FlowNetSD.py │ └── __init__.py ├── flownet2_models.py ├── resample2d_package │ ├── __init__.py │ ├── resample2d.py │ ├── resample2d_cuda.cc │ ├── resample2d_cuda.egg-info │ │ ├── PKG-INFO │ │ ├── SOURCES.txt │ │ ├── dependency_links.txt │ │ └── top_level.txt │ ├── resample2d_kernel.cu │ ├── resample2d_kernel.cuh │ └── setup.py ├── spynet.py ├── spynet_models │ ├── README.md │ ├── convert2pth.py │ ├── modelL1_3-1-bias.pth.tar │ ├── modelL1_3-1-weight.pth.tar │ ├── modelL1_3-2-bias.pth.tar │ ├── modelL1_3-2-weight.pth.tar │ ├── modelL1_3-3-bias.pth.tar │ ├── modelL1_3-3-weight.pth.tar │ ├── modelL1_3-4-bias.pth.tar │ ├── modelL1_3-4-weight.pth.tar │ ├── modelL1_3-5-bias.pth.tar │ ├── modelL1_3-5-weight.pth.tar │ ├── modelL1_4-1-bias.pth.tar │ ├── modelL1_4-1-weight.pth.tar │ ├── modelL1_4-2-bias.pth.tar │ ├── modelL1_4-2-weight.pth.tar │ ├── modelL1_4-3-bias.pth.tar │ ├── modelL1_4-3-weight.pth.tar │ ├── modelL1_4-4-bias.pth.tar │ ├── modelL1_4-4-weight.pth.tar │ ├── modelL1_4-5-bias.pth.tar │ ├── modelL1_4-5-weight.pth.tar │ ├── modelL1_C-1-bias.pth.tar │ ├── modelL1_C-1-weight.pth.tar │ ├── modelL1_C-2-bias.pth.tar │ ├── modelL1_C-2-weight.pth.tar │ ├── modelL1_C-3-bias.pth.tar │ ├── modelL1_C-3-weight.pth.tar │ ├── modelL1_C-4-bias.pth.tar │ ├── modelL1_C-4-weight.pth.tar │ ├── modelL1_C-5-bias.pth.tar │ ├── modelL1_C-5-weight.pth.tar │ ├── modelL1_F-1-bias.pth.tar │ ├── modelL1_F-1-weight.pth.tar │ ├── modelL1_F-2-bias.pth.tar │ ├── modelL1_F-2-weight.pth.tar │ ├── modelL1_F-3-bias.pth.tar │ ├── modelL1_F-3-weight.pth.tar │ ├── modelL1_F-4-bias.pth.tar │ ├── modelL1_F-4-weight.pth.tar │ ├── modelL1_F-5-bias.pth.tar │ ├── modelL1_F-5-weight.pth.tar │ ├── modelL1_K-1-bias.pth.tar │ ├── modelL1_K-1-weight.pth.tar │ ├── modelL1_K-2-bias.pth.tar │ ├── modelL1_K-2-weight.pth.tar │ ├── modelL1_K-3-bias.pth.tar │ ├── modelL1_K-3-weight.pth.tar │ ├── modelL1_K-4-bias.pth.tar │ ├── modelL1_K-4-weight.pth.tar │ ├── modelL1_K-5-bias.pth.tar │ ├── modelL1_K-5-weight.pth.tar │ ├── modelL2_3-1-bias.pth.tar │ ├── modelL2_3-1-weight.pth.tar │ ├── modelL2_3-2-bias.pth.tar │ ├── modelL2_3-2-weight.pth.tar │ ├── modelL2_3-3-bias.pth.tar │ ├── modelL2_3-3-weight.pth.tar │ ├── modelL2_3-4-bias.pth.tar │ ├── modelL2_3-4-weight.pth.tar │ ├── modelL2_3-5-bias.pth.tar │ ├── modelL2_3-5-weight.pth.tar │ ├── modelL2_4-1-bias.pth.tar │ ├── modelL2_4-1-weight.pth.tar │ ├── modelL2_4-2-bias.pth.tar │ ├── modelL2_4-2-weight.pth.tar │ ├── modelL2_4-3-bias.pth.tar │ ├── modelL2_4-3-weight.pth.tar │ ├── modelL2_4-4-bias.pth.tar │ ├── modelL2_4-4-weight.pth.tar │ ├── modelL2_4-5-bias.pth.tar │ ├── 
modelL2_4-5-weight.pth.tar │ ├── modelL2_C-1-bias.pth.tar │ ├── modelL2_C-1-weight.pth.tar │ ├── modelL2_C-2-bias.pth.tar │ ├── modelL2_C-2-weight.pth.tar │ ├── modelL2_C-3-bias.pth.tar │ ├── modelL2_C-3-weight.pth.tar │ ├── modelL2_C-4-bias.pth.tar │ ├── modelL2_C-4-weight.pth.tar │ ├── modelL2_C-5-bias.pth.tar │ ├── modelL2_C-5-weight.pth.tar │ ├── modelL2_F-1-bias.pth.tar │ ├── modelL2_F-1-weight.pth.tar │ ├── modelL2_F-2-bias.pth.tar │ ├── modelL2_F-2-weight.pth.tar │ ├── modelL2_F-3-bias.pth.tar │ ├── modelL2_F-3-weight.pth.tar │ ├── modelL2_F-4-bias.pth.tar │ ├── modelL2_F-4-weight.pth.tar │ ├── modelL2_F-5-bias.pth.tar │ ├── modelL2_F-5-weight.pth.tar │ ├── modelL2_K-1-bias.pth.tar │ ├── modelL2_K-1-weight.pth.tar │ ├── modelL2_K-2-bias.pth.tar │ ├── modelL2_K-2-weight.pth.tar │ ├── modelL2_K-3-bias.pth.tar │ ├── modelL2_K-3-weight.pth.tar │ ├── modelL2_K-4-bias.pth.tar │ ├── modelL2_K-4-weight.pth.tar │ ├── modelL2_K-5-bias.pth.tar │ ├── modelL2_K-5-weight.pth.tar │ ├── modelL3_3-1-bias.pth.tar │ ├── modelL3_3-1-weight.pth.tar │ ├── modelL3_3-2-bias.pth.tar │ ├── modelL3_3-2-weight.pth.tar │ ├── modelL3_3-3-bias.pth.tar │ ├── modelL3_3-3-weight.pth.tar │ ├── modelL3_3-4-bias.pth.tar │ ├── modelL3_3-4-weight.pth.tar │ ├── modelL3_3-5-bias.pth.tar │ ├── modelL3_3-5-weight.pth.tar │ ├── modelL3_4-1-bias.pth.tar │ ├── modelL3_4-1-weight.pth.tar │ ├── modelL3_4-2-bias.pth.tar │ ├── modelL3_4-2-weight.pth.tar │ ├── modelL3_4-3-bias.pth.tar │ ├── modelL3_4-3-weight.pth.tar │ ├── modelL3_4-4-bias.pth.tar │ ├── modelL3_4-4-weight.pth.tar │ ├── modelL3_4-5-bias.pth.tar │ ├── modelL3_4-5-weight.pth.tar │ ├── modelL3_C-1-bias.pth.tar │ ├── modelL3_C-1-weight.pth.tar │ ├── modelL3_C-2-bias.pth.tar │ ├── modelL3_C-2-weight.pth.tar │ ├── modelL3_C-3-bias.pth.tar │ ├── modelL3_C-3-weight.pth.tar │ ├── modelL3_C-4-bias.pth.tar │ ├── modelL3_C-4-weight.pth.tar │ ├── modelL3_C-5-bias.pth.tar │ ├── modelL3_C-5-weight.pth.tar │ ├── modelL3_F-1-bias.pth.tar │ ├── modelL3_F-1-weight.pth.tar │ ├── modelL3_F-2-bias.pth.tar │ ├── modelL3_F-2-weight.pth.tar │ ├── modelL3_F-3-bias.pth.tar │ ├── modelL3_F-3-weight.pth.tar │ ├── modelL3_F-4-bias.pth.tar │ ├── modelL3_F-4-weight.pth.tar │ ├── modelL3_F-5-bias.pth.tar │ ├── modelL3_F-5-weight.pth.tar │ ├── modelL3_K-1-bias.pth.tar │ ├── modelL3_K-1-weight.pth.tar │ ├── modelL3_K-2-bias.pth.tar │ ├── modelL3_K-2-weight.pth.tar │ ├── modelL3_K-3-bias.pth.tar │ ├── modelL3_K-3-weight.pth.tar │ ├── modelL3_K-4-bias.pth.tar │ ├── modelL3_K-4-weight.pth.tar │ ├── modelL3_K-5-bias.pth.tar │ ├── modelL3_K-5-weight.pth.tar │ ├── modelL4_3-1-bias.pth.tar │ ├── modelL4_3-1-weight.pth.tar │ ├── modelL4_3-2-bias.pth.tar │ ├── modelL4_3-2-weight.pth.tar │ ├── modelL4_3-3-bias.pth.tar │ ├── modelL4_3-3-weight.pth.tar │ ├── modelL4_3-4-bias.pth.tar │ ├── modelL4_3-4-weight.pth.tar │ ├── modelL4_3-5-bias.pth.tar │ ├── modelL4_3-5-weight.pth.tar │ ├── modelL4_4-1-bias.pth.tar │ ├── modelL4_4-1-weight.pth.tar │ ├── modelL4_4-2-bias.pth.tar │ ├── modelL4_4-2-weight.pth.tar │ ├── modelL4_4-3-bias.pth.tar │ ├── modelL4_4-3-weight.pth.tar │ ├── modelL4_4-4-bias.pth.tar │ ├── modelL4_4-4-weight.pth.tar │ ├── modelL4_4-5-bias.pth.tar │ ├── modelL4_4-5-weight.pth.tar │ ├── modelL4_C-1-bias.pth.tar │ ├── modelL4_C-1-weight.pth.tar │ ├── modelL4_C-2-bias.pth.tar │ ├── modelL4_C-2-weight.pth.tar │ ├── modelL4_C-3-bias.pth.tar │ ├── modelL4_C-3-weight.pth.tar │ ├── modelL4_C-4-bias.pth.tar │ ├── modelL4_C-4-weight.pth.tar │ ├── modelL4_C-5-bias.pth.tar │ ├── modelL4_C-5-weight.pth.tar │ ├── 
modelL4_F-1-bias.pth.tar │ ├── modelL4_F-1-weight.pth.tar │ ├── modelL4_F-2-bias.pth.tar │ ├── modelL4_F-2-weight.pth.tar │ ├── modelL4_F-3-bias.pth.tar │ ├── modelL4_F-3-weight.pth.tar │ ├── modelL4_F-4-bias.pth.tar │ ├── modelL4_F-4-weight.pth.tar │ ├── modelL4_F-5-bias.pth.tar │ ├── modelL4_F-5-weight.pth.tar │ ├── modelL4_K-1-bias.pth.tar │ ├── modelL4_K-1-weight.pth.tar │ ├── modelL4_K-2-bias.pth.tar │ ├── modelL4_K-2-weight.pth.tar │ ├── modelL4_K-3-bias.pth.tar │ ├── modelL4_K-3-weight.pth.tar │ ├── modelL4_K-4-bias.pth.tar │ ├── modelL4_K-4-weight.pth.tar │ ├── modelL4_K-5-bias.pth.tar │ ├── modelL4_K-5-weight.pth.tar │ ├── modelL5_3-1-bias.pth.tar │ ├── modelL5_3-1-weight.pth.tar │ ├── modelL5_3-2-bias.pth.tar │ ├── modelL5_3-2-weight.pth.tar │ ├── modelL5_3-3-bias.pth.tar │ ├── modelL5_3-3-weight.pth.tar │ ├── modelL5_3-4-bias.pth.tar │ ├── modelL5_3-4-weight.pth.tar │ ├── modelL5_3-5-bias.pth.tar │ ├── modelL5_3-5-weight.pth.tar │ ├── modelL5_4-1-bias.pth.tar │ ├── modelL5_4-1-weight.pth.tar │ ├── modelL5_4-2-bias.pth.tar │ ├── modelL5_4-2-weight.pth.tar │ ├── modelL5_4-3-bias.pth.tar │ ├── modelL5_4-3-weight.pth.tar │ ├── modelL5_4-4-bias.pth.tar │ ├── modelL5_4-4-weight.pth.tar │ ├── modelL5_4-5-bias.pth.tar │ ├── modelL5_4-5-weight.pth.tar │ ├── modelL5_C-1-bias.pth.tar │ ├── modelL5_C-1-weight.pth.tar │ ├── modelL5_C-2-bias.pth.tar │ ├── modelL5_C-2-weight.pth.tar │ ├── modelL5_C-3-bias.pth.tar │ ├── modelL5_C-3-weight.pth.tar │ ├── modelL5_C-4-bias.pth.tar │ ├── modelL5_C-4-weight.pth.tar │ ├── modelL5_C-5-bias.pth.tar │ ├── modelL5_C-5-weight.pth.tar │ ├── modelL5_F-1-bias.pth.tar │ ├── modelL5_F-1-weight.pth.tar │ ├── modelL5_F-2-bias.pth.tar │ ├── modelL5_F-2-weight.pth.tar │ ├── modelL5_F-3-bias.pth.tar │ ├── modelL5_F-3-weight.pth.tar │ ├── modelL5_F-4-bias.pth.tar │ ├── modelL5_F-4-weight.pth.tar │ ├── modelL5_F-5-bias.pth.tar │ ├── modelL5_F-5-weight.pth.tar │ ├── modelL5_K-1-bias.pth.tar │ ├── modelL5_K-1-weight.pth.tar │ ├── modelL5_K-2-bias.pth.tar │ ├── modelL5_K-2-weight.pth.tar │ ├── modelL5_K-3-bias.pth.tar │ ├── modelL5_K-3-weight.pth.tar │ ├── modelL5_K-4-bias.pth.tar │ ├── modelL5_K-4-weight.pth.tar │ ├── modelL5_K-5-bias.pth.tar │ ├── modelL5_K-5-weight.pth.tar │ ├── modelL6_C-1-bias.pth.tar │ ├── modelL6_C-1-weight.pth.tar │ ├── modelL6_C-2-bias.pth.tar │ ├── modelL6_C-2-weight.pth.tar │ ├── modelL6_C-3-bias.pth.tar │ ├── modelL6_C-3-weight.pth.tar │ ├── modelL6_C-4-bias.pth.tar │ ├── modelL6_C-4-weight.pth.tar │ ├── modelL6_C-5-bias.pth.tar │ ├── modelL6_C-5-weight.pth.tar │ ├── modelL6_F-1-bias.pth.tar │ ├── modelL6_F-1-weight.pth.tar │ ├── modelL6_F-2-bias.pth.tar │ ├── modelL6_F-2-weight.pth.tar │ ├── modelL6_F-3-bias.pth.tar │ ├── modelL6_F-3-weight.pth.tar │ ├── modelL6_F-4-bias.pth.tar │ ├── modelL6_F-4-weight.pth.tar │ ├── modelL6_F-5-bias.pth.tar │ ├── modelL6_F-5-weight.pth.tar │ ├── modelL6_K-1-bias.pth.tar │ ├── modelL6_K-1-weight.pth.tar │ ├── modelL6_K-2-bias.pth.tar │ ├── modelL6_K-2-weight.pth.tar │ ├── modelL6_K-3-bias.pth.tar │ ├── modelL6_K-3-weight.pth.tar │ ├── modelL6_K-4-bias.pth.tar │ ├── modelL6_K-4-weight.pth.tar │ ├── modelL6_K-5-bias.pth.tar │ └── modelL6_K-5-weight.pth.tar └── submodules.py ├── patches ├── Upatch1.png └── Upatch2.png ├── print_patch.py ├── raw.py ├── requirements.txt ├── test_patch.py └── utils.py /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019 Anurag Ranjan, Joel Janai and Max-Planck-Gesellschaft 2 | 3 | Software Copyright License for 
non-commercial scientific research purposes 4 | Please read carefully the following terms and conditions and any accompanying documentation before you download and/or use the software, (the "Software"). By downloading and/or using the Software, you acknowledge that you have read these terms and conditions, understand them, and agree to be bound by them. If you do not agree with these terms and conditions, you must not download and/or use the Software. Any infringement of the terms of this agreement will automatically terminate your rights under this License. 5 | 6 | Ownership / Licensees 7 | The Software and the associated materials has been developed at the Max Planck Institute for Intelligent Systems (hereinafter "MPI"). Any copyright or patent right is owned by and proprietary material of the Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (hereinafter “MPG”; MPI and MPG hereinafter collectively “Max-Planck”) hereinafter the “Licensor”. 8 | 9 | License Grant 10 | Licensor grants you (Licensee) personally a single-user, non-exclusive, non-transferable, free of charge right: 11 | 12 | To install the Software on computers owned, leased or otherwise controlled by you and/or your organization; 13 | To use the Software and Data for the sole purpose of performing non-commercial scientific research, non-commercial education, or non-commercial artistic projects; 14 | To modify, adapt, translate or create derivative works based upon the software and data. 15 | Any other use, in particular any use for commercial purposes, is prohibited. This includes, without limitation, incorporation in a commercial product, use in a commercial service, or production of other artefacts for commercial purposes. The Data and Software may not be reproduced, modified and/or made available in any form to any third party without Max-Planck’s prior written permission. 16 | 17 | The Software may not be used for pornographic purposes or to generate pornographic material whether commercial or not. This license also prohibits the use of the Data and Software to train methods/algorithms/neural networks/etc. for commercial use of any kind. By downloading the Data and Software, you agree not to reverse engineer it. 18 | 19 | No Distribution 20 | The Data and Software and the license herein granted shall not be copied, shared, distributed, re-sold, offered for re-sale, transferred or sub-licensed in whole or in part except that you may make one copy for archive purposes only. 21 | 22 | Disclaimer of Representations and Warranties 23 | You expressly acknowledge and agree that the Data and Software results from basic research, is provided “AS IS”, may contain errors, and that any use of the Data and Software is at your sole risk. LICENSOR MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE DATA and SOFTWARE, NEITHER EXPRESS NOR IMPLIED, AND THE ABSENCE OF ANY LEGAL OR ACTUAL DEFECTS, WHETHER DISCOVERABLE OR NOT. Specifically, and not to limit the foregoing, licensor makes no representations or warranties (i) regarding the merchantability or fitness for a particular purpose of the Data and Software, (ii) that the use of the Data and Software will not infringe any patents, copyrights or other intellectual property rights of a third party, and (iii) that the use of the Data and Software will not cause any damage of any kind to you or a third party. 
24 | 
25 | Limitation of Liability
26 | Because this Data and Software License Agreement qualifies as a donation, according to Section 521 of the German Civil Code (Bürgerliches Gesetzbuch – BGB) Licensor as a donor is liable for intent and gross negligence only. If the Licensor fraudulently conceals a legal or material defect, they are obliged to compensate the Licensee for the resulting damage. Licensor shall be liable for loss of data only up to the amount of typical recovery costs which would have arisen had proper and regular data backup measures been taken. For the avoidance of doubt Licensor shall be liable in accordance with the German Product Liability Act in the event of product liability. The foregoing applies also to Licensor’s legal representatives or assistants in performance. Any further liability shall be excluded.
27 | 
28 | Patent claims generated through the usage of the software cannot be directed towards the copyright holders. The data and software is provided in the state of development the licensor defines. If modified or extended by Licensee, the Licensor makes no claims about the fitness of the software and is not responsible for any problems such modifications cause.
29 | 
30 | No Maintenance Services
31 | You understand and agree that Licensor is under no obligation to provide either maintenance services, update services, notices of latent defects, or corrections of defects with regard to the Data and Software. Licensor nevertheless reserves the right to update, modify, or discontinue the Software at any time.
32 | 
33 | Defects of the software must be notified in writing to the Licensor with a comprehensible description of the error symptoms. The notification of the defect should enable the reproduction of the error. The Licensee is encouraged to communicate any use, results, modification or publication.
34 | 
35 | Publications using the Software
36 | You acknowledge that the Data and Software is a valuable scientific resource and agree to appropriately reference the following paper in any publication making use of the Data and/or Software.
37 | 
38 | Citation:
39 | 
40 | @inproceedings{ranjan2019attacking,
41 |   title = {Attacking Optical Flow},
42 |   author = {Ranjan, Anurag and Joel Janai and Andreas Geiger and Michael J. Black},
43 |   booktitle = {International Conference on Computer Vision (ICCV)},
44 |   year = {2019},
45 |   url = {http://flowattack.is.tue.mpg.de/}
46 | }
47 | 
48 | Commercial Licensing Opportunities
49 | For commercial uses of the Data and Software, please send email to ps-license@tue.mpg.de
50 | This Agreement shall be governed by the laws of the Federal Republic of Germany except for the UN Sales Convention.
51 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Flow Attack
2 | 
3 | This is the official repository of
4 | 
5 | *Anurag Ranjan, Joel Janai, Andreas Geiger, Michael J. Black*. **Attacking Optical Flow.** ICCV 2019.
6 | 
7 | [[Project Page](http://flowattack.is.tue.mpg.de/)] [[Arxiv](arxiv.org)]
8 | 
9 | ### Known Issues
10 | - To obtain the patch, use learning rates of `1e3` and `1e4`. For each learning rate, run at least five different trials for 30 epochs.
11 | - The best patch for FlowNetC was obtained with an LR of `1e3`, and the best patch for FlowNet2 with an LR of `1e4`.
12 | 
13 | ## Prerequisites
14 | Python 3 and PyTorch are required.
Third party libraries can be installed (in a `python3` virtualenv) using:
15 | 
16 | ```bash
17 | pip3 install -r requirements.txt
18 | ```
19 | Install the custom CUDA layers for FlowNet2 using:
20 | 
21 | ```bash
22 | bash install_flownet2_deps.sh
23 | ```
24 | ### Preparing training data
25 | 
26 | Download the [KITTI](http://www.cvlibs.net/datasets/kitti/raw_data.php) dataset using this [script](http://www.cvlibs.net/download.php?file=raw_data_downloader.zip) provided on the official website, and then run the following command.
27 | 
28 | ```bash
29 | python3 data/prepare_train_data.py /path/to/raw/kitti/dataset/ --dataset-format 'kitti' --dump-root /path/to/resulting/formatted/data/ --width 1280 --height 384 --num-threads 1 --with-gt
30 | ```
31 | 
32 | For testing against optical flow ground truth on KITTI, download the [KITTI2015](http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow) dataset.
33 | 
34 | ### Pretrained Models
35 | Download the pretrained models for [FlowNetC](https://drive.google.com/file/d/1BFT6b7KgKJC8rA59RmOVAXRM_S7aSfKE/view), [FlowNet2](https://drive.google.com/file/d/1hF8vS6YeHkx3j2pfCeQqqZGwA_PJq_Da/view), and [PWC-Net](https://github.com/NVlabs/PWC-Net/blob/master/PyTorch/pwc_net_chairs.pth.tar). The pretrained models for SPyNet and Back2Future are provided with this repository.
36 | 
37 | ## Generating Adversarial Patches
38 | 
39 | ### White-Box Attacks
40 | In White-Box attacks we optimize a patch for a single network, using gradient descent as described in the paper. A schematic sketch of how a generated patch is applied to an image pair is given at the end of this README.
41 | Use the following command to generate an adversarial patch for a specific network architecture using the prepared dataset:
42 | 
43 | ```bash
44 | python3 main.py --data [Path to prepared dataset] --kitti-data [Path to KITTI 2015 test set] --flownet [FlowNetS|FlowNetC|FlowNet2|PWCNet|Back2Future|SpyNet] --patch-size 0.10 --name [Name of the experiment]
45 | ```
46 | 
47 | The patch size is specified as a percentage of the training image size (default: 256).
48 | All other arguments, such as the learning rate and epoch size, are set to the values used in our experiments. For details, please check `main.py`.
49 | 
50 | ## Acknowledgements
51 | - We thank several GitHub users for their contributions, which are used in this repository.
52 | - The code for generating randomized patches and augmentation comes from [jhayes14/adversarial-patch](https://github.com/jhayes14/adversarial-patch).
53 | - Data preprocessing and KITTI dataloader code is taken from [ClementPinard/SfmLearner-Pytorch/](https://github.com/ClementPinard/SfmLearner-Pytorch/). Optical flow evaluation code is taken from [anuragranj/cc](https://github.com/anuragranj/cc).
54 | - FlowNet and FlowNet2 models have been taken from [NVIDIA/flownet2-pytorch](https://github.com/NVIDIA/flownet2-pytorch). PWCNet is taken from [NVlabs/PWC-Net](https://github.com/NVlabs/PWC-Net).
55 | - SPyNet implementation is taken from [sniklaus/pytorch-spynet](https://github.com/sniklaus/pytorch-spynet).
56 | - Back2Future implementation is taken from [anuragranj/back2future.pytorch](https://github.com/anuragranj/back2future.pytorch).
57 | - Correlation module is taken from [ClementPinard/Pytorch-Correlation-extension](https://github.com/ClementPinard/Pytorch-Correlation-extension).
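
## Applying a Patch (Illustrative Sketch)

The snippet below is only a schematic illustration of the attack setting, not code from this repository: the helper `place_patch`, the sizes, and the variable names are made up for exposition. It shows the basic idea of pasting the same learned patch into both frames of a pair at one location before the pair is passed to a flow network; the repository's own patch placement and augmentation logic lives in `main.py` and `utils.py`.

```python
import torch

def place_patch(frame1, frame2, patch, y, x):
    """Paste the same square patch into both frames at position (y, x)."""
    _, _, ph, pw = patch.shape
    adv1, adv2 = frame1.clone(), frame2.clone()
    adv1[:, :, y:y + ph, x:x + pw] = patch
    adv2[:, :, y:y + ph, x:x + pw] = patch
    return adv1, adv2

# Toy example with random tensors standing in for a normalized KITTI pair.
frame1 = torch.rand(1, 3, 384, 1280)
frame2 = torch.rand(1, 3, 384, 1280)
patch = torch.rand(1, 3, 102, 102)           # a hypothetical learned patch
y = torch.randint(0, 384 - 102, (1,)).item()
x = torch.randint(0, 1280 - 102, (1,)).item()
adv1, adv2 = place_patch(frame1, frame2, patch, y, x)
# flow = flow_net(adv1, adv2)                # any of the supported networks
```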
58 | -------------------------------------------------------------------------------- /custom_criteria.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torch.autograd import Function,Variable 4 | 5 | #import ipdb 6 | 7 | class GemanMcclureLoss(Function): 8 | """ 9 | Define Loss based on Geman-Mcclure 10 | """ 11 | @staticmethod 12 | def forward(ctx, input, target): 13 | sigma = 0.1 14 | x = input-target 15 | ctx.saved_variable = (x,sigma) 16 | # ipdb.set_trace() 17 | return input.new([(x**2 / (x**2 + sigma**2)).sum() / x.nelement()]) 18 | # return (x**2 / (x**2 + sigma**2)).sum() / x.nelement() 19 | 20 | @staticmethod 21 | def backward(ctx, grad_output=None): 22 | x,sigma = ctx.saved_variable 23 | 24 | # import ipdb;ipdb.set_trace() 25 | grad = Variable(2*x*sigma**2 / ((x**2 + sigma**2)**2) / x.nelement()) 26 | 27 | return grad*grad_output,None 28 | 29 | 30 | class AdaptiveGemanMcclureLoss(Function): 31 | """ 32 | Define Loss based on Geman-Mcclure 33 | """ 34 | @staticmethod 35 | def forward(ctx, input, target): 36 | _mad = lambda x : (x - x.median()).abs().median() 37 | x = input-target 38 | sigma = 1.4826 * _mad(x) 39 | ctx.saved_variable = (x,sigma) 40 | # ipdb.set_trace() 41 | return input.new([(x**2 / (x**2 + sigma**2)).sum() / x.nelement()]) 42 | # return (x**2 / (x**2 + sigma**2)).sum() / x.nelement() 43 | 44 | @staticmethod 45 | def backward(ctx, grad_output=None): 46 | x,sigma = ctx.saved_variable 47 | 48 | # import ipdb;ipdb.set_trace() 49 | grad = Variable(2*x*sigma**2 / ((x**2 + sigma**2)**2) / x.nelement()) 50 | 51 | return grad*grad_output,None 52 | 53 | 54 | class EPELoss(Function): 55 | """ 56 | Loss based on average endpoint error 57 | """ 58 | @staticmethod 59 | def forward(ctx, input, target): 60 | x = input-target 61 | df = (x**2).sum(dim=1,keepdim=True) 62 | ctx.saved_variable = (x,df) 63 | return input.new( [ df.sqrt().sum() / (x.nelement()/2.0) ] ) 64 | 65 | @staticmethod 66 | def backward(ctx, grad_output=None): 67 | x,df = ctx.saved_variable 68 | df_stacked = torch.cat((df,df),dim=1) 69 | grad = Variable( x * df_stacked.rsqrt() / (x.nelement()/2.0) ) 70 | return grad*grad_output, None 71 | 72 | 73 | 74 | 75 | def main(): 76 | # Test GemanMcclure criterion 77 | from torch.autograd import gradcheck 78 | 79 | # gradchek takes a tuple of tensor as input, check if your gradient 80 | # evaluated with these tensors are close enough to numerical 81 | # approximations and returns True if they all verify this condition. 
82 | input = (Variable(torch.randn(20,2).double(), requires_grad=True), Variable(torch.randn(20,2).double(), requires_grad=False),) 83 | 84 | # Test manually 85 | loss = GemanMcclureLoss.apply(input[0],input[1]) 86 | print('=== Loss ===') 87 | print(loss) 88 | loss.backward() 89 | print('=== Grad ===') 90 | print(input[0].grad) 91 | 92 | test = gradcheck(GemanMcclureLoss.apply, input, eps=1e-6, atol=1e-4, raise_exception=True) 93 | print(test) 94 | 95 | # Gradcheck for EPELoss 96 | input = (Variable(torch.randn(3,2,10,10).double(), requires_grad=True), 97 | Variable(torch.randn(3,2,10,10).double(), requires_grad=False)) 98 | test = gradcheck(EPELoss.apply, input, eps=1e-6, atol=1e-4, raise_exception=True) 99 | print(test) 100 | 101 | 102 | 103 | if __name__ == '__main__': 104 | main() 105 | 106 | -------------------------------------------------------------------------------- /custom_transforms.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import torch 3 | import random 4 | import numpy as np 5 | from utils import imresize 6 | 7 | '''Set of tranform random routines that takes list of inputs as arguments, 8 | in order to have random but coherent transformations.''' 9 | 10 | 11 | class Compose(object): 12 | def __init__(self, transforms): 13 | self.transforms = transforms 14 | 15 | def __call__(self, images): 16 | for t in self.transforms: 17 | images = t(images) 18 | return images 19 | 20 | 21 | class Normalize(object): 22 | def __init__(self, mean, std): 23 | self.mean = mean 24 | self.std = std 25 | 26 | def __call__(self, images): 27 | for tensor in images: 28 | for t, m, s in zip(tensor, self.mean, self.std): 29 | t.sub_(m).div_(s) 30 | return images 31 | 32 | 33 | class ArrayToTensor(object): 34 | """Converts a list of numpy.ndarray (H x W x C) along with a intrinsics matrix to a list of torch.FloatTensor of shape (C x H x W) with a intrinsics tensor.""" 35 | 36 | def __call__(self, images): 37 | tensors = [] 38 | for im in images: 39 | # put it from HWC to CHW format 40 | im = np.transpose(im, (2, 0, 1)) 41 | # handle numpy array 42 | tensors.append(torch.from_numpy(im).float()/255) 43 | return tensors 44 | 45 | 46 | class RandomHorizontalFlip(object): 47 | """Randomly horizontally flips the given numpy array with a probability of 0.5""" 48 | 49 | def __call__(self, images): 50 | if random.random() < 0.5: 51 | output_images = [np.copy(np.fliplr(im)) for im in images] 52 | w = output_images[0].shape[1] 53 | else: 54 | output_images = images 55 | return output_images 56 | 57 | 58 | class RandomScaleCrop(object): 59 | """Randomly zooms images up to 15% and crop them to a particular size""" 60 | def __init__(self, h, w): 61 | self.h = h 62 | self.w = w 63 | 64 | def __call__(self, images): 65 | in_h, in_w, _ = images[0].shape 66 | x_scaling, y_scaling = np.random.uniform(1,1.15,2) 67 | scaled_h, scaled_w = int(in_h * y_scaling), int(in_w * x_scaling) 68 | scaled_images = [imresize(im, (scaled_h, scaled_w)) for im in images] 69 | offset_y = np.random.randint(scaled_h - self.h + 1) 70 | offset_x = np.random.randint(scaled_w - self.w + 1) 71 | cropped_images = [im[offset_y:offset_y + self.h, offset_x:offset_x + self.w] for im in scaled_images] 72 | return cropped_images 73 | 74 | class RandomCrop(object): 75 | """Randomly zooms images up to 15% and crop them to a particular size""" 76 | def __init__(self, h, w): 77 | self.h = h 78 | self.w = w 79 | 80 | def __call__(self, images): 81 | in_h, in_w, _ = 
images[0].shape 82 | offset_y = np.random.randint(in_h - self.h + 1) 83 | offset_x = np.random.randint(in_w - self.w + 1) 84 | cropped_images = [im[offset_y:offset_y + self.h, offset_x:offset_x + self.w] for im in images] 85 | return cropped_images 86 | 87 | class Scale(object): 88 | """Scales images to a particular size""" 89 | def __init__(self, h, w): 90 | self.h = h 91 | self.w = w 92 | 93 | def __call__(self, images): 94 | 95 | in_h, in_w, _ = images[0].shape 96 | scaled_h, scaled_w = self.h , self.w 97 | 98 | scaled_images = [imresize(im, (scaled_h, scaled_w)) for im in images] 99 | 100 | return scaled_images 101 | -------------------------------------------------------------------------------- /data/kitti_raw_loader.py: -------------------------------------------------------------------------------- 1 | # Taken from https://github.com/ClementPinard/SfmLearner-Pytorch/ 2 | import numpy as np 3 | from path import Path 4 | from collections import Counter 5 | from PIL import Image 6 | 7 | def imresize(arr, sz): 8 | height, width = sz 9 | return np.array(Image.fromarray(arr).resize((width, height), resample=Image.BILINEAR)) 10 | 11 | class KittiRawLoader(object): 12 | def __init__(self, 13 | dataset_dir, 14 | static_frames_file=None, 15 | img_height=128, 16 | img_width=416, 17 | min_speed=2, 18 | get_gt=False): 19 | self.from_speed = static_frames_file is None 20 | if static_frames_file is not None: 21 | static_frames_file = Path(static_frames_file) 22 | self.collect_static_frames(static_frames_file) 23 | 24 | self.dataset_dir = Path(dataset_dir) 25 | self.img_height = img_height 26 | self.img_width = img_width 27 | self.cam_ids = ['02', '03'] 28 | self.date_list = ['2011_09_26', '2011_09_28', '2011_09_29', '2011_09_30', '2011_10_03'] 29 | self.min_speed = min_speed 30 | self.get_gt = get_gt 31 | self.collect_train_folders() 32 | 33 | def collect_static_frames(self, static_frames_file): 34 | with open(static_frames_file, 'r') as f: 35 | frames = f.readlines() 36 | self.static_frames = {} 37 | for fr in frames: 38 | if fr == '\n': 39 | continue 40 | date, drive, frame_id = fr.split(' ') 41 | curr_fid = '%.10d' % (np.int(frame_id[:-1])) 42 | if drive not in self.static_frames.keys(): 43 | self.static_frames[drive] = [] 44 | self.static_frames[drive].append(curr_fid) 45 | 46 | def collect_train_folders(self): 47 | self.scenes = [] 48 | for date in self.date_list: 49 | drive_set = (self.dataset_dir/date).dirs() 50 | for dr in drive_set: 51 | self.scenes.append(dr) 52 | 53 | def collect_scenes(self, drive): 54 | train_scenes = [] 55 | for c in self.cam_ids: 56 | oxts = sorted((drive/'oxts'/'data').files('*.txt')) 57 | scene_data = {'cid': c, 'dir': drive, 'speed': [], 'frame_id': [], 'rel_path': drive.name + '_' + c} 58 | for n, f in enumerate(oxts): 59 | metadata = np.genfromtxt(f) 60 | speed = metadata[8:11] 61 | scene_data['speed'].append(speed) 62 | scene_data['frame_id'].append('{:010d}'.format(n)) 63 | sample = self.load_image(scene_data, 0) 64 | if sample is None: 65 | return [] 66 | scene_data['P_rect'] = self.get_P_rect(scene_data, sample[1], sample[2]) 67 | scene_data['intrinsics'] = scene_data['P_rect'][:,:3] 68 | 69 | train_scenes.append(scene_data) 70 | return train_scenes 71 | 72 | def get_scene_imgs(self, scene_data): 73 | def construct_sample(scene_data, i, frame_id): 74 | sample = [self.load_image(scene_data, i)[0], frame_id] 75 | if self.get_gt: 76 | sample.append(self.generate_depth_map(scene_data, i)) 77 | return sample 78 | 79 | if self.from_speed: 80 | cum_speed = 
np.zeros(3) 81 | for i, speed in enumerate(scene_data['speed']): 82 | cum_speed += speed 83 | speed_mag = np.linalg.norm(cum_speed) 84 | if speed_mag > self.min_speed: 85 | frame_id = scene_data['frame_id'][i] 86 | yield construct_sample(scene_data, i, frame_id) 87 | cum_speed *= 0 88 | else: # from static frame file 89 | drive = str(scene_data['dir'].name) 90 | for (i,frame_id) in enumerate(scene_data['frame_id']): 91 | if (drive not in self.static_frames.keys()) or (frame_id not in self.static_frames[drive]): 92 | yield construct_sample(scene_data, i, frame_id) 93 | 94 | def get_P_rect(self, scene_data, zoom_x, zoom_y): 95 | #print(zoom_x, zoom_y) 96 | calib_file = scene_data['dir'].parent/'calib_cam_to_cam.txt' 97 | 98 | filedata = self.read_raw_calib_file(calib_file) 99 | P_rect = np.reshape(filedata['P_rect_' + scene_data['cid']], (3, 4)) 100 | P_rect[0] *= zoom_x 101 | P_rect[1] *= zoom_y 102 | return P_rect 103 | 104 | def load_image(self, scene_data, tgt_idx): 105 | img_file = scene_data['dir']/'image_{}'.format(scene_data['cid'])/'data'/scene_data['frame_id'][tgt_idx]+'.png' 106 | if not img_file.isfile(): 107 | return None 108 | img = np.array(Image.open(img_file)) 109 | zoom_y = self.img_height/img.shape[0] 110 | zoom_x = self.img_width/img.shape[1] 111 | img = imresize(img, (self.img_height, self.img_width)) 112 | return img, zoom_x, zoom_y 113 | 114 | def read_raw_calib_file(self, filepath): 115 | # From https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py 116 | """Read in a calibration file and parse into a dictionary.""" 117 | data = {} 118 | 119 | with open(filepath, 'r') as f: 120 | for line in f.readlines(): 121 | key, value = line.split(':', 1) 122 | # The only non-float values in these files are dates, which 123 | # we don't care about anyway 124 | try: 125 | data[key] = np.array([float(x) for x in value.split()]) 126 | except ValueError: 127 | pass 128 | return data 129 | -------------------------------------------------------------------------------- /data/prepare_train_data.py: -------------------------------------------------------------------------------- 1 | # Taken from https://github.com/ClementPinard/SfmLearner-Pytorch/ 2 | 3 | from __future__ import division 4 | import argparse 5 | from PIL import Image 6 | import numpy as np 7 | from joblib import Parallel, delayed 8 | from tqdm import tqdm 9 | from path import Path 10 | 11 | parser = argparse.ArgumentParser() 12 | parser.add_argument("dataset_dir", metavar='DIR', 13 | help='path to original dataset') 14 | parser.add_argument("--dataset-format", type=str, required=True, choices=["kitti", "cityscapes"]) 15 | parser.add_argument("--static-frames", default=None, 16 | help="list of imgs to discard for being static, if not set will discard them based on speed \ 17 | (careful, on KITTI some frames have incorrect speed)") 18 | parser.add_argument("--with-gt", action='store_true', 19 | help="If available (e.g. 
with KITTI), will store ground truth along with images, for validation") 20 | parser.add_argument("--dump-root", type=str, required=True, help="Where to dump the data") 21 | parser.add_argument("--height", type=int, default=128, help="image height") 22 | parser.add_argument("--width", type=int, default=416, help="image width") 23 | parser.add_argument("--num-threads", type=int, default=4, help="number of threads to use") 24 | 25 | args = parser.parse_args() 26 | 27 | 28 | def dump_example(scene): 29 | scene_list = data_loader.collect_scenes(scene) 30 | for scene_data in scene_list: 31 | dump_dir = args.dump_root/scene_data['rel_path'] 32 | dump_dir.makedirs_p() 33 | intrinsics = scene_data['intrinsics'] 34 | fx = intrinsics[0, 0] 35 | fy = intrinsics[1, 1] 36 | cx = intrinsics[0, 2] 37 | cy = intrinsics[1, 2] 38 | 39 | dump_cam_file = dump_dir/'cam.txt' 40 | with open(dump_cam_file, 'w') as f: 41 | f.write('%f,0.,%f,0.,%f,%f,0.,0.,1.' % (fx, cx, fy, cy)) 42 | 43 | for sample in data_loader.get_scene_imgs(scene_data): 44 | assert(len(sample) >= 2) 45 | img, frame_nb = sample[0], sample[1] 46 | dump_img_file = dump_dir/'{}.jpg'.format(frame_nb) 47 | Image.fromarray(img).save(dump_img_file) 48 | if len(sample) == 3: 49 | dump_depth_file = dump_dir/'{}.npy'.format(frame_nb) 50 | np.save(dump_depth_file, sample[2]) 51 | 52 | if len(dump_dir.files('*.jpg')) < 3: 53 | dump_dir.rmtree() 54 | 55 | 56 | def main(): 57 | args.dump_root = Path(args.dump_root) 58 | args.dump_root.mkdir_p() 59 | 60 | global data_loader 61 | 62 | if args.dataset_format == 'kitti': 63 | from kitti_raw_loader import KittiRawLoader 64 | data_loader = KittiRawLoader(args.dataset_dir, 65 | static_frames_file=args.static_frames, 66 | img_height=args.height, 67 | img_width=args.width, 68 | get_gt=args.with_gt) 69 | 70 | if args.dataset_format == 'cityscapes': 71 | from cityscapes_loader import cityscapes_loader 72 | data_loader = cityscapes_loader(args.dataset_dir, 73 | img_height=args.height, 74 | img_width=args.width) 75 | 76 | print('Retrieving frames') 77 | Parallel(n_jobs=args.num_threads)(delayed(dump_example)(scene) for scene in tqdm(data_loader.scenes)) 78 | # Split into train/val 79 | print('Generating train val lists') 80 | np.random.seed(8964) 81 | subfolders = args.dump_root.dirs() 82 | with open(args.dump_root / 'train.txt', 'w') as tf: 83 | with open(args.dump_root / 'val.txt', 'w') as vf: 84 | for s in tqdm(subfolders): 85 | if np.random.random() < 0.1: 86 | vf.write('{}\n'.format(s.name)) 87 | else: 88 | tf.write('{}\n'.format(s.name)) 89 | # remove useless groundtruth data for training comment if you don't want to erase it 90 | for gt_file in s.files('*.npy'): 91 | gt_file.remove_p() 92 | 93 | 94 | if __name__ == '__main__': 95 | main() 96 | -------------------------------------------------------------------------------- /dataloader/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/dataloader/__init__.py -------------------------------------------------------------------------------- /datasets/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/datasets/__init__.py -------------------------------------------------------------------------------- /datasets/sequence_folders.py: 
-------------------------------------------------------------------------------- 1 | # Taken from https://github.com/ClementPinard/SfmLearner-Pytorch/ 2 | 3 | import torch.utils.data as data 4 | import numpy as np 5 | from PIL import Image 6 | from path import Path 7 | import random 8 | 9 | 10 | def crawl_folders(folders_list, sequence_length): 11 | sequence_set = [] 12 | demi_length = (sequence_length-1)//2 13 | for folder in folders_list: 14 | intrinsics = np.genfromtxt(folder/'cam.txt', delimiter=',').astype(np.float32).reshape((3, 3)) 15 | imgs = sorted(folder.files('*.jpg')) 16 | if len(imgs) < sequence_length: 17 | continue 18 | for i in range(demi_length, len(imgs)-demi_length): 19 | sample = {'intrinsics': intrinsics, 'tgt': imgs[i], 'ref_imgs': []} 20 | for j in range(-demi_length, demi_length + 1): 21 | if j != 0: 22 | sample['ref_imgs'].append(imgs[i+j]) 23 | sequence_set.append(sample) 24 | random.shuffle(sequence_set) 25 | return sequence_set 26 | 27 | 28 | def load_as_float(path): 29 | return np.array(Image.open(path)).astype(np.float32) 30 | 31 | 32 | class SequenceFolder(data.Dataset): 33 | """A sequence data loader where the files are arranged in this way: 34 | root/scene_1/0000000.jpg 35 | root/scene_1/0000001.jpg 36 | .. 37 | root/scene_1/cam.txt 38 | root/scene_2/0000000.jpg 39 | . 40 | transform functions must take in a list a images and a numpy array (usually intrinsics matrix) 41 | """ 42 | 43 | def __init__(self, root, seed=None, train=True, sequence_length=3, transform=None, target_transform=None): 44 | np.random.seed(seed) 45 | random.seed(seed) 46 | self.root = Path(root) 47 | scene_list_path = self.root/'train.txt' if train else self.root/'val.txt' 48 | self.scenes = [self.root/folder[:-1] for folder in open(scene_list_path)] 49 | self.samples = crawl_folders(self.scenes, sequence_length) 50 | self.transform = transform 51 | 52 | def __getitem__(self, index): 53 | sample = self.samples[index] 54 | tgt_img = load_as_float(sample['tgt']) 55 | ref_imgs = [load_as_float(ref_img) for ref_img in sample['ref_imgs']] 56 | if self.transform is not None: 57 | imgs = self.transform([tgt_img] + ref_imgs) 58 | tgt_img = imgs[0] 59 | ref_img = imgs[1:] 60 | 61 | return tgt_img, ref_img 62 | 63 | def __len__(self): 64 | return len(self.samples) 65 | -------------------------------------------------------------------------------- /datasets/validation_folders.py: -------------------------------------------------------------------------------- 1 | # Adapted from https://github.com/ClementPinard/SfmLearner-Pytorch/ 2 | 3 | import torch.utils.data as data 4 | import numpy as np 5 | from scipy.misc import imread 6 | from path import Path 7 | import torch 8 | 9 | 10 | def crawl_folders(folders_list): 11 | imgs = [] 12 | depth = [] 13 | for folder in folders_list: 14 | current_imgs = sorted(folder.files('*.jpg')) 15 | current_depth = [] 16 | for img in current_imgs: 17 | d = img.dirname()/(img.name[:-4] + '.npy') 18 | assert(d.isfile()), "depth file {} not found".format(str(d)) 19 | depth.append(d) 20 | imgs.extend(current_imgs) 21 | depth.extend(current_depth) 22 | return imgs, depth 23 | 24 | def crawl_folders_pair(folders_list): 25 | imgs1 = [] 26 | imgs2 = [] 27 | depth = [] 28 | for folder in folders_list: 29 | current_imgs = sorted(folder.files('*.jpg')) 30 | current_imgs1 = current_imgs[:-1] 31 | current_imgs2 = current_imgs[1:] 32 | current_depth = [] 33 | for (img1,img2) in zip(current_imgs1, current_imgs2): 34 | d = img1.dirname()/(img1.name[:-4] + '.npy') 35 | 
assert(d.isfile()), "depth file {} not found".format(str(d)) 36 | depth.append(d) 37 | imgs1.extend(current_imgs1) 38 | imgs2.extend(current_imgs2) 39 | depth.extend(current_depth) 40 | return imgs1, imgs2, depth 41 | 42 | 43 | def load_as_float(path): 44 | return imread(path).astype(np.float32) 45 | 46 | 47 | class ValidationSet(data.Dataset): 48 | """A sequence data loader where the files are arranged in this way: 49 | root/scene_1/0000000.jpg 50 | root/scene_1/0000000.npy 51 | root/scene_1/0000001.jpg 52 | root/scene_1/0000001.npy 53 | .. 54 | root/scene_2/0000000.jpg 55 | root/scene_2/0000000.npy 56 | . 57 | 58 | transform functions must take in a list a images and a numpy array which can be None 59 | """ 60 | 61 | def __init__(self, root, transform=None): 62 | self.root = Path(root) 63 | scene_list_path = self.root/'val.txt' 64 | self.scenes = [self.root/folder[:-1] for folder in open(scene_list_path)] 65 | self.imgs, self.depth = crawl_folders(self.scenes) 66 | self.transform = transform 67 | 68 | def __getitem__(self, index): 69 | img = load_as_float(self.imgs[index]) 70 | depth = np.load(self.depth[index]).astype(np.float32) 71 | if self.transform is not None: 72 | img, _ = self.transform([img], None) 73 | img = img[0] 74 | return img, depth 75 | 76 | def __len__(self): 77 | return len(self.imgs) 78 | 79 | class ValidationSetPair(data.Dataset): 80 | """A sequence data loader where the files are arranged in this way: 81 | root/scene_1/0000000.jpg 82 | root/scene_1/0000000.npy 83 | root/scene_1/0000001.jpg 84 | root/scene_1/0000001.npy 85 | .. 86 | root/scene_2/0000000.jpg 87 | root/scene_2/0000000.npy 88 | . 89 | 90 | transform functions must take in a list a images and a numpy array which can be None 91 | """ 92 | 93 | def __init__(self, root, transform=None): 94 | self.root = Path(root) 95 | scene_list_path = self.root/'val.txt' 96 | self.scenes = [self.root/folder[:-1] for folder in open(scene_list_path)] 97 | self.imgs1, self.imgs2, self.depth = crawl_folders_pair(self.scenes) 98 | self.transform = transform 99 | 100 | def __getitem__(self, index): 101 | img1 = load_as_float(self.imgs1[index]) 102 | img2 = load_as_float(self.imgs2[index]) 103 | depth = np.load(self.depth[index]).astype(np.float32) 104 | if self.transform is not None: 105 | img, _ = self.transform([img1, img2], None) 106 | img1, img2 = img[0], img[1] 107 | return (img1, img2), depth 108 | 109 | def __len__(self): 110 | return len(self.imgs1) 111 | -------------------------------------------------------------------------------- /flowutils/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/flowutils/__init__.py -------------------------------------------------------------------------------- /flowutils/flow_io.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python2 2 | 3 | """ 4 | I/O script to save and load the data coming with the MPI-Sintel low-level 5 | computer vision benchmark. 
6 | 7 | For more details about the benchmark, please visit www.mpi-sintel.de 8 | 9 | CHANGELOG: 10 | v1.0 (2015/02/03): First release 11 | 12 | Copyright (c) 2015 Jonas Wulff 13 | Max Planck Institute for Intelligent Systems, Tuebingen, Germany 14 | 15 | """ 16 | 17 | # Requirements: Numpy as PIL/Pillow 18 | import numpy as np 19 | try: 20 | import png 21 | has_png = True 22 | except: 23 | has_png = False 24 | png=None 25 | 26 | 27 | 28 | # Check for endianness, based on Daniel Scharstein's optical flow code. 29 | # Using little-endian architecture, these two should be equal. 30 | TAG_FLOAT = 202021.25 31 | TAG_CHAR = 'PIEH'.encode() 32 | 33 | def flow_read(filename, return_validity=False): 34 | """ Read optical flow from file, return (U,V) tuple. 35 | 36 | Original code by Deqing Sun, adapted from Daniel Scharstein. 37 | """ 38 | f = open(filename,'rb') 39 | check = np.fromfile(f,dtype=np.float32,count=1)[0] 40 | assert check == TAG_FLOAT, ' flow_read:: Wrong tag in flow file (should be: {0}, is: {1}). Big-endian machine? '.format(TAG_FLOAT,check) 41 | width = np.fromfile(f,dtype=np.int32,count=1)[0] 42 | height = np.fromfile(f,dtype=np.int32,count=1)[0] 43 | size = width*height 44 | assert width > 0 and height > 0 and size > 1 and size < 100000000, ' flow_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height) 45 | tmp = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width*2)) 46 | u = tmp[:,np.arange(width)*2] 47 | v = tmp[:,np.arange(width)*2 + 1] 48 | 49 | if return_validity: 50 | valid = u<1e19 51 | u[valid==0] = 0 52 | v[valid==0] = 0 53 | return u,v,valid 54 | else: 55 | return u,v 56 | 57 | def flow_write(filename,uv,v=None): 58 | """ Write optical flow to file. 59 | 60 | If v is None, uv is assumed to contain both u and v channels, 61 | stacked in depth. 62 | 63 | Original code by Deqing Sun, adapted from Daniel Scharstein. 64 | """ 65 | nBands = 2 66 | 67 | if v is None: 68 | uv_ = np.array(uv) 69 | assert(uv_.ndim==3) 70 | if uv_.shape[0] == 2: 71 | u = uv_[0,:,:] 72 | v = uv_[1,:,:] 73 | elif uv_.shape[2] == 2: 74 | u = uv_[:,:,0] 75 | v = uv_[:,:,1] 76 | else: 77 | raise UVError('Wrong format for flow input') 78 | else: 79 | u = uv 80 | 81 | assert(u.shape == v.shape) 82 | height,width = u.shape 83 | f = open(filename,'wb') 84 | # write the header 85 | f.write(TAG_CHAR) 86 | np.array(width).astype(np.int32).tofile(f) 87 | np.array(height).astype(np.int32).tofile(f) 88 | # arrange into matrix form 89 | tmp = np.zeros((height, width*nBands)) 90 | tmp[:,np.arange(width)*2] = u 91 | tmp[:,np.arange(width)*2 + 1] = v 92 | tmp.astype(np.float32).tofile(f) 93 | f.close() 94 | 95 | 96 | def flow_read_png(fpath): 97 | """ 98 | Read KITTI optical flow, returns u,v,valid mask 99 | 100 | """ 101 | if not has_png: 102 | print('Error. Please install the PyPNG library') 103 | return 104 | 105 | R = png.Reader(fpath) 106 | width,height,data,_ = R.asDirect() 107 | # This only worked with python2. 108 | #I = np.array(map(lambda x:x,data)).reshape((height,width,3)) 109 | I = np.array([x for x in data]).reshape((height,width,3)) 110 | u_ = I[:,:,0] 111 | v_ = I[:,:,1] 112 | valid = I[:,:,2] 113 | 114 | u = (u_.astype('float64')-2**15)/64.0 115 | v = (v_.astype('float64')-2**15)/64.0 116 | 117 | return u,v,valid 118 | 119 | 120 | def flow_write_png(fpath,u,v,valid=None): 121 | """ 122 | Write KITTI optical flow. 123 | 124 | """ 125 | if not has_png: 126 | print('Error. 
Please install the PyPNG library') 127 | return 128 | 129 | 130 | if valid==None: 131 | valid_ = np.ones(u.shape,dtype='uint16') 132 | else: 133 | valid_ = valid.astype('uint16') 134 | 135 | 136 | 137 | u_ = ((u*64.0)+2**15).astype('uint16') 138 | v_ = ((v*64.0)+2**15).astype('uint16') 139 | 140 | I = np.dstack((u_,v_,valid_)) 141 | 142 | W = png.Writer(width=u.shape[1], 143 | height=u.shape[0], 144 | bitdepth=16, 145 | planes=3) 146 | 147 | with open(fpath,'wb') as fil: 148 | W.write(fil,I.reshape((-1,3*u.shape[1]))) 149 | -------------------------------------------------------------------------------- /flowutils/flow_viz.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import torch 3 | from torchvision.transforms import ToTensor 4 | 5 | def batchComputeFlowImage(uv): 6 | flow_im = torch.zeros(uv.size(0), 3, uv.size(2), uv.size(3) ) 7 | uv_np = uv.numpy() 8 | for i in range(uv.size(0)): 9 | flow_im[i] = ToTensor()(computeFlowImage(uv_np[i][0], uv_np[i][1])) 10 | return flow_im 11 | 12 | def computeFlowImage(u,v,logscale=True,scaledown=6,output=False): 13 | """ 14 | topleft is zero, u is horiz, v is vertical 15 | red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12 16 | """ 17 | colorwheel = makecolorwheel() 18 | ncols = colorwheel.shape[0] 19 | 20 | radius = np.sqrt(u**2 + v**2) 21 | if output: 22 | print("Maximum flow magnitude: %04f" % np.max(radius)) 23 | if logscale: 24 | radius = np.log(radius + 1) 25 | if output: 26 | print("Maximum flow magnitude (after log): %0.4f" % np.max(radius)) 27 | radius = radius / scaledown 28 | if output: 29 | print("Maximum flow magnitude (after scaledown): %0.4f" % np.max(radius)) 30 | rot = np.arctan2(-v, -u) / np.pi 31 | 32 | fk = (rot+1)/2 * (ncols-1) # -1~1 maped to 0~ncols 33 | k0 = fk.astype(np.uint8) # 0, 1, 2, ..., ncols 34 | 35 | k1 = k0+1 36 | k1[k1 == ncols] = 0 37 | 38 | f = fk - k0 39 | 40 | ncolors = colorwheel.shape[1] 41 | img = np.zeros(u.shape+(ncolors,)) 42 | for i in range(ncolors): 43 | tmp = colorwheel[:,i] 44 | col0 = tmp[k0] 45 | col1 = tmp[k1] 46 | col = (1-f)*col0 + f*col1 47 | 48 | idx = radius <= 1 49 | # increase saturation with radius 50 | col[idx] = 1 - radius[idx]*(1-col[idx]) 51 | # out of range 52 | col[~idx] *= 0.75 53 | img[:,:,i] = np.floor(255*col).astype(np.uint8) 54 | 55 | return img.astype(np.uint8) 56 | 57 | 58 | def makecolorwheel(): 59 | # Create a colorwheel for visualization 60 | RY = 15 61 | YG = 6 62 | GC = 4 63 | CB = 11 64 | BM = 13 65 | MR = 6 66 | 67 | ncols = RY + YG + GC + CB + BM + MR 68 | 69 | colorwheel = np.zeros((ncols,3)) 70 | 71 | col = 0 72 | # RY 73 | colorwheel[0:RY,0] = 1 74 | colorwheel[0:RY,1] = np.arange(0,1,1./RY) 75 | col += RY 76 | 77 | # YG 78 | colorwheel[col:col+YG,0] = np.arange(1,0,-1./YG) 79 | colorwheel[col:col+YG,1] = 1 80 | col += YG 81 | 82 | # GC 83 | colorwheel[col:col+GC,1] = 1 84 | colorwheel[col:col+GC,2] = np.arange(0,1,1./GC) 85 | col += GC 86 | 87 | # CB 88 | colorwheel[col:col+CB,1] = np.arange(1,0,-1./CB) 89 | colorwheel[col:col+CB,2] = 1 90 | col += CB 91 | 92 | # BM 93 | colorwheel[col:col+BM,2] = 1 94 | colorwheel[col:col+BM,0] = np.arange(0,1,1./BM) 95 | col += BM 96 | 97 | # MR 98 | colorwheel[col:col+MR,2] = np.arange(1,0,-1./MR) 99 | colorwheel[col:col+MR,0] = 1 100 | 101 | return colorwheel 102 | -------------------------------------------------------------------------------- /flowutils/pfm.py: -------------------------------------------------------------------------------- 1 | 
import re
2 | import numpy as np
3 | import sys
4 | 
5 | 
6 | def readPFM(file):
7 |     file = open(file, 'rb')
8 | 
9 |     color = None
10 |     width = None
11 |     height = None
12 |     scale = None
13 |     endian = None
14 | 
15 |     header = file.readline().rstrip().decode('ascii')  # decode bytes for Python 3
16 |     if header == 'PF':
17 |         color = True
18 |     elif header == 'Pf':
19 |         color = False
20 |     else:
21 |         raise Exception('Not a PFM file.')
22 | 
23 |     dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('ascii'))
24 |     if dim_match:
25 |         width, height = map(int, dim_match.groups())
26 |     else:
27 |         raise Exception('Malformed PFM header.')
28 | 
29 |     scale = float(file.readline().rstrip().decode('ascii'))
30 |     if scale < 0: # little-endian
31 |         endian = '<'
32 |         scale = -scale
33 |     else:
34 |         endian = '>' # big-endian
35 | 
36 |     data = np.fromfile(file, endian + 'f')
37 |     shape = (height, width, 3) if color else (height, width)
38 | 
39 |     data = np.reshape(data, shape)
40 |     data = np.flipud(data)
41 |     return data, scale
42 | 
43 | 
44 | def writePFM(file, image, scale=1):
45 |     file = open(file, 'wb')
46 | 
47 |     color = None
48 | 
49 |     if image.dtype.name != 'float32':
50 |         raise Exception('Image dtype must be float32.')
51 | 
52 |     image = np.flipud(image)
53 | 
54 |     if len(image.shape) == 3 and image.shape[2] == 3: # color image
55 |         color = True
56 |     elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
57 |         color = False
58 |     else:
59 |         raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
60 | 
61 |     file.write(('PF\n' if color else 'Pf\n').encode('ascii'))  # file is binary, so encode the header
62 |     file.write(('%d %d\n' % (image.shape[1], image.shape[0])).encode('ascii'))
63 | 
64 |     endian = image.dtype.byteorder
65 | 
66 |     if endian == '<' or endian == '=' and sys.byteorder == 'little':
67 |         scale = -scale
68 | 
69 |     file.write(('%f\n' % scale).encode('ascii'))
70 | 
71 |     image.tofile(file)
--------------------------------------------------------------------------------
/install_flownet2_deps.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd models/resample2d_package
3 | rm -rf *_cuda.egg-info build dist __pycache__
4 | python3 setup.py install
5 | 
6 | cd ../channelnorm_package
7 | rm -rf *_cuda.egg-info build dist __pycache__
8 | python3 setup.py install
9 | cd ../..
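
The two CUDA extensions built by this script provide the flow-warping (`resample2d`) and per-pixel channel-norm (`channelnorm`) layers used by the FlowNet2 models. As a rough, CPU-only reference of what the warping layer computes — a sketch, not this repository's code and not a substitute for the compiled op — backward warping of an image by a flow field can be written in plain PyTorch with `grid_sample`:

```python
# Reference sketch only: approximates what a flow-warping (resample2d-style)
# op computes; the CUDA extensions above should still be built for FlowNet2.
import torch
import torch.nn.functional as F

def warp_by_flow(img, flow):
    """img: (B,C,H,W); flow: (B,2,H,W) with (u,v) displacements in pixels."""
    b, _, h, w = img.shape
    ys = torch.arange(h, dtype=img.dtype, device=img.device).view(1, h, 1).expand(b, h, w)
    xs = torch.arange(w, dtype=img.dtype, device=img.device).view(1, 1, w).expand(b, h, w)
    new_x = xs + flow[:, 0]                  # horizontal displacement u
    new_y = ys + flow[:, 1]                  # vertical displacement v
    # grid_sample expects sampling coordinates normalized to [-1, 1]
    grid = torch.stack((2.0 * new_x / max(w - 1, 1) - 1.0,
                        2.0 * new_y / max(h - 1, 1) - 1.0), dim=3)
    return F.grid_sample(img, grid, align_corners=True)

# Sanity check: zero flow should reproduce the input image.
img = torch.rand(1, 3, 8, 8)
flow = torch.zeros(1, 2, 8, 8)
assert torch.allclose(warp_by_flow(img, flow), img, atol=1e-5)
```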
10 | -------------------------------------------------------------------------------- /logger.py: -------------------------------------------------------------------------------- 1 | from blessings import Terminal 2 | import progressbar 3 | import sys 4 | 5 | 6 | class TermLogger(object): 7 | def __init__(self, n_epochs, train_size, valid_size, attack_size): 8 | self.n_epochs = n_epochs 9 | self.train_size = train_size 10 | self.valid_size = valid_size 11 | self.attack_size = attack_size 12 | self.t = Terminal() 13 | s = 13 14 | e = 1 # epoch bar position 15 | tr = 3 # train bar position 16 | ts = 6 # valid bar position 17 | ta = 9 # attack bar position 18 | h = self.t.height 19 | 20 | for i in range(10): 21 | print('') 22 | self.epoch_bar = progressbar.ProgressBar(maxval=n_epochs, fd=Writer(self.t, (0, h-s+e))) 23 | 24 | self.train_writer = Writer(self.t, (0, h-s+tr)) 25 | self.train_bar_writer = Writer(self.t, (0, h-s+tr+1)) 26 | 27 | self.valid_writer = Writer(self.t, (0, h-s+ts)) 28 | self.valid_bar_writer = Writer(self.t, (0, h-s+ts+1)) 29 | 30 | self.attack_writer = Writer(self.t, (0, h-s+ta)) 31 | self.attack_bar_writer = Writer(self.t, (0, h-s+ta+1)) 32 | 33 | self.reset_train_bar() 34 | self.reset_valid_bar() 35 | self.reset_attack_bar() 36 | 37 | def reset_train_bar(self): 38 | self.train_bar = progressbar.ProgressBar(maxval=self.train_size, fd=self.train_bar_writer).start() 39 | 40 | def reset_valid_bar(self): 41 | self.valid_bar = progressbar.ProgressBar(maxval=self.valid_size, fd=self.valid_bar_writer).start() 42 | 43 | def reset_attack_bar(self): 44 | self.attack_bar = progressbar.ProgressBar(maxval=self.attack_size, fd=self.attack_bar_writer).start() 45 | 46 | class Writer(object): 47 | """Create an object with a write method that writes to a 48 | specific place on the screen, defined at instantiation. 49 | 50 | This is the glue between blessings and progressbar. 
51 | """ 52 | 53 | def __init__(self, t, location): 54 | """ 55 | Input: location - tuple of ints (x, y), the position 56 | of the bar in the terminal 57 | """ 58 | self.location = location 59 | self.t = t 60 | 61 | def write(self, string): 62 | with self.t.location(*self.location): 63 | sys.stdout.write("\033[K") 64 | print(string) 65 | 66 | def flush(self): 67 | return 68 | 69 | 70 | class AverageMeter(object): 71 | """Computes and stores the average and current value""" 72 | 73 | def __init__(self, i=1, precision=3): 74 | self.meters = i 75 | self.precision = precision 76 | self.reset(self.meters) 77 | 78 | def reset(self, i): 79 | self.val = [0]*i 80 | self.avg = [0]*i 81 | self.sum = [0]*i 82 | self.count = 0 83 | 84 | def update(self, val, n=1): 85 | if not isinstance(val, list): 86 | val = [val] 87 | assert(len(val) == self.meters) 88 | self.count += n 89 | for i,v in enumerate(val): 90 | self.val[i] = v 91 | self.sum[i] += v * n 92 | self.avg[i] = self.sum[i] / self.count 93 | 94 | def __repr__(self): 95 | val = ' '.join(['{:.{}f}'.format(v, self.precision) for v in self.val]) 96 | avg = ' '.join(['{:.{}f}'.format(a, self.precision) for a in self.avg]) 97 | return '{} ({})'.format(val, avg) 98 | -------------------------------------------------------------------------------- /losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.autograd import Variable 4 | epsilon = 1e-8 5 | def compute_epe(gt, pred): 6 | _, _, h_pred, w_pred = pred.size() 7 | bs, nc, h_gt, w_gt = gt.size() 8 | u_gt, v_gt = gt[:,0,:,:], gt[:,1,:,:] 9 | pred = nn.functional.upsample(pred, size=(h_gt, w_gt), mode='bilinear') 10 | u_pred = pred[:,0,:,:] * (w_gt/w_pred) 11 | v_pred = pred[:,1,:,:] * (h_gt/h_pred) 12 | 13 | epe = torch.sqrt(torch.pow((u_gt - u_pred), 2) + torch.pow((v_gt - v_pred), 2)) 14 | 15 | if nc == 3: 16 | valid = gt[:,2,:,:] 17 | epe = epe * valid 18 | avg_epe = epe.sum()/(valid.sum() + epsilon) 19 | else: 20 | avg_epe = epe.sum()/(bs*h_gt*w_gt) 21 | 22 | 23 | if type(avg_epe) == Variable: avg_epe = avg_epe.data 24 | 25 | return avg_epe.item() 26 | 27 | def compute_cossim(gt, pred): 28 | _, _, h_pred, w_pred = pred.size() 29 | bs, nc, h_gt, w_gt = gt.size() 30 | #u_gt, v_gt = gt[:,0,:,:], gt[:,1,:,:] 31 | pred = nn.functional.upsample(pred, size=(h_gt, w_gt), mode='bilinear') 32 | #u_pred = pred[:,0,:,:] * (w_gt/w_pred) 33 | #v_pred = pred[:,1,:,:] * (h_gt/h_pred) 34 | 35 | similarity = nn.functional.cosine_similarity(gt[:,:2], pred) 36 | if nc == 3: 37 | valid = gt[:,2,:,:] 38 | similarity = similarity * valid 39 | avg_sim = similarity.sum()/(valid.sum() + epsilon) 40 | else: 41 | avg_sim = similarity.sum()/(bs*h_gt*w_gt) 42 | 43 | 44 | if type(avg_sim) == Variable: avg_sim = avg_sim.data 45 | 46 | return avg_sim.item() 47 | 48 | def multiscale_cossim(gt, pred): 49 | assert(len(gt)==len(pred)) 50 | loss = 0 51 | for (_gt, _pred) in zip(gt, pred): 52 | loss += - nn.functional.cosine_similarity(_gt, _pred).mean() 53 | 54 | return loss 55 | -------------------------------------------------------------------------------- /models/FlowNet2S.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Portions of this code copyright 2017, Clement Pinard 3 | ''' 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torch.nn import init 8 | 9 | import math 10 | import numpy as np 11 | 12 | from .submodules import * 13 | 'Parameter count : 38,676,504 ' 14 | 15 | class 
FlowNetS(nn.Module): 16 | def __init__(self, input_channels = 12, batchNorm=True): 17 | super(FlowNetS,self).__init__() 18 | 19 | self.batchNorm = batchNorm 20 | self.conv1 = conv(self.batchNorm, input_channels, 64, kernel_size=7, stride=2) 21 | self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2) 22 | self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2) 23 | self.conv3_1 = conv(self.batchNorm, 256, 256) 24 | self.conv4 = conv(self.batchNorm, 256, 512, stride=2) 25 | self.conv4_1 = conv(self.batchNorm, 512, 512) 26 | self.conv5 = conv(self.batchNorm, 512, 512, stride=2) 27 | self.conv5_1 = conv(self.batchNorm, 512, 512) 28 | self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) 29 | self.conv6_1 = conv(self.batchNorm,1024, 1024) 30 | 31 | self.deconv5 = deconv(1024,512) 32 | self.deconv4 = deconv(1026,256) 33 | self.deconv3 = deconv(770,128) 34 | self.deconv2 = deconv(386,64) 35 | 36 | self.predict_flow6 = predict_flow(1024) 37 | self.predict_flow5 = predict_flow(1026) 38 | self.predict_flow4 = predict_flow(770) 39 | self.predict_flow3 = predict_flow(386) 40 | self.predict_flow2 = predict_flow(194) 41 | 42 | self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 43 | self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 44 | self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 45 | self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 46 | 47 | for m in self.modules(): 48 | if isinstance(m, nn.Conv2d): 49 | if m.bias is not None: 50 | init.uniform(m.bias) 51 | init.xavier_uniform(m.weight) 52 | 53 | if isinstance(m, nn.ConvTranspose2d): 54 | if m.bias is not None: 55 | init.uniform(m.bias) 56 | init.xavier_uniform(m.weight) 57 | # init_deconv_bilinear(m.weight) 58 | self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') 59 | 60 | def forward(self, x1, x2): 61 | x = torch.cat((x1,x2), dim=1) 62 | out_conv1 = self.conv1(x) 63 | 64 | out_conv2 = self.conv2(out_conv1) 65 | out_conv3 = self.conv3_1(self.conv3(out_conv2)) 66 | out_conv4 = self.conv4_1(self.conv4(out_conv3)) 67 | out_conv5 = self.conv5_1(self.conv5(out_conv4)) 68 | out_conv6 = self.conv6_1(self.conv6(out_conv5)) 69 | 70 | flow6 = self.predict_flow6(out_conv6) 71 | flow6_up = self.upsampled_flow6_to_5(flow6) 72 | out_deconv5 = self.deconv5(out_conv6) 73 | 74 | concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) 75 | flow5 = self.predict_flow5(concat5) 76 | flow5_up = self.upsampled_flow5_to_4(flow5) 77 | out_deconv4 = self.deconv4(concat5) 78 | 79 | concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) 80 | flow4 = self.predict_flow4(concat4) 81 | flow4_up = self.upsampled_flow4_to_3(flow4) 82 | out_deconv3 = self.deconv3(concat4) 83 | 84 | concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) 85 | flow3 = self.predict_flow3(concat3) 86 | flow3_up = self.upsampled_flow3_to_2(flow3) 87 | out_deconv2 = self.deconv2(concat3) 88 | 89 | concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) 90 | flow2 = self.predict_flow2(concat2) 91 | 92 | if self.training: 93 | return flow2,flow3,flow4,flow5,flow6 94 | else: 95 | return flow2, 96 | -------------------------------------------------------------------------------- /models/FlowNetC.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | 5 | import math 6 | import numpy as np 7 | 8 | from .submodules import * 9 | 'Parameter count , 39,175,298 ' 10 | 11 | 
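A note on evaluation, since flow2 above is produced at 1/4 of the input resolution (two stride-2 convolutions precede predict_flow2): compute_epe in losses.py accepts a prediction at a lower resolution than the ground truth, upsamples it and rescales the u/v components internally, and treats a third ground-truth channel, when present, as a per-pixel validity mask (KITTI-style sparse ground truth). A minimal usage sketch with made-up shapes and all-zero tensors, purely for illustration:

import torch
from losses import compute_epe

# Hypothetical KITTI-like shapes: full-resolution ground truth with (u, v, valid)
# channels, and a prediction at 1/4 resolution as returned by the network above.
gt = torch.zeros(1, 3, 256, 832)
gt[:, 2] = 1.0                      # mark every pixel as valid
pred = torch.zeros(1, 2, 64, 208)   # 256/4 x 832/4

# compute_epe upsamples pred to 256x832, scales u by w_gt/w_pred and v by h_gt/h_pred,
# then averages the end-point error over valid pixels only.
print(compute_epe(gt, pred))        # 0.0 for these all-zero tensors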
class FlowNetC(nn.Module): 12 | def __init__(self,batchNorm=False, div_flow = 20): 13 | super(FlowNetC,self).__init__() 14 | 15 | self.rgb_max = 1 16 | self.batchNorm = batchNorm 17 | self.div_flow = div_flow 18 | 19 | self.conv1 = conv(self.batchNorm, 3, 64, kernel_size=7, stride=2) 20 | self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2) 21 | self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2) 22 | self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1) 23 | 24 | self.corr = correlate #Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1) 25 | 26 | self.corr_activation = nn.LeakyReLU(0.1,inplace=True) 27 | self.conv3_1 = conv(self.batchNorm, 473, 256) 28 | self.conv4 = conv(self.batchNorm, 256, 512, stride=2) 29 | self.conv4_1 = conv(self.batchNorm, 512, 512) 30 | self.conv5 = conv(self.batchNorm, 512, 512, stride=2) 31 | self.conv5_1 = conv(self.batchNorm, 512, 512) 32 | self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) 33 | self.conv6_1 = conv(self.batchNorm,1024, 1024) 34 | 35 | self.deconv5 = deconv(1024,512) 36 | self.deconv4 = deconv(1026,256) 37 | self.deconv3 = deconv(770,128) 38 | self.deconv2 = deconv(386,64) 39 | 40 | self.predict_flow6 = predict_flow(1024) 41 | self.predict_flow5 = predict_flow(1026) 42 | self.predict_flow4 = predict_flow(770) 43 | self.predict_flow3 = predict_flow(386) 44 | self.predict_flow2 = predict_flow(194) 45 | 46 | self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 47 | self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 48 | self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 49 | self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 50 | 51 | for m in self.modules(): 52 | if isinstance(m, nn.Conv2d): 53 | if m.bias is not None: 54 | init.uniform(m.bias) 55 | init.xavier_uniform(m.weight) 56 | 57 | if isinstance(m, nn.ConvTranspose2d): 58 | if m.bias is not None: 59 | init.uniform(m.bias) 60 | init.xavier_uniform(m.weight) 61 | # init_deconv_bilinear(m.weight) 62 | self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') 63 | 64 | def normalize(self, im): 65 | im = im - 0.5 66 | im = im / 0.5 67 | return im 68 | 69 | def forward(self, x1, x2): 70 | # inputs = torch.cat((x1, x2), dim=1) 71 | # rgb_mean = inputs.contiguous().view(inputs.size()[:2]+(-1,)).mean(dim=-1).view(inputs.size()[:2] + (1,1,)) 72 | # #print(rgb_mean.size()) 73 | 74 | # x = (inputs - rgb_mean) / self.rgb_max 75 | # x1 = x[:,:2,:,:] 76 | # x2 = x[:,3:,:,:] 77 | 78 | x1 = self.normalize(x1) 79 | x2 = self.normalize(x2) 80 | # FlownetC top input stream 81 | out_conv1a = self.conv1(x1) 82 | out_conv2a = self.conv2(out_conv1a) 83 | out_conv3a = self.conv3(out_conv2a) 84 | 85 | # FlownetC bottom input stream 86 | out_conv1b = self.conv1(x2) 87 | 88 | out_conv2b = self.conv2(out_conv1b) 89 | out_conv3b = self.conv3(out_conv2b) 90 | 91 | # Merge streams 92 | out_corr = self.corr(out_conv3a, out_conv3b) # False 93 | out_corr = self.corr_activation(out_corr) 94 | 95 | # Redirect top input stream and concatenate 96 | out_conv_redir = self.conv_redir(out_conv3a) 97 | 98 | in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1) 99 | 100 | # Merged conv layers 101 | out_conv3_1 = self.conv3_1(in_conv3_1) 102 | 103 | out_conv4 = self.conv4_1(self.conv4(out_conv3_1)) 104 | 105 | out_conv5 = self.conv5_1(self.conv5(out_conv4)) 106 | out_conv6 = self.conv6_1(self.conv6(out_conv5)) 107 | 108 | flow6 = 
self.predict_flow6(out_conv6) 109 | flow6_up = self.upsampled_flow6_to_5(flow6) 110 | out_deconv5 = self.deconv5(out_conv6) 111 | 112 | concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) 113 | 114 | flow5 = self.predict_flow5(concat5) 115 | flow5_up = self.upsampled_flow5_to_4(flow5) 116 | out_deconv4 = self.deconv4(concat5) 117 | concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) 118 | 119 | flow4 = self.predict_flow4(concat4) 120 | flow4_up = self.upsampled_flow4_to_3(flow4) 121 | out_deconv3 = self.deconv3(concat4) 122 | concat3 = torch.cat((out_conv3_1,out_deconv3,flow4_up),1) 123 | 124 | flow3 = self.predict_flow3(concat3) 125 | flow3_up = self.upsampled_flow3_to_2(flow3) 126 | out_deconv2 = self.deconv2(concat3) 127 | concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1) 128 | 129 | flow2 = self.predict_flow2(concat2) 130 | 131 | if self.training: 132 | return self.upsample1(flow2*self.div_flow), self.upsample1(flow3*self.div_flow), self.upsample1(flow4*self.div_flow), self.upsample1(flow5*self.div_flow), self.upsample1(flow6*self.div_flow) 133 | else: 134 | return self.upsample1(flow2*self.div_flow) 135 | -------------------------------------------------------------------------------- /models/FlowNetFusion.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | 5 | import math 6 | import numpy as np 7 | 8 | from submodules import * 9 | 'Parameter count = 581,226' 10 | 11 | class FlowNetFusion(nn.Module): 12 | def __init__(self,args, batchNorm=True): 13 | super(FlowNetFusion,self).__init__() 14 | 15 | self.batchNorm = batchNorm 16 | self.conv0 = conv(self.batchNorm, 11, 64) 17 | self.conv1 = conv(self.batchNorm, 64, 64, stride=2) 18 | self.conv1_1 = conv(self.batchNorm, 64, 128) 19 | self.conv2 = conv(self.batchNorm, 128, 128, stride=2) 20 | self.conv2_1 = conv(self.batchNorm, 128, 128) 21 | 22 | self.deconv1 = deconv(128,32) 23 | self.deconv0 = deconv(162,16) 24 | 25 | self.inter_conv1 = i_conv(self.batchNorm, 162, 32) 26 | self.inter_conv0 = i_conv(self.batchNorm, 82, 16) 27 | 28 | self.predict_flow2 = predict_flow(128) 29 | self.predict_flow1 = predict_flow(32) 30 | self.predict_flow0 = predict_flow(16) 31 | 32 | self.upsampled_flow2_to_1 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 33 | self.upsampled_flow1_to_0 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 34 | 35 | for m in self.modules(): 36 | if isinstance(m, nn.Conv2d): 37 | if m.bias is not None: 38 | init.uniform(m.bias) 39 | init.xavier_uniform(m.weight) 40 | 41 | if isinstance(m, nn.ConvTranspose2d): 42 | if m.bias is not None: 43 | init.uniform(m.bias) 44 | init.xavier_uniform(m.weight) 45 | # init_deconv_bilinear(m.weight) 46 | 47 | def forward(self, x): 48 | out_conv0 = self.conv0(x) 49 | out_conv1 = self.conv1_1(self.conv1(out_conv0)) 50 | out_conv2 = self.conv2_1(self.conv2(out_conv1)) 51 | 52 | flow2 = self.predict_flow2(out_conv2) 53 | flow2_up = self.upsampled_flow2_to_1(flow2) 54 | out_deconv1 = self.deconv1(out_conv2) 55 | 56 | concat1 = torch.cat((out_conv1,out_deconv1,flow2_up),1) 57 | out_interconv1 = self.inter_conv1(concat1) 58 | flow1 = self.predict_flow1(out_interconv1) 59 | flow1_up = self.upsampled_flow1_to_0(flow1) 60 | out_deconv0 = self.deconv0(concat1) 61 | 62 | concat0 = torch.cat((out_conv0,out_deconv0,flow1_up),1) 63 | out_interconv0 = self.inter_conv0(concat0) 64 | flow0 = self.predict_flow0(out_interconv0) 65 | 66 | return flow0 67 | 68 | 
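The channel widths 1026, 770, 386 and 194 that recur in the decoders above (and in the FlowNetS and FlowNetSD variants that follow) are not arbitrary: each torch.cat in the refinement path joins an encoder feature map, a deconvolved feature map and a 2-channel upsampled flow. A worked breakdown of those concatenations, for reference only:

# Channel bookkeeping for the FlowNet-style decoders; the numbers are read off the
# layer definitions above, this snippet only makes the concatenation widths explicit.
concat_channels = {
    "concat5": 512 + 512 + 2,   # out_conv5 + deconv5 + flow6_up -> 1026 (input to predict_flow5 and deconv4)
    "concat4": 512 + 256 + 2,   # out_conv4 + deconv4 + flow5_up -> 770  (input to predict_flow4 and deconv3)
    "concat3": 256 + 128 + 2,   # out_conv3 + deconv3 + flow4_up -> 386  (input to predict_flow3 and deconv2)
    "concat2": 128 +  64 + 2,   # out_conv2 + deconv2 + flow3_up -> 194  (input to predict_flow2)
}
assert list(concat_channels.values()) == [1026, 770, 386, 194]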
-------------------------------------------------------------------------------- /models/FlowNetS.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn.init import kaiming_normal 4 | 5 | __all__ = [ 6 | 'flownets', 'flownets_bn' 7 | ] 8 | 9 | 10 | def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1): 11 | if batchNorm: 12 | return nn.Sequential( 13 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False), 14 | nn.BatchNorm2d(out_planes), 15 | nn.LeakyReLU(0.1,inplace=True) 16 | ) 17 | else: 18 | return nn.Sequential( 19 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True), 20 | nn.LeakyReLU(0.1,inplace=True) 21 | ) 22 | 23 | 24 | def predict_flow(in_planes): 25 | return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=False) 26 | 27 | 28 | def deconv(in_planes, out_planes): 29 | return nn.Sequential( 30 | nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=False), 31 | nn.LeakyReLU(0.1,inplace=True) 32 | ) 33 | 34 | 35 | def crop_like(input, target): 36 | if input.size()[2:] == target.size()[2:]: 37 | return input 38 | else: 39 | return input[:, :, :target.size(2), :target.size(3)] 40 | 41 | 42 | class FlowNetS(nn.Module): 43 | expansion = 1 44 | 45 | def __init__(self,batchNorm=False): 46 | super(FlowNetS,self).__init__() 47 | 48 | self.batchNorm = batchNorm 49 | self.conv1 = conv(self.batchNorm, 6, 64, kernel_size=7, stride=2) 50 | self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2) 51 | self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2) 52 | self.conv3_1 = conv(self.batchNorm, 256, 256) 53 | self.conv4 = conv(self.batchNorm, 256, 512, stride=2) 54 | self.conv4_1 = conv(self.batchNorm, 512, 512) 55 | self.conv5 = conv(self.batchNorm, 512, 512, stride=2) 56 | self.conv5_1 = conv(self.batchNorm, 512, 512) 57 | self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) 58 | self.conv6_1 = conv(self.batchNorm,1024, 1024) 59 | 60 | self.deconv5 = deconv(1024,512) 61 | self.deconv4 = deconv(1026,256) 62 | self.deconv3 = deconv(770,128) 63 | self.deconv2 = deconv(386,64) 64 | 65 | self.predict_flow6 = predict_flow(1024) 66 | self.predict_flow5 = predict_flow(1026) 67 | self.predict_flow4 = predict_flow(770) 68 | self.predict_flow3 = predict_flow(386) 69 | self.predict_flow2 = predict_flow(194) 70 | 71 | self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 72 | self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 73 | self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 74 | self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 75 | 76 | for m in self.modules(): 77 | if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d): 78 | kaiming_normal(m.weight.data) 79 | if m.bias is not None: 80 | m.bias.data.zero_() 81 | elif isinstance(m, nn.BatchNorm2d): 82 | m.weight.data.fill_(1) 83 | m.bias.data.zero_() 84 | 85 | def forward(self, x1, x2): 86 | x = torch.cat((x1, x2), dim=1) 87 | out_conv2 = self.conv2(self.conv1(x)) 88 | out_conv3 = self.conv3_1(self.conv3(out_conv2)) 89 | out_conv4 = self.conv4_1(self.conv4(out_conv3)) 90 | out_conv5 = self.conv5_1(self.conv5(out_conv4)) 91 | out_conv6 = self.conv6_1(self.conv6(out_conv5)) 92 | 93 | flow6 = self.predict_flow6(out_conv6) 94 | flow6_up = 
crop_like(self.upsampled_flow6_to_5(flow6), out_conv5) 95 | out_deconv5 = crop_like(self.deconv5(out_conv6), out_conv5) 96 | 97 | concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) 98 | flow5 = self.predict_flow5(concat5) 99 | flow5_up = crop_like(self.upsampled_flow5_to_4(flow5), out_conv4) 100 | out_deconv4 = crop_like(self.deconv4(concat5), out_conv4) 101 | 102 | concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) 103 | flow4 = self.predict_flow4(concat4) 104 | flow4_up = crop_like(self.upsampled_flow4_to_3(flow4), out_conv3) 105 | out_deconv3 = crop_like(self.deconv3(concat4), out_conv3) 106 | 107 | concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) 108 | flow3 = self.predict_flow3(concat3) 109 | flow3_up = crop_like(self.upsampled_flow3_to_2(flow3), out_conv2) 110 | out_deconv2 = crop_like(self.deconv2(concat3), out_conv2) 111 | 112 | concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) 113 | flow2 = self.predict_flow2(concat2) 114 | 115 | if self.training: 116 | return 5*flow2,5*flow3,5*flow4,5*flow5,5*flow6 117 | else: 118 | return 5*flow2 119 | 120 | def weight_parameters(self): 121 | return [param for name, param in self.named_parameters() if 'weight' in name] 122 | 123 | def bias_parameters(self): 124 | return [param for name, param in self.named_parameters() if 'bias' in name] 125 | 126 | 127 | def flownets(data=None): 128 | """FlowNetS model architecture from the 129 | "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852) 130 | 131 | Args: 132 | data : pretrained weights of the network. will create a new one if not set 133 | """ 134 | model = FlowNetS(batchNorm=False) 135 | if data is not None: 136 | model.load_state_dict(data['state_dict']) 137 | return model 138 | 139 | 140 | def flownets_bn(data=None): 141 | """FlowNetS model architecture from the 142 | "Learning Optical Flow with Convolutional Networks" paper (https://arxiv.org/abs/1504.06852) 143 | 144 | Args: 145 | data : pretrained weights of the network. 
will create a new one if not set 146 | """ 147 | model = FlowNetS(batchNorm=True) 148 | if data is not None: 149 | model.load_state_dict(data['state_dict']) 150 | return model 151 | -------------------------------------------------------------------------------- /models/FlowNetSD.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | 5 | import math 6 | import numpy as np 7 | 8 | from submodules import * 9 | 'Parameter count = 45,371,666' 10 | 11 | class FlowNetSD(nn.Module): 12 | def __init__(self, args, batchNorm=True): 13 | super(FlowNetSD,self).__init__() 14 | 15 | self.batchNorm = batchNorm 16 | self.conv0 = conv(self.batchNorm, 6, 64) 17 | self.conv1 = conv(self.batchNorm, 64, 64, stride=2) 18 | self.conv1_1 = conv(self.batchNorm, 64, 128) 19 | self.conv2 = conv(self.batchNorm, 128, 128, stride=2) 20 | self.conv2_1 = conv(self.batchNorm, 128, 128) 21 | self.conv3 = conv(self.batchNorm, 128, 256, stride=2) 22 | self.conv3_1 = conv(self.batchNorm, 256, 256) 23 | self.conv4 = conv(self.batchNorm, 256, 512, stride=2) 24 | self.conv4_1 = conv(self.batchNorm, 512, 512) 25 | self.conv5 = conv(self.batchNorm, 512, 512, stride=2) 26 | self.conv5_1 = conv(self.batchNorm, 512, 512) 27 | self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) 28 | self.conv6_1 = conv(self.batchNorm,1024, 1024) 29 | 30 | self.deconv5 = deconv(1024,512) 31 | self.deconv4 = deconv(1026,256) 32 | self.deconv3 = deconv(770,128) 33 | self.deconv2 = deconv(386,64) 34 | 35 | self.inter_conv5 = i_conv(self.batchNorm, 1026, 512) 36 | self.inter_conv4 = i_conv(self.batchNorm, 770, 256) 37 | self.inter_conv3 = i_conv(self.batchNorm, 386, 128) 38 | self.inter_conv2 = i_conv(self.batchNorm, 194, 64) 39 | 40 | self.predict_flow6 = predict_flow(1024) 41 | self.predict_flow5 = predict_flow(512) 42 | self.predict_flow4 = predict_flow(256) 43 | self.predict_flow3 = predict_flow(128) 44 | self.predict_flow2 = predict_flow(64) 45 | 46 | self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 47 | self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 48 | self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 49 | self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 50 | 51 | for m in self.modules(): 52 | if isinstance(m, nn.Conv2d): 53 | if m.bias is not None: 54 | init.uniform(m.bias) 55 | init.xavier_uniform(m.weight) 56 | 57 | if isinstance(m, nn.ConvTranspose2d): 58 | if m.bias is not None: 59 | init.uniform(m.bias) 60 | init.xavier_uniform(m.weight) 61 | # init_deconv_bilinear(m.weight) 62 | self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') 63 | 64 | 65 | 66 | def forward(self, x): 67 | out_conv0 = self.conv0(x) 68 | out_conv1 = self.conv1_1(self.conv1(out_conv0)) 69 | out_conv2 = self.conv2_1(self.conv2(out_conv1)) 70 | 71 | out_conv3 = self.conv3_1(self.conv3(out_conv2)) 72 | out_conv4 = self.conv4_1(self.conv4(out_conv3)) 73 | out_conv5 = self.conv5_1(self.conv5(out_conv4)) 74 | out_conv6 = self.conv6_1(self.conv6(out_conv5)) 75 | 76 | flow6 = self.predict_flow6(out_conv6) 77 | flow6_up = self.upsampled_flow6_to_5(flow6) 78 | out_deconv5 = self.deconv5(out_conv6) 79 | 80 | concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) 81 | out_interconv5 = self.inter_conv5(concat5) 82 | flow5 = self.predict_flow5(out_interconv5) 83 | 84 | flow5_up = self.upsampled_flow5_to_4(flow5) 85 | out_deconv4 = self.deconv4(concat5) 86 | 87 | concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) 
88 | out_interconv4 = self.inter_conv4(concat4) 89 | flow4 = self.predict_flow4(out_interconv4) 90 | flow4_up = self.upsampled_flow4_to_3(flow4) 91 | out_deconv3 = self.deconv3(concat4) 92 | 93 | concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) 94 | out_interconv3 = self.inter_conv3(concat3) 95 | flow3 = self.predict_flow3(out_interconv3) 96 | flow3_up = self.upsampled_flow3_to_2(flow3) 97 | out_deconv2 = self.deconv2(concat3) 98 | 99 | concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) 100 | out_interconv2 = self.inter_conv2(concat2) 101 | flow2 = self.predict_flow2(out_interconv2) 102 | 103 | if self.training: 104 | return flow2,flow3,flow4,flow5,flow6 105 | else: 106 | return flow2, 107 | -------------------------------------------------------------------------------- /models/__init__.py: -------------------------------------------------------------------------------- 1 | from .FlowNetS import FlowNetS 2 | from .spynet import Network as SpyNet 3 | from .FlowNetC import FlowNetC 4 | from .flownet2_models import FlowNet2 5 | from .back2future import Back2Future as Back2Future 6 | from .PWCNet import PWCDCNet, pwc_dc_net 7 | -------------------------------------------------------------------------------- /models/channelnorm_package/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/channelnorm_package/__init__.py -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm.py: -------------------------------------------------------------------------------- 1 | from torch.autograd import Function, Variable 2 | from torch.nn.modules.module import Module 3 | import channelnorm_cuda 4 | 5 | class ChannelNormFunction(Function): 6 | 7 | @staticmethod 8 | def forward(ctx, input1, norm_deg=2): 9 | assert input1.is_contiguous() 10 | b, _, h, w = input1.size() 11 | output = input1.new(b, 1, h, w).zero_() 12 | 13 | channelnorm_cuda.forward(input1, output, norm_deg) 14 | ctx.save_for_backward(input1, output) 15 | ctx.norm_deg = norm_deg 16 | 17 | return output 18 | 19 | @staticmethod 20 | def backward(ctx, grad_output): 21 | input1, output = ctx.saved_tensors 22 | 23 | grad_input1 = Variable(input1.new(input1.size()).zero_()) 24 | 25 | channelnorm_cuda.backward(input1, output, grad_output.data, 26 | grad_input1.data, ctx.norm_deg) 27 | 28 | return grad_input1, None 29 | 30 | 31 | class ChannelNorm(Module): 32 | 33 | def __init__(self, norm_deg=2): 34 | super(ChannelNorm, self).__init__() 35 | self.norm_deg = norm_deg 36 | 37 | def forward(self, input1): 38 | return ChannelNormFunction.apply(input1, self.norm_deg) 39 | 40 | -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm_cuda.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "channelnorm_kernel.cuh" 5 | 6 | int channelnorm_cuda_forward( 7 | at::Tensor& input1, 8 | at::Tensor& output, 9 | int norm_deg) { 10 | 11 | channelnorm_kernel_forward(input1, output, norm_deg); 12 | return 1; 13 | } 14 | 15 | 16 | int channelnorm_cuda_backward( 17 | at::Tensor& input1, 18 | at::Tensor& output, 19 | at::Tensor& gradOutput, 20 | at::Tensor& gradInput1, 21 | int norm_deg) { 22 | 23 | channelnorm_kernel_backward(input1, output, gradOutput, gradInput1, norm_deg); 24 | return 1; 25 | } 26 | 27 | 
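ChannelNormFunction above dispatches to the channelnorm_cuda extension; for the default norm_deg=2 the forward kernel sums squared values over the channel dimension and takes a square root, producing a (B, 1, H, W) map of per-pixel channel norms. A pure-PyTorch stand-in (illustrative only, with gradients left to autograd rather than the custom backward) for environments where the extension is not built:

import torch

def channelnorm_reference(x: torch.Tensor) -> torch.Tensor:
    # Per-pixel L2 norm across channels, matching the norm_deg=2 behaviour
    # of ChannelNormFunction; output shape (B, 1, H, W).
    return x.pow(2).sum(dim=1, keepdim=True).sqrt()

# Example: turning a (B, 2, H, W) flow field into its per-pixel magnitude map.
flow = torch.randn(1, 2, 8, 8)
magnitude = channelnorm_reference(flow)
assert magnitude.shape == (1, 1, 8, 8)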
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 28 | m.def("forward", &channelnorm_cuda_forward, "Channel norm forward (CUDA)"); 29 | m.def("backward", &channelnorm_cuda_backward, "Channel norm backward (CUDA)"); 30 | } 31 | 32 | -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm_cuda.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 1.0 2 | Name: channelnorm-cuda 3 | Version: 0.0.0 4 | Summary: UNKNOWN 5 | Home-page: UNKNOWN 6 | Author: UNKNOWN 7 | Author-email: UNKNOWN 8 | License: UNKNOWN 9 | Description: UNKNOWN 10 | Platform: UNKNOWN 11 | -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm_cuda.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | channelnorm_cuda.cc 2 | channelnorm_kernel.cu 3 | setup.py 4 | channelnorm_cuda.egg-info/PKG-INFO 5 | channelnorm_cuda.egg-info/SOURCES.txt 6 | channelnorm_cuda.egg-info/dependency_links.txt 7 | channelnorm_cuda.egg-info/top_level.txt -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm_cuda.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm_cuda.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | channelnorm_cuda 2 | -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm_kernel.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "channelnorm_kernel.cuh" 6 | 7 | #define CUDA_NUM_THREADS 512 8 | 9 | #define DIM0(TENSOR) ((TENSOR).x) 10 | #define DIM1(TENSOR) ((TENSOR).y) 11 | #define DIM2(TENSOR) ((TENSOR).z) 12 | #define DIM3(TENSOR) ((TENSOR).w) 13 | 14 | #define DIM3_INDEX(TENSOR, xx, yy, zz, ww) ((TENSOR)[((xx) * (TENSOR##_stride.x)) + ((yy) * (TENSOR##_stride.y)) + ((zz) * (TENSOR##_stride.z)) + ((ww) * (TENSOR##_stride.w))]) 15 | 16 | using at::Half; 17 | 18 | template 19 | __global__ void kernel_channelnorm_update_output( 20 | const int n, 21 | const scalar_t* __restrict__ input1, 22 | const long4 input1_size, 23 | const long4 input1_stride, 24 | scalar_t* __restrict__ output, 25 | const long4 output_size, 26 | const long4 output_stride, 27 | int norm_deg) { 28 | 29 | int index = blockIdx.x * blockDim.x + threadIdx.x; 30 | 31 | if (index >= n) { 32 | return; 33 | } 34 | 35 | int dim_b = DIM0(output_size); 36 | int dim_c = DIM1(output_size); 37 | int dim_h = DIM2(output_size); 38 | int dim_w = DIM3(output_size); 39 | int dim_chw = dim_c * dim_h * dim_w; 40 | 41 | int b = ( index / dim_chw ) % dim_b; 42 | int y = ( index / dim_w ) % dim_h; 43 | int x = ( index ) % dim_w; 44 | 45 | int i1dim_c = DIM1(input1_size); 46 | int i1dim_h = DIM2(input1_size); 47 | int i1dim_w = DIM3(input1_size); 48 | int i1dim_chw = i1dim_c * i1dim_h * i1dim_w; 49 | int i1dim_hw = i1dim_h * i1dim_w; 50 | 51 | float result = 0.0; 52 | 53 | for (int c = 0; c < i1dim_c; ++c) { 54 | int i1Index = b * i1dim_chw + c * i1dim_hw + y * i1dim_w + x; 55 | scalar_t val = input1[i1Index]; 56 | result += static_cast(val * val); 57 | } 58 | result = 
sqrt(result); 59 | output[index] = static_cast(result); 60 | } 61 | 62 | 63 | template 64 | __global__ void kernel_channelnorm_backward_input1( 65 | const int n, 66 | const scalar_t* __restrict__ input1, const long4 input1_size, const long4 input1_stride, 67 | const scalar_t* __restrict__ output, const long4 output_size, const long4 output_stride, 68 | const scalar_t* __restrict__ gradOutput, const long4 gradOutput_size, const long4 gradOutput_stride, 69 | scalar_t* __restrict__ gradInput, const long4 gradInput_size, const long4 gradInput_stride, 70 | int norm_deg) { 71 | 72 | int index = blockIdx.x * blockDim.x + threadIdx.x; 73 | 74 | if (index >= n) { 75 | return; 76 | } 77 | 78 | float val = 0.0; 79 | 80 | int dim_b = DIM0(gradInput_size); 81 | int dim_c = DIM1(gradInput_size); 82 | int dim_h = DIM2(gradInput_size); 83 | int dim_w = DIM3(gradInput_size); 84 | int dim_chw = dim_c * dim_h * dim_w; 85 | int dim_hw = dim_h * dim_w; 86 | 87 | int b = ( index / dim_chw ) % dim_b; 88 | int y = ( index / dim_w ) % dim_h; 89 | int x = ( index ) % dim_w; 90 | 91 | 92 | int outIndex = b * dim_hw + y * dim_w + x; 93 | val = static_cast(gradOutput[outIndex]) * static_cast(input1[index]) / (static_cast(output[outIndex])+1e-9); 94 | gradInput[index] = static_cast(val); 95 | 96 | } 97 | 98 | void channelnorm_kernel_forward( 99 | at::Tensor& input1, 100 | at::Tensor& output, 101 | int norm_deg) { 102 | 103 | const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); 104 | const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); 105 | 106 | const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3)); 107 | const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)); 108 | 109 | int n = output.numel(); 110 | 111 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "channelnorm_forward", ([&] { 112 | 113 | kernel_channelnorm_update_output<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( 114 | //at::globalContext().getCurrentCUDAStream() >>>( 115 | n, 116 | input1.data(), 117 | input1_size, 118 | input1_stride, 119 | output.data(), 120 | output_size, 121 | output_stride, 122 | norm_deg); 123 | 124 | })); 125 | 126 | // TODO: ATen-equivalent check 127 | 128 | // THCudaCheck(cudaGetLastError()); 129 | } 130 | 131 | void channelnorm_kernel_backward( 132 | at::Tensor& input1, 133 | at::Tensor& output, 134 | at::Tensor& gradOutput, 135 | at::Tensor& gradInput1, 136 | int norm_deg) { 137 | 138 | const long4 input1_size = make_long4(input1.size(0), input1.size(1), input1.size(2), input1.size(3)); 139 | const long4 input1_stride = make_long4(input1.stride(0), input1.stride(1), input1.stride(2), input1.stride(3)); 140 | 141 | const long4 output_size = make_long4(output.size(0), output.size(1), output.size(2), output.size(3)); 142 | const long4 output_stride = make_long4(output.stride(0), output.stride(1), output.stride(2), output.stride(3)); 143 | 144 | const long4 gradOutput_size = make_long4(gradOutput.size(0), gradOutput.size(1), gradOutput.size(2), gradOutput.size(3)); 145 | const long4 gradOutput_stride = make_long4(gradOutput.stride(0), gradOutput.stride(1), gradOutput.stride(2), gradOutput.stride(3)); 146 | 147 | const long4 gradInput1_size = make_long4(gradInput1.size(0), gradInput1.size(1), gradInput1.size(2), gradInput1.size(3)); 148 | const long4 
gradInput1_stride = make_long4(gradInput1.stride(0), gradInput1.stride(1), gradInput1.stride(2), gradInput1.stride(3)); 149 | 150 | int n = gradInput1.numel(); 151 | 152 | AT_DISPATCH_FLOATING_TYPES_AND_HALF(input1.type(), "channelnorm_backward_input1", ([&] { 153 | 154 | kernel_channelnorm_backward_input1<<< (n + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, at::cuda::getCurrentCUDAStream() >>>( 155 | //at::globalContext().getCurrentCUDAStream() >>>( 156 | n, 157 | input1.data(), 158 | input1_size, 159 | input1_stride, 160 | output.data(), 161 | output_size, 162 | output_stride, 163 | gradOutput.data(), 164 | gradOutput_size, 165 | gradOutput_stride, 166 | gradInput1.data(), 167 | gradInput1_size, 168 | gradInput1_stride, 169 | norm_deg 170 | ); 171 | 172 | })); 173 | 174 | // TODO: Add ATen-equivalent check 175 | 176 | // THCudaCheck(cudaGetLastError()); 177 | } 178 | -------------------------------------------------------------------------------- /models/channelnorm_package/channelnorm_kernel.cuh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | void channelnorm_kernel_forward( 6 | at::Tensor& input1, 7 | at::Tensor& output, 8 | int norm_deg); 9 | 10 | 11 | void channelnorm_kernel_backward( 12 | at::Tensor& input1, 13 | at::Tensor& output, 14 | at::Tensor& gradOutput, 15 | at::Tensor& gradInput1, 16 | int norm_deg); 17 | -------------------------------------------------------------------------------- /models/channelnorm_package/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import torch 4 | 5 | from setuptools import setup 6 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 7 | 8 | cxx_args = ['-std=c++11'] 9 | 10 | nvcc_args = [ 11 | '-gencode', 'arch=compute_52,code=sm_52', 12 | '-gencode', 'arch=compute_60,code=sm_60', 13 | '-gencode', 'arch=compute_61,code=sm_61', 14 | '-gencode', 'arch=compute_70,code=sm_70', 15 | '-gencode', 'arch=compute_70,code=compute_70' 16 | ] 17 | 18 | setup( 19 | name='channelnorm_cuda', 20 | ext_modules=[ 21 | CUDAExtension('channelnorm_cuda', [ 22 | 'channelnorm_cuda.cc', 23 | 'channelnorm_kernel.cu' 24 | ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args}) 25 | ], 26 | cmdclass={ 27 | 'build_ext': BuildExtension 28 | }) 29 | -------------------------------------------------------------------------------- /models/flownet2/FlowNetC.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | 5 | import math 6 | import numpy as np 7 | 8 | from ..submodules import * 9 | 'Parameter count , 39,175,298 ' 10 | 11 | class FlowNetC(nn.Module): 12 | def __init__(self, batchNorm=True, div_flow = 20): 13 | super(FlowNetC,self).__init__() 14 | 15 | self.batchNorm = batchNorm 16 | self.div_flow = div_flow 17 | 18 | self.conv1 = conv(self.batchNorm, 3, 64, kernel_size=7, stride=2) 19 | self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2) 20 | self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2) 21 | self.conv_redir = conv(self.batchNorm, 256, 32, kernel_size=1, stride=1) 22 | 23 | # if args.fp16: 24 | # self.corr = nn.Sequential( 25 | # tofp32(), 26 | # Correlation(pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1), 27 | # tofp16()) 28 | # else: 29 | self.corr = correlate #Correlation(pad_size=20, 
kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1) 30 | 31 | self.corr_activation = nn.LeakyReLU(0.1,inplace=True) 32 | self.conv3_1 = conv(self.batchNorm, 473, 256) 33 | self.conv4 = conv(self.batchNorm, 256, 512, stride=2) 34 | self.conv4_1 = conv(self.batchNorm, 512, 512) 35 | self.conv5 = conv(self.batchNorm, 512, 512, stride=2) 36 | self.conv5_1 = conv(self.batchNorm, 512, 512) 37 | self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) 38 | self.conv6_1 = conv(self.batchNorm,1024, 1024) 39 | 40 | self.deconv5 = deconv(1024,512) 41 | self.deconv4 = deconv(1026,256) 42 | self.deconv3 = deconv(770,128) 43 | self.deconv2 = deconv(386,64) 44 | 45 | self.predict_flow6 = predict_flow(1024) 46 | self.predict_flow5 = predict_flow(1026) 47 | self.predict_flow4 = predict_flow(770) 48 | self.predict_flow3 = predict_flow(386) 49 | self.predict_flow2 = predict_flow(194) 50 | 51 | self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 52 | self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 53 | self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 54 | self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True) 55 | 56 | for m in self.modules(): 57 | if isinstance(m, nn.Conv2d): 58 | if m.bias is not None: 59 | init.uniform(m.bias) 60 | init.xavier_uniform(m.weight) 61 | 62 | if isinstance(m, nn.ConvTranspose2d): 63 | if m.bias is not None: 64 | init.uniform(m.bias) 65 | init.xavier_uniform(m.weight) 66 | # init_deconv_bilinear(m.weight) 67 | self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') 68 | 69 | def forward(self, x): 70 | x1 = x[:,0:3,:,:] 71 | x2 = x[:,3::,:,:] 72 | 73 | out_conv1a = self.conv1(x1) 74 | out_conv2a = self.conv2(out_conv1a) 75 | out_conv3a = self.conv3(out_conv2a) 76 | 77 | # FlownetC bottom input stream 78 | out_conv1b = self.conv1(x2) 79 | 80 | out_conv2b = self.conv2(out_conv1b) 81 | out_conv3b = self.conv3(out_conv2b) 82 | 83 | # Merge streams 84 | out_corr = self.corr(out_conv3a, out_conv3b) # False 85 | out_corr = self.corr_activation(out_corr) 86 | 87 | # Redirect top input stream and concatenate 88 | out_conv_redir = self.conv_redir(out_conv3a) 89 | 90 | in_conv3_1 = torch.cat((out_conv_redir, out_corr), 1) 91 | 92 | # Merged conv layers 93 | out_conv3_1 = self.conv3_1(in_conv3_1) 94 | 95 | out_conv4 = self.conv4_1(self.conv4(out_conv3_1)) 96 | 97 | out_conv5 = self.conv5_1(self.conv5(out_conv4)) 98 | out_conv6 = self.conv6_1(self.conv6(out_conv5)) 99 | 100 | flow6 = self.predict_flow6(out_conv6) 101 | flow6_up = self.upsampled_flow6_to_5(flow6) 102 | out_deconv5 = self.deconv5(out_conv6) 103 | 104 | concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) 105 | 106 | flow5 = self.predict_flow5(concat5) 107 | flow5_up = self.upsampled_flow5_to_4(flow5) 108 | out_deconv4 = self.deconv4(concat5) 109 | concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) 110 | 111 | flow4 = self.predict_flow4(concat4) 112 | flow4_up = self.upsampled_flow4_to_3(flow4) 113 | out_deconv3 = self.deconv3(concat4) 114 | concat3 = torch.cat((out_conv3_1,out_deconv3,flow4_up),1) 115 | 116 | flow3 = self.predict_flow3(concat3) 117 | flow3_up = self.upsampled_flow3_to_2(flow3) 118 | out_deconv2 = self.deconv2(concat3) 119 | concat2 = torch.cat((out_conv2a,out_deconv2,flow3_up),1) 120 | 121 | flow2 = self.predict_flow2(concat2) 122 | 123 | if self.training: 124 | return flow2,flow3,flow4,flow5,flow6 125 | else: 126 | return flow2, 127 | 
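The correlate callable used above is imported from .submodules (not shown in this section) and stands in for the Correlation op described in the comment: pad_size=20, kernel_size=1, max_displacement=20, stride1=1, stride2=2, corr_multiply=1. Sampling displacements every 2 pixels in [-20, 20] gives 21 x 21 = 441 correlation channels, which together with the 32 channels from conv_redir explains the 473 input channels of conv3_1. A slow, pure-PyTorch sketch of such a cost volume, for illustration only (the repository relies on its own implementation of the op):

import torch
import torch.nn.functional as F

def naive_correlation(f1, f2, max_displacement=20, stride2=2):
    # Cost volume between two feature maps: for every displacement (dy, dx) sampled
    # every stride2 pixels in [-max_displacement, max_displacement], average the
    # channel-wise product of f1 with the correspondingly shifted f2.
    b, c, h, w = f1.shape
    f2_pad = F.pad(f2, (max_displacement,) * 4)
    offsets = range(-max_displacement, max_displacement + 1, stride2)   # 21 values
    maps = []
    for dy in offsets:
        for dx in offsets:
            shifted = f2_pad[:, :, max_displacement + dy:max_displacement + dy + h,
                                   max_displacement + dx:max_displacement + dx + w]
            maps.append((f1 * shifted).mean(dim=1, keepdim=True))
    return torch.cat(maps, dim=1)

# Shape check on a tiny input; the real layer runs on the conv3 feature maps.
out = naive_correlation(torch.randn(1, 256, 12, 16), torch.randn(1, 256, 12, 16))
assert out.shape == (1, 441, 12, 16)   # 441 + 32 redirect channels = 473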
-------------------------------------------------------------------------------- /models/flownet2/FlowNetFusion.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | 5 | import math 6 | import numpy as np 7 | 8 | from ..submodules import * 9 | 'Parameter count = 581,226' 10 | 11 | class FlowNetFusion(nn.Module): 12 | def __init__(self, batchNorm=True): 13 | super(FlowNetFusion,self).__init__() 14 | 15 | self.batchNorm = batchNorm 16 | self.conv0 = conv(self.batchNorm, 11, 64) 17 | self.conv1 = conv(self.batchNorm, 64, 64, stride=2) 18 | self.conv1_1 = conv(self.batchNorm, 64, 128) 19 | self.conv2 = conv(self.batchNorm, 128, 128, stride=2) 20 | self.conv2_1 = conv(self.batchNorm, 128, 128) 21 | 22 | self.deconv1 = deconv(128,32) 23 | self.deconv0 = deconv(162,16) 24 | 25 | self.inter_conv1 = i_conv(self.batchNorm, 162, 32) 26 | self.inter_conv0 = i_conv(self.batchNorm, 82, 16) 27 | 28 | self.predict_flow2 = predict_flow(128) 29 | self.predict_flow1 = predict_flow(32) 30 | self.predict_flow0 = predict_flow(16) 31 | 32 | self.upsampled_flow2_to_1 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 33 | self.upsampled_flow1_to_0 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 34 | 35 | for m in self.modules(): 36 | if isinstance(m, nn.Conv2d): 37 | if m.bias is not None: 38 | init.uniform(m.bias) 39 | init.xavier_uniform(m.weight) 40 | 41 | if isinstance(m, nn.ConvTranspose2d): 42 | if m.bias is not None: 43 | init.uniform(m.bias) 44 | init.xavier_uniform(m.weight) 45 | # init_deconv_bilinear(m.weight) 46 | 47 | def forward(self, x): 48 | out_conv0 = self.conv0(x) 49 | out_conv1 = self.conv1_1(self.conv1(out_conv0)) 50 | out_conv2 = self.conv2_1(self.conv2(out_conv1)) 51 | 52 | flow2 = self.predict_flow2(out_conv2) 53 | flow2_up = self.upsampled_flow2_to_1(flow2) 54 | out_deconv1 = self.deconv1(out_conv2) 55 | 56 | concat1 = torch.cat((out_conv1,out_deconv1,flow2_up),1) 57 | out_interconv1 = self.inter_conv1(concat1) 58 | flow1 = self.predict_flow1(out_interconv1) 59 | flow1_up = self.upsampled_flow1_to_0(flow1) 60 | out_deconv0 = self.deconv0(concat1) 61 | 62 | concat0 = torch.cat((out_conv0,out_deconv0,flow1_up),1) 63 | out_interconv0 = self.inter_conv0(concat0) 64 | flow0 = self.predict_flow0(out_interconv0) 65 | 66 | return flow0 67 | -------------------------------------------------------------------------------- /models/flownet2/FlowNetS.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Portions of this code copyright 2017, Clement Pinard 3 | ''' 4 | 5 | import torch 6 | import torch.nn as nn 7 | from torch.nn import init 8 | 9 | import math 10 | import numpy as np 11 | 12 | from ..submodules import * 13 | 'Parameter count : 38,676,504 ' 14 | 15 | class FlowNetS(nn.Module): 16 | def __init__(self, input_channels = 12, batchNorm=True): 17 | super(FlowNetS,self).__init__() 18 | 19 | self.batchNorm = batchNorm 20 | self.conv1 = conv(self.batchNorm, input_channels, 64, kernel_size=7, stride=2) 21 | self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2) 22 | self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2) 23 | self.conv3_1 = conv(self.batchNorm, 256, 256) 24 | self.conv4 = conv(self.batchNorm, 256, 512, stride=2) 25 | self.conv4_1 = conv(self.batchNorm, 512, 512) 26 | self.conv5 = conv(self.batchNorm, 512, 512, stride=2) 27 | self.conv5_1 = conv(self.batchNorm, 512, 512) 28 | self.conv6 = conv(self.batchNorm, 512, 1024, stride=2) 29 | 
self.conv6_1 = conv(self.batchNorm,1024, 1024) 30 | 31 | self.deconv5 = deconv(1024,512) 32 | self.deconv4 = deconv(1026,256) 33 | self.deconv3 = deconv(770,128) 34 | self.deconv2 = deconv(386,64) 35 | 36 | self.predict_flow6 = predict_flow(1024) 37 | self.predict_flow5 = predict_flow(1026) 38 | self.predict_flow4 = predict_flow(770) 39 | self.predict_flow3 = predict_flow(386) 40 | self.predict_flow2 = predict_flow(194) 41 | 42 | self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 43 | self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 44 | self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 45 | self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=False) 46 | 47 | for m in self.modules(): 48 | if isinstance(m, nn.Conv2d): 49 | if m.bias is not None: 50 | init.uniform(m.bias) 51 | init.xavier_uniform(m.weight) 52 | 53 | if isinstance(m, nn.ConvTranspose2d): 54 | if m.bias is not None: 55 | init.uniform(m.bias) 56 | init.xavier_uniform(m.weight) 57 | # init_deconv_bilinear(m.weight) 58 | self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') 59 | 60 | def forward(self, x): 61 | out_conv1 = self.conv1(x) 62 | 63 | out_conv2 = self.conv2(out_conv1) 64 | out_conv3 = self.conv3_1(self.conv3(out_conv2)) 65 | out_conv4 = self.conv4_1(self.conv4(out_conv3)) 66 | out_conv5 = self.conv5_1(self.conv5(out_conv4)) 67 | out_conv6 = self.conv6_1(self.conv6(out_conv5)) 68 | 69 | flow6 = self.predict_flow6(out_conv6) 70 | flow6_up = self.upsampled_flow6_to_5(flow6) 71 | out_deconv5 = self.deconv5(out_conv6) 72 | 73 | concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) 74 | flow5 = self.predict_flow5(concat5) 75 | flow5_up = self.upsampled_flow5_to_4(flow5) 76 | out_deconv4 = self.deconv4(concat5) 77 | 78 | concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) 79 | flow4 = self.predict_flow4(concat4) 80 | flow4_up = self.upsampled_flow4_to_3(flow4) 81 | out_deconv3 = self.deconv3(concat4) 82 | 83 | concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) 84 | flow3 = self.predict_flow3(concat3) 85 | flow3_up = self.upsampled_flow3_to_2(flow3) 86 | out_deconv2 = self.deconv2(concat3) 87 | 88 | concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) 89 | flow2 = self.predict_flow2(concat2) 90 | 91 | if self.training: 92 | return flow2,flow3,flow4,flow5,flow6 93 | else: 94 | return flow2, 95 | -------------------------------------------------------------------------------- /models/flownet2/FlowNetSD.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from torch.nn import init 4 | 5 | import math 6 | import numpy as np 7 | 8 | from ..submodules import * 9 | 'Parameter count = 45,371,666' 10 | 11 | class FlowNetSD(nn.Module): 12 | def __init__(self, batchNorm=True): 13 | super(FlowNetSD,self).__init__() 14 | 15 | self.batchNorm = batchNorm 16 | self.conv0 = conv(self.batchNorm, 6, 64) 17 | self.conv1 = conv(self.batchNorm, 64, 64, stride=2) 18 | self.conv1_1 = conv(self.batchNorm, 64, 128) 19 | self.conv2 = conv(self.batchNorm, 128, 128, stride=2) 20 | self.conv2_1 = conv(self.batchNorm, 128, 128) 21 | self.conv3 = conv(self.batchNorm, 128, 256, stride=2) 22 | self.conv3_1 = conv(self.batchNorm, 256, 256) 23 | self.conv4 = conv(self.batchNorm, 256, 512, stride=2) 24 | self.conv4_1 = conv(self.batchNorm, 512, 512) 25 | self.conv5 = conv(self.batchNorm, 512, 512, stride=2) 26 | self.conv5_1 = conv(self.batchNorm, 512, 512) 27 | self.conv6 = 
conv(self.batchNorm, 512, 1024, stride=2) 28 | self.conv6_1 = conv(self.batchNorm,1024, 1024) 29 | 30 | self.deconv5 = deconv(1024,512) 31 | self.deconv4 = deconv(1026,256) 32 | self.deconv3 = deconv(770,128) 33 | self.deconv2 = deconv(386,64) 34 | 35 | self.inter_conv5 = i_conv(self.batchNorm, 1026, 512) 36 | self.inter_conv4 = i_conv(self.batchNorm, 770, 256) 37 | self.inter_conv3 = i_conv(self.batchNorm, 386, 128) 38 | self.inter_conv2 = i_conv(self.batchNorm, 194, 64) 39 | 40 | self.predict_flow6 = predict_flow(1024) 41 | self.predict_flow5 = predict_flow(512) 42 | self.predict_flow4 = predict_flow(256) 43 | self.predict_flow3 = predict_flow(128) 44 | self.predict_flow2 = predict_flow(64) 45 | 46 | self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 47 | self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 48 | self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 49 | self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1) 50 | 51 | for m in self.modules(): 52 | if isinstance(m, nn.Conv2d): 53 | if m.bias is not None: 54 | init.uniform(m.bias) 55 | init.xavier_uniform(m.weight) 56 | 57 | if isinstance(m, nn.ConvTranspose2d): 58 | if m.bias is not None: 59 | init.uniform(m.bias) 60 | init.xavier_uniform(m.weight) 61 | # init_deconv_bilinear(m.weight) 62 | self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear') 63 | 64 | 65 | 66 | def forward(self, x): 67 | out_conv0 = self.conv0(x) 68 | out_conv1 = self.conv1_1(self.conv1(out_conv0)) 69 | out_conv2 = self.conv2_1(self.conv2(out_conv1)) 70 | 71 | out_conv3 = self.conv3_1(self.conv3(out_conv2)) 72 | out_conv4 = self.conv4_1(self.conv4(out_conv3)) 73 | out_conv5 = self.conv5_1(self.conv5(out_conv4)) 74 | out_conv6 = self.conv6_1(self.conv6(out_conv5)) 75 | 76 | flow6 = self.predict_flow6(out_conv6) 77 | flow6_up = self.upsampled_flow6_to_5(flow6) 78 | out_deconv5 = self.deconv5(out_conv6) 79 | 80 | concat5 = torch.cat((out_conv5,out_deconv5,flow6_up),1) 81 | out_interconv5 = self.inter_conv5(concat5) 82 | flow5 = self.predict_flow5(out_interconv5) 83 | 84 | flow5_up = self.upsampled_flow5_to_4(flow5) 85 | out_deconv4 = self.deconv4(concat5) 86 | 87 | concat4 = torch.cat((out_conv4,out_deconv4,flow5_up),1) 88 | out_interconv4 = self.inter_conv4(concat4) 89 | flow4 = self.predict_flow4(out_interconv4) 90 | flow4_up = self.upsampled_flow4_to_3(flow4) 91 | out_deconv3 = self.deconv3(concat4) 92 | 93 | concat3 = torch.cat((out_conv3,out_deconv3,flow4_up),1) 94 | out_interconv3 = self.inter_conv3(concat3) 95 | flow3 = self.predict_flow3(out_interconv3) 96 | flow3_up = self.upsampled_flow3_to_2(flow3) 97 | out_deconv2 = self.deconv2(concat3) 98 | 99 | concat2 = torch.cat((out_conv2,out_deconv2,flow3_up),1) 100 | out_interconv2 = self.inter_conv2(concat2) 101 | flow2 = self.predict_flow2(out_interconv2) 102 | 103 | if self.training: 104 | return flow2,flow3,flow4,flow5,flow6 105 | else: 106 | return flow2, 107 | -------------------------------------------------------------------------------- /models/flownet2/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/flownet2/__init__.py -------------------------------------------------------------------------------- /models/resample2d_package/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/resample2d_package/__init__.py -------------------------------------------------------------------------------- /models/resample2d_package/resample2d.py: -------------------------------------------------------------------------------- 1 | from torch.nn.modules.module import Module 2 | from torch.autograd import Function, Variable 3 | import resample2d_cuda 4 | 5 | class Resample2dFunction(Function): 6 | 7 | @staticmethod 8 | def forward(ctx, input1, input2, kernel_size=1, bilinear= True): 9 | assert input1.is_contiguous() 10 | assert input2.is_contiguous() 11 | 12 | ctx.save_for_backward(input1, input2) 13 | ctx.kernel_size = kernel_size 14 | ctx.bilinear = bilinear 15 | 16 | _, d, _, _ = input1.size() 17 | b, _, h, w = input2.size() 18 | output = input1.new(b, d, h, w).zero_() 19 | 20 | resample2d_cuda.forward(input1, input2, output, kernel_size, bilinear) 21 | 22 | return output 23 | 24 | @staticmethod 25 | def backward(ctx, grad_output): 26 | grad_output = grad_output.contiguous() 27 | assert grad_output.is_contiguous() 28 | 29 | input1, input2 = ctx.saved_tensors 30 | 31 | grad_input1 = Variable(input1.new(input1.size()).zero_()) 32 | grad_input2 = Variable(input1.new(input2.size()).zero_()) 33 | 34 | resample2d_cuda.backward(input1, input2, grad_output.data, 35 | grad_input1.data, grad_input2.data, 36 | ctx.kernel_size, ctx.bilinear) 37 | 38 | return grad_input1, grad_input2, None, None 39 | 40 | class Resample2d(Module): 41 | 42 | def __init__(self, kernel_size=1, bilinear = True): 43 | super(Resample2d, self).__init__() 44 | self.kernel_size = kernel_size 45 | self.bilinear = bilinear 46 | 47 | def forward(self, input1, input2): 48 | input1_c = input1.contiguous() 49 | return Resample2dFunction.apply(input1_c, input2, self.kernel_size, self.bilinear) 50 | -------------------------------------------------------------------------------- /models/resample2d_package/resample2d_cuda.cc: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "resample2d_kernel.cuh" 5 | 6 | int resample2d_cuda_forward( 7 | at::Tensor& input1, 8 | at::Tensor& input2, 9 | at::Tensor& output, 10 | int kernel_size, bool bilinear) { 11 | resample2d_kernel_forward(input1, input2, output, kernel_size, bilinear); 12 | return 1; 13 | } 14 | 15 | int resample2d_cuda_backward( 16 | at::Tensor& input1, 17 | at::Tensor& input2, 18 | at::Tensor& gradOutput, 19 | at::Tensor& gradInput1, 20 | at::Tensor& gradInput2, 21 | int kernel_size, bool bilinear) { 22 | resample2d_kernel_backward(input1, input2, gradOutput, gradInput1, gradInput2, kernel_size, bilinear); 23 | return 1; 24 | } 25 | 26 | 27 | 28 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 29 | m.def("forward", &resample2d_cuda_forward, "Resample2D forward (CUDA)"); 30 | m.def("backward", &resample2d_cuda_backward, "Resample2D backward (CUDA)"); 31 | } 32 | 33 | -------------------------------------------------------------------------------- /models/resample2d_package/resample2d_cuda.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 1.0 2 | Name: resample2d-cuda 3 | Version: 0.0.0 4 | Summary: UNKNOWN 5 | Home-page: UNKNOWN 6 | Author: UNKNOWN 7 | Author-email: UNKNOWN 8 | License: UNKNOWN 9 | Description: UNKNOWN 10 | Platform: UNKNOWN 11 | -------------------------------------------------------------------------------- 
/models/resample2d_package/resample2d_cuda.egg-info/SOURCES.txt: -------------------------------------------------------------------------------- 1 | resample2d_cuda.cc 2 | resample2d_kernel.cu 3 | setup.py 4 | resample2d_cuda.egg-info/PKG-INFO 5 | resample2d_cuda.egg-info/SOURCES.txt 6 | resample2d_cuda.egg-info/dependency_links.txt 7 | resample2d_cuda.egg-info/top_level.txt -------------------------------------------------------------------------------- /models/resample2d_package/resample2d_cuda.egg-info/dependency_links.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /models/resample2d_package/resample2d_cuda.egg-info/top_level.txt: -------------------------------------------------------------------------------- 1 | resample2d_cuda 2 | -------------------------------------------------------------------------------- /models/resample2d_package/resample2d_kernel.cuh: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | 5 | void resample2d_kernel_forward( 6 | at::Tensor& input1, 7 | at::Tensor& input2, 8 | at::Tensor& output, 9 | int kernel_size, 10 | bool bilinear); 11 | 12 | void resample2d_kernel_backward( 13 | at::Tensor& input1, 14 | at::Tensor& input2, 15 | at::Tensor& gradOutput, 16 | at::Tensor& gradInput1, 17 | at::Tensor& gradInput2, 18 | int kernel_size, 19 | bool bilinear); -------------------------------------------------------------------------------- /models/resample2d_package/setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import os 3 | import torch 4 | 5 | from setuptools import setup 6 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 7 | 8 | cxx_args = ['-std=c++11'] 9 | 10 | nvcc_args = [ 11 | '-gencode', 'arch=compute_50,code=sm_50', 12 | '-gencode', 'arch=compute_52,code=sm_52', 13 | '-gencode', 'arch=compute_60,code=sm_60', 14 | '-gencode', 'arch=compute_61,code=sm_61', 15 | '-gencode', 'arch=compute_70,code=sm_70', 16 | '-gencode', 'arch=compute_70,code=compute_70' 17 | ] 18 | 19 | setup( 20 | name='resample2d_cuda', 21 | ext_modules=[ 22 | CUDAExtension('resample2d_cuda', [ 23 | 'resample2d_cuda.cc', 24 | 'resample2d_kernel.cu' 25 | ], extra_compile_args={'cxx': cxx_args, 'nvcc': nvcc_args}) 26 | ], 27 | cmdclass={ 28 | 'build_ext': BuildExtension 29 | }) 30 | -------------------------------------------------------------------------------- /models/spynet.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2.7 2 | 3 | import getopt 4 | import math 5 | import numpy 6 | import os 7 | import PIL 8 | import PIL.Image 9 | import sys 10 | import torch 11 | from torch.nn import init 12 | 13 | 14 | 15 | class Preprocess(torch.nn.Module): 16 | def __init__(self, pre_normalization): 17 | super(Preprocess, self).__init__() 18 | self.pre_normalization = pre_normalization 19 | # end 20 | 21 | def forward(self, variableInput): 22 | variableRed = variableInput[:, 0:1, :, :] 23 | variableGreen = variableInput[:, 1:2, :, :] 24 | variableBlue = variableInput[:, 2:3, :, :] 25 | 26 | if self.pre_normalization is not None: 27 | if hasattr(self.pre_normalization, 'mean') and hasattr(self.pre_normalization, 'std'): 28 | _mean = self.pre_normalization.mean 29 | _std = self.pre_normalization.std 30 | else: 31 | _mean = 
variableInput.transpose(0,1).contiguous().view(3, -1).mean(1) 32 | _std = variableInput.transpose(0,1).contiguous().view(3, -1).std(1) 33 | 34 | variableRed = variableRed * _std[0] 35 | variableGreen = variableGreen * _std[1] 36 | variableBlue = variableBlue * _std[2] 37 | 38 | variableRed = variableRed + _mean[0] 39 | variableGreen = variableGreen + _mean[1] 40 | variableBlue = variableBlue + _mean[2] 41 | 42 | variableRed = variableRed - 0.485 43 | variableGreen = variableGreen - 0.456 44 | variableBlue = variableBlue - 0.406 45 | 46 | variableRed = variableRed / 0.229 47 | variableGreen = variableGreen / 0.224 48 | variableBlue = variableBlue / 0.225 49 | 50 | return torch.cat([variableRed, variableGreen, variableBlue], 1) 51 | 52 | class Basic(torch.nn.Module): 53 | def __init__(self, intLevel, arguments_strModel): 54 | super(Basic, self).__init__() 55 | self.intLevel = intLevel 56 | 57 | self.moduleBasic = torch.nn.Sequential( 58 | torch.nn.Conv2d(in_channels=8, out_channels=32, kernel_size=7, stride=1, padding=3), 59 | torch.nn.ReLU(inplace=False), 60 | torch.nn.Conv2d(in_channels=32, out_channels=64, kernel_size=7, stride=1, padding=3), 61 | torch.nn.ReLU(inplace=False), 62 | torch.nn.Conv2d(in_channels=64, out_channels=32, kernel_size=7, stride=1, padding=3), 63 | torch.nn.ReLU(inplace=False), 64 | torch.nn.Conv2d(in_channels=32, out_channels=16, kernel_size=7, stride=1, padding=3), 65 | torch.nn.ReLU(inplace=False), 66 | torch.nn.Conv2d(in_channels=16, out_channels=2, kernel_size=7, stride=1, padding=3) 67 | ) 68 | 69 | if intLevel == 5: 70 | if arguments_strModel == '3' or arguments_strModel == '4': 71 | intLevel = 4 # the models trained on the flying chairs dataset do not come with weights for the sixth layer 72 | 73 | for intConv in range(5): 74 | self.moduleBasic[intConv * 2].weight.data.copy_(torch.load('models/spynet_models/modelL' + str(intLevel + 1) + '_' + arguments_strModel + '-' + str(intConv + 1) + '-weight.pth.tar')) 75 | self.moduleBasic[intConv * 2].bias.data.copy_(torch.load('models/spynet_models/modelL' + str(intLevel + 1) + '_' + arguments_strModel + '-' + str(intConv + 1) + '-bias.pth.tar')) 76 | 77 | def forward(self, variableInput): 78 | return self.moduleBasic(variableInput) 79 | 80 | class Backward(torch.nn.Module): 81 | def __init__(self): 82 | super(Backward, self).__init__() 83 | 84 | def forward(self, variableInput, variableFlow): 85 | if hasattr(self, 'tensorGrid') == False or self.tensorGrid.size(0) != variableInput.size(0) or self.tensorGrid.size(2) != variableInput.size(2) or self.tensorGrid.size(3) != variableInput.size(3): 86 | torchHorizontal = torch.linspace(-1.0, 1.0, variableInput.size(3)).view(1, 1, 1, variableInput.size(3)).expand(variableInput.size(0), 1, variableInput.size(2), variableInput.size(3)) 87 | torchVertical = torch.linspace(-1.0, 1.0, variableInput.size(2)).view(1, 1, variableInput.size(2), 1).expand(variableInput.size(0), 1, variableInput.size(2), variableInput.size(3)) 88 | 89 | self.tensorGrid = torch.cat([ torchHorizontal, torchVertical ], 1).cuda() 90 | # end 91 | 92 | variableFlow = torch.cat([ variableFlow[:, 0:1, :, :] / ((variableInput.size(3) - 1.0) / 2.0), variableFlow[:, 1:2, :, :] / ((variableInput.size(2) - 1.0) / 2.0) ], 1) 93 | 94 | variableGrid = torch.autograd.Variable(data=self.tensorGrid) + variableFlow 95 | 96 | return torch.nn.functional.grid_sample(input=variableInput, grid=variableGrid.clamp(-1.0, 1.0).permute(0, 2, 3, 1), mode='bilinear') 97 | 98 | class Network(torch.nn.Module): 99 | """ 100 | Creates 
SpyNet model for estimating optical flow. 101 | If images passed 102 | TODO: 103 | """ 104 | def __init__(self, nlevels, strmodel='F', pre_normalization=None, pretrained=True): 105 | super(Network, self).__init__() 106 | print('Creating Spynet with', nlevels, 'levels') 107 | self.nlevels = nlevels 108 | self.strmodel = strmodel 109 | self.pre_normalization = pre_normalization 110 | self.pretrained = pretrained 111 | 112 | self.modulePreprocess = Preprocess(pre_normalization=pre_normalization) 113 | self.moduleBasic = torch.nn.ModuleList([ Basic(intLevel, strmodel) for intLevel in range(nlevels) ]) 114 | self.moduleBackward = Backward() 115 | 116 | if not self.pretrained: 117 | for m in self.modules(): 118 | if isinstance(m, torch.nn.Conv2d): 119 | if m.bias is not None: 120 | init.uniform(m.bias) 121 | init.xavier_uniform(m.weight) 122 | 123 | 124 | def forward(self, variableFirst, variableSecond): 125 | variableAllFlows = [ 0 for i in range(self.nlevels)] 126 | 127 | variableFirst = [ self.modulePreprocess(variableFirst) ] 128 | variableSecond = [ self.modulePreprocess(variableSecond) ] 129 | 130 | for intLevel in range(self.nlevels-1): 131 | #if variableFirst[0].size(2) > 32 or variableFirst[0].size(3) > 32: 132 | # print('downsample', intLevel) 133 | variableFirst.insert(0, torch.nn.functional.avg_pool2d(input=variableFirst[0], kernel_size=2, stride=2)) 134 | variableSecond.insert(0, torch.nn.functional.avg_pool2d(input=variableSecond[0], kernel_size=2, stride=2)) 135 | # end 136 | # end 137 | 138 | variableFlow = torch.autograd.Variable(data=torch.zeros(variableFirst[0].size(0), 2, int(math.floor(variableFirst[0].size(2) / 2.0)), int(math.floor(variableFirst[0].size(3) / 2.0))).cuda()) 139 | 140 | for intLevel in range(len(variableFirst)): 141 | variableUpsampled = torch.nn.functional.upsample(input=variableFlow, scale_factor=2, mode='bilinear') * 2.0 142 | 143 | if variableUpsampled.size(2) != variableFirst[intLevel].size(2): variableUpsampled = torch.nn.functional.pad(variableUpsampled, [0, 0, 0, 1], 'replicate') 144 | if variableUpsampled.size(3) != variableFirst[intLevel].size(3): variableUpsampled = torch.nn.functional.pad(variableUpsampled, [0, 1, 0, 0], 'replicate') 145 | 146 | variableFlow = self.moduleBasic[intLevel](torch.cat([ variableFirst[intLevel], self.moduleBackward(variableSecond[intLevel], variableUpsampled), variableUpsampled ], 1)) + variableUpsampled 147 | variableAllFlows[self.nlevels-intLevel-1] = variableFlow 148 | # end 149 | if self.training: 150 | return variableAllFlows 151 | else: 152 | return variableFlow 153 | -------------------------------------------------------------------------------- /models/spynet_models/README.md: -------------------------------------------------------------------------------- 1 | These weights originate from the models trained by the original authors: https://github.com/anuragranj/spynet -------------------------------------------------------------------------------- /models/spynet_models/convert2pth.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import torch.utils.serialization 3 | 4 | files = glob.glob('*.t7') 5 | 6 | for f in files: 7 | weight = torch.utils.serialization.load_lua(f) 8 | torch.save(weight, f[:-3]+'.pth.tar') 9 | print('saving', f) 10 | -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_3-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_3-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_4-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_4-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_C-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_C-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_F-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_F-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL1_K-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL1_K-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_3-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_3-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_4-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_4-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_C-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_C-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_F-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_F-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL2_K-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL2_K-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_3-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_3-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_4-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_4-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_C-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_C-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_F-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_F-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL3_K-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL3_K-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_3-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_3-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_4-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_4-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_C-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_C-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_F-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_F-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL4_K-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL4_K-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_3-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_3-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_4-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_4-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_C-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_C-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_F-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_F-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL5_K-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL5_K-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_C-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_C-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_F-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_F-5-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-1-bias.pth.tar: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-1-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-1-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-1-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-2-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-2-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-2-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-2-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-3-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-3-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-3-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-3-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-4-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-4-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-4-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-4-weight.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-5-bias.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-5-bias.pth.tar -------------------------------------------------------------------------------- /models/spynet_models/modelL6_K-5-weight.pth.tar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/models/spynet_models/modelL6_K-5-weight.pth.tar -------------------------------------------------------------------------------- /models/submodules.py: -------------------------------------------------------------------------------- 1 | 2 | import torch.nn as nn 
3 | import torch 4 | import numpy as np 5 | try: 6 | from spatial_correlation_sampler import spatial_correlation_sample 7 | except ImportError as e: 8 | import warnings 9 | with warnings.catch_warnings(): 10 | warnings.filterwarnings("default", category=ImportWarning) 11 | warnings.warn("failed to load custom correlation module" 12 | "which is needed for FlowNetC", ImportWarning) 13 | 14 | def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1): 15 | if batchNorm: 16 | return nn.Sequential( 17 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False), 18 | nn.BatchNorm2d(out_planes), 19 | nn.LeakyReLU(0.1,inplace=True) 20 | ) 21 | else: 22 | return nn.Sequential( 23 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True), 24 | nn.LeakyReLU(0.1,inplace=True) 25 | ) 26 | 27 | def i_conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1, bias = True): 28 | if batchNorm: 29 | return nn.Sequential( 30 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias), 31 | nn.BatchNorm2d(out_planes), 32 | ) 33 | else: 34 | return nn.Sequential( 35 | nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=bias), 36 | ) 37 | 38 | def predict_flow(in_planes): 39 | return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=True) 40 | 41 | def deconv(in_planes, out_planes): 42 | return nn.Sequential( 43 | nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=True), 44 | nn.LeakyReLU(0.1,inplace=True) 45 | ) 46 | 47 | class tofp16(nn.Module): 48 | def __init__(self): 49 | super(tofp16, self).__init__() 50 | 51 | def forward(self, input): 52 | return input.half() 53 | 54 | 55 | class tofp32(nn.Module): 56 | def __init__(self): 57 | super(tofp32, self).__init__() 58 | 59 | def forward(self, input): 60 | return input.float() 61 | 62 | 63 | def init_deconv_bilinear(weight): 64 | f_shape = weight.size() 65 | heigh, width = f_shape[-2], f_shape[-1] 66 | f = np.ceil(width/2.0) 67 | c = (2 * f - 1 - f % 2) / (2.0 * f) 68 | bilinear = np.zeros([heigh, width]) 69 | for x in range(width): 70 | for y in range(heigh): 71 | value = (1 - abs(x / f - c)) * (1 - abs(y / f - c)) 72 | bilinear[x, y] = value 73 | weight.data.fill_(0.) 
74 | for i in range(f_shape[0]): 75 | for j in range(f_shape[1]): 76 | weight.data[i,j,:,:] = torch.from_numpy(bilinear) 77 | 78 | 79 | def save_grad(grads, name): 80 | def hook(grad): 81 | grads[name] = grad 82 | return hook 83 | 84 | def correlate(input1, input2): 85 | out_corr = spatial_correlation_sample(input1, 86 | input2, 87 | kernel_size=1, 88 | patch_size=21, 89 | stride=1, 90 | padding=0, 91 | dilation_patch=2) 92 | # collate dimensions 1 and 2 in order to be treated as a 93 | # regular 4D tensor 94 | b, ph, pw, h, w = out_corr.size() 95 | out_corr = out_corr.view(b, ph * pw, h, w)/input1.size(1) 96 | return out_corr 97 | 98 | ''' 99 | def save_grad(grads, name): 100 | def hook(grad): 101 | grads[name] = grad 102 | return hook 103 | import torch 104 | from channelnorm_package.modules.channelnorm import ChannelNorm 105 | model = ChannelNorm().cuda() 106 | grads = {} 107 | a = 100*torch.autograd.Variable(torch.randn((1,3,5,5)).cuda(), requires_grad=True) 108 | a.register_hook(save_grad(grads, 'a')) 109 | b = model(a) 110 | y = torch.mean(b) 111 | y.backward() 112 | 113 | ''' 114 | -------------------------------------------------------------------------------- /patches/Upatch1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/patches/Upatch1.png -------------------------------------------------------------------------------- /patches/Upatch2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/anuragranj/flowattack/34a17f5c4a877b1b49c4cdd30961000fe5072baf/patches/Upatch2.png -------------------------------------------------------------------------------- /print_patch.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import numpy as np 3 | import torch 4 | from PIL import Image 5 | from utils import * 6 | parser = argparse.ArgumentParser(description='Adversarial attacks on Optical Flow Networks', 7 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 8 | parser.add_argument('--patch_path', dest='patch_path', default='', 9 | help='path to the saved adversarial patch') 10 | parser.add_argument('--scale', dest='scale',type=int, default=8, 11 | help='resize scale') 12 | parser.add_argument('--output_path', dest='output_path', default='results', 13 | help='output dir') 14 | parser.add_argument('--output_name', dest='output_name', default='this_is_your_patch', 15 | help='output file name') 16 | 17 | def main(): 18 | global args 19 | args = parser.parse_args() 20 | patch = torch.Tensor(torch.load(args.patch_path)) 21 | patch_clamped = torch.clamp(patch, -1., 1.) 22 | patch_im = tensor2array(patch_clamped[0])*255. 
23 | 24 | # make background white 25 | mask = createCircularMask(patch_im.shape[0], patch_im.shape[1]).astype('float32') 26 | mask = np.stack((mask,mask,mask), axis=-1) 27 | patch_im = (1-mask) * np.ones(patch_im.shape)*255 + mask * patch_im 28 | 29 | patch_im = Image.fromarray(patch_im.astype('uint8')) 30 | sz = patch_im.size 31 | patch_im = patch_im.resize((sz[0]*args.scale, sz[1]*args.scale)) 32 | patch_im.save('%s/%s.jpg' % (args.output_path, args.output_name)) 33 | 34 | if __name__ == '__main__': 35 | main() 36 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | torchvision 2 | joblib 3 | tqdm 4 | path.py 5 | pillow 6 | msgpack 7 | spatial-correlation-sampler 8 | scipy 9 | blessings 10 | progressbar2 11 | tensorboardX 12 | pypng 13 | scikit-image 14 | opencv-python 15 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import division 2 | import shutil 3 | import numpy as np 4 | import os 5 | import sys 6 | import time 7 | import math 8 | 9 | import torch 10 | import torch.nn as nn 11 | import torch.nn.init as init 12 | from torch.autograd import Variable 13 | 14 | from scipy.ndimage.interpolation import rotate, zoom 15 | from PIL import Image 16 | 17 | def load_as_float(path): 18 | return np.array(Image.open(path)).astype(np.float32) 19 | 20 | def imresize(arr, sz): 21 | height, width = sz 22 | return np.array(Image.fromarray(arr.astype('uint8')).resize((width, height), resample=Image.BILINEAR)) 23 | 24 | def tensor2array(tensor, max_value=255, colormap='rainbow'): 25 | if max_value is None: 26 | max_value = tensor.max() 27 | if tensor.ndimension() == 2 or tensor.size(0) == 1: 28 | try: 29 | import cv2 30 | if cv2.__version__.startswith('3'): 31 | color_cvt = cv2.COLOR_BGR2RGB 32 | else: # 2.4 33 | color_cvt = cv2.cv.CV_BGR2RGB 34 | if colormap == 'rainbow': 35 | colormap = cv2.COLORMAP_RAINBOW 36 | elif colormap == 'bone': 37 | colormap = cv2.COLORMAP_BONE 38 | array = (255*tensor.squeeze().numpy()/max_value).clip(0, 255).astype(np.uint8) 39 | colored_array = cv2.applyColorMap(array, colormap) 40 | array = cv2.cvtColor(colored_array, color_cvt).astype(np.float32)/255 41 | except ImportError: 42 | if tensor.ndimension() == 2: 43 | tensor.unsqueeze_(2) 44 | array = (tensor.expand(tensor.size(0), tensor.size(1), 3).numpy()/max_value).clip(0,1) 45 | 46 | elif tensor.ndimension() == 3: 47 | if (tensor.size(0) == 3): 48 | array = 0.5 + tensor.numpy().transpose(1, 2, 0)*0.5 49 | elif (tensor.size(0) == 2): 50 | array = tensor.numpy().transpose(1, 2, 0) 51 | return array 52 | 53 | def transpose_image(array): 54 | return array.transpose(2, 0, 1) 55 | 56 | 57 | def save_checkpoint(save_path, dispnet_state, exp_pose_state, flownet_state, optimizer_state, is_best, filename='checkpoint.pth.tar'): 58 | file_prefixes = ['dispnet', 'exp_pose', 'flownet', 'optimizer'] 59 | states = [dispnet_state, exp_pose_state, flownet_state, optimizer_state] 60 | for (prefix, state) in zip(file_prefixes, states): 61 | torch.save(state, save_path/'{}_{}'.format(prefix,filename)) 62 | 63 | if is_best: 64 | for prefix in file_prefixes: 65 | shutil.copyfile(save_path/'{}_{}'.format(prefix,filename), save_path/'{}_model_best.pth.tar'.format(prefix)) 66 | 67 | def submatrix(arr): 68 | x, y = np.nonzero(arr) 69 | # Using the smallest and largest x 
and y indices of nonzero elements, 70 | # we can find the desired rectangular bounds. 71 | # And don't forget to add 1 to the top bound to avoid the fencepost problem. 72 | return arr[x.min():x.max()+1, y.min():y.max()+1] 73 | 74 | def crop_patch(patch): 75 | pass 76 | 77 | 78 | class ToSpaceBGR(object): 79 | def __init__(self, is_bgr): 80 | self.is_bgr = is_bgr 81 | def __call__(self, tensor): 82 | if self.is_bgr: 83 | new_tensor = tensor.clone() 84 | new_tensor[0] = tensor[2] 85 | new_tensor[2] = tensor[0] 86 | tensor = new_tensor 87 | return tensor 88 | 89 | 90 | class ToRange255(object): 91 | def __init__(self, is_255): 92 | self.is_255 = is_255 93 | def __call__(self, tensor): 94 | if self.is_255: 95 | tensor.mul_(255) 96 | return tensor 97 | 98 | def createCircularMask(h, w, center=None, radius=None): 99 | 100 | if center is None: # use the middle of the image 101 | center = [int(w/2), int(h/2)] 102 | if radius is None: # use the smallest distance between the center and image walls 103 | radius = min(center[0], center[1], w-center[0], h-center[1])-2 104 | 105 | Y, X = np.ogrid[:h, :w] 106 | dist_from_center = np.sqrt((X - center[0])**2 + (Y-center[1])**2) 107 | 108 | mask = dist_from_center <= radius 109 | return mask 110 | 111 | def init_patch_circle(image_size, patch_size): 112 | patch, patch_shape = init_patch_square(image_size, patch_size) 113 | mask = createCircularMask(patch_shape[-2], patch_shape[-1]).astype('float32') 114 | mask = np.array([[mask,mask,mask]]) 115 | return patch, mask, patch.shape 116 | 117 | 118 | def circle_transform(patch, mask, patch_init, data_shape, patch_shape, margin=0, center=False, norotate=False, fixed_loc=(-1,-1)): 119 | # get dummy image 120 | patch = patch + np.random.random()*0.1 - 0.05 121 | patch = np.clip(patch, 0.,1.) 
122 | patch = patch*mask 123 | x = np.zeros(data_shape) 124 | xm = np.zeros(data_shape) 125 | xp = np.zeros(data_shape) 126 | 127 | # get shape 128 | image_w, image_h = data_shape[-1], data_shape[-2] 129 | 130 | zoom_factor = 1 + 0.05*(np.random.random() - 0.5) 131 | patch = zoom(patch, zoom=(1,1,zoom_factor, zoom_factor), order=1) 132 | mask = zoom(mask, zoom=(1,1,zoom_factor, zoom_factor), order=0) 133 | patch_init = zoom(patch_init, zoom=(1,1,zoom_factor, zoom_factor), order=1) 134 | patch_shape = patch.shape 135 | m_size = patch.shape[-1] 136 | for i in range(x.shape[0]): 137 | # random rotation 138 | if not norotate: 139 | rot = 10*(np.random.random() - 0.5) 140 | for j in range(patch[i].shape[0]): 141 | patch[i][j] = rotate(patch[i][j], angle=rot, reshape=False, order=1) 142 | patch_init[i][j] = rotate(patch_init[i][j], angle=rot, reshape=False, order=1) 143 | 144 | # random location 145 | # random_x = 2*m_size + np.random.choice(image_w - 4*m_size -2) 146 | # random_x = m_size + np.random.choice(image_w - 2*m_size -2) 147 | if fixed_loc[0] < 0 or fixed_loc[1] < 0: 148 | if center: 149 | random_x = (image_w - m_size) // 2 150 | else: 151 | random_x = m_size + margin + np.random.choice(image_w - 2*m_size - 2*margin -2) 152 | assert(random_x + m_size < x.shape[-1]) 153 | # while random_x + m_size > x.shape[-1]: 154 | # random_x = np.random.choice(image_w - m_size - 1) 155 | # random_y = m_size + np.random.choice(image_h - 2*m_size -2) 156 | if center: 157 | random_y = (image_h - m_size) // 2 158 | else: 159 | random_y = m_size + np.random.choice(image_h - 2*m_size -2) 160 | assert(random_y + m_size < x.shape[-2]) 161 | # while random_y + m_size > x.shape[-2]: 162 | # random_y = np.random.choice(image_h) 163 | else: 164 | random_x = fixed_loc[0] 165 | random_y = fixed_loc[1] 166 | 167 | # apply patch to dummy image 168 | x[i][0][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch[i][0] 169 | x[i][1][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch[i][1] 170 | x[i][2][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch[i][2] 171 | 172 | # apply mask to dummy image 173 | xm[i][0][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = mask[i][0] 174 | xm[i][1][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = mask[i][1] 175 | xm[i][2][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = mask[i][2] 176 | 177 | # apply patch_init to dummy image 178 | xp[i][0][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch_init[i][0] 179 | xp[i][1][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch_init[i][1] 180 | xp[i][2][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch_init[i][2] 181 | 182 | return x, xm, xp, random_x, random_y, patch_shape 183 | 184 | def init_patch_square(image_size, patch_size): 185 | # get mask 186 | # image_size = image_size**2 187 | noise_size = image_size*patch_size 188 | noise_dim = int(noise_size)#**(0.5)) 189 | patch = np.random.rand(1,3,noise_dim,noise_dim) 190 | return patch, patch.shape 191 | 192 | def init_patch_from_image(image_path, mask_path, image_size, patch_size): 193 | noise_size = np.floor(image_size*np.sqrt(patch_size)) 194 | patch_image = load_as_float(image_path) 195 | 196 | patch_image = imresize(patch_image, (int(noise_size), int(noise_size)))/128. 
-1 197 | patch = np.array([patch_image.transpose(2,0,1)]) 198 | 199 | mask_image = load_as_float(mask_path) 200 | mask_image = imresize(mask_image, (int(noise_size), int(noise_size)))/256. 201 | mask = np.array([mask_image.transpose(2,0,1)]) 202 | return patch, mask, patch.shape 203 | 204 | 205 | def square_transform(patch, mask, patch_init, data_shape, patch_shape, norotate=False): 206 | # get dummy image 207 | image_w, image_h = data_shape[-1], data_shape[-2] 208 | x = np.zeros(data_shape) 209 | xm = np.zeros(data_shape) 210 | xp = np.zeros(data_shape) 211 | # get shape 212 | m_size = patch_shape[-1] 213 | 214 | for i in range(x.shape[0]): 215 | 216 | # random rotation 217 | if not norotate: 218 | rot = np.random.choice(4) 219 | for j in range(patch[i].shape[0]): 220 | patch[i][j] = np.rot90(patch[i][j], rot) 221 | mask[i][j] = np.rot90(mask[i][j], rot) 222 | 223 | patch_init[i][j] = np.rot90(patch_init[i][j], rot) 224 | 225 | # random location 226 | random_x = np.random.choice(image_w-m_size-1) 227 | if random_x + m_size > x.shape[-1]: 228 | while random_x + m_size > x.shape[-1]: 229 | random_x = np.random.choice(image_w) 230 | random_y = np.random.choice(image_h-m_size-1) 231 | if random_y + m_size > x.shape[-2]: 232 | while random_y + m_size > x.shape[-2]: 233 | random_y = np.random.choice(image_h) 234 | 235 | # apply patch to dummy image 236 | x[i][0][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch[i][0] 237 | x[i][1][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch[i][1] 238 | x[i][2][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch[i][2] 239 | # apply mask to dummy image 240 | xm[i][0][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = mask[i][0] 241 | xm[i][1][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = mask[i][1] 242 | xm[i][2][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = mask[i][2] 243 | 244 | # apply patch_init to dummy image 245 | xp[i][0][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch_init[i][0] 246 | xp[i][1][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch_init[i][1] 247 | xp[i][2][random_y:random_y+patch_shape[-2], random_x:random_x+patch_shape[-1]] = patch_init[i][2] 248 | 249 | # mask = np.copy(x) 250 | # mask[mask != 0] = 1.0 251 | 252 | return x, xm, xp, random_x, random_y 253 | --------------------------------------------------------------------------------
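
The patch utilities in utils.py above (init_patch_circle, circle_transform, and friends) are the pieces the training code combines to build a circular adversarial patch and paste it into an image batch. The snippet below is a minimal usage sketch, not code from this repository: the image size, patch scale, batch shape, and the final compositing step are illustrative assumptions.

```python
import numpy as np

# utils.py is the module listed above; init_patch_circle and circle_transform exist there.
from utils import init_patch_circle, circle_transform

# Illustrative settings (assumptions, not values taken from this repository).
image_size = 256                  # base resolution used to size the square noise patch
patch_scale = 0.1                 # patch edge becomes int(image_size * patch_scale) pixels
batch_shape = (1, 3, 384, 1280)   # one KITTI-sized frame, NCHW, values in [0, 1]

# Random circular patch plus its binary mask, both shaped (1, 3, d, d).
patch, mask, patch_shape = init_patch_circle(image_size, patch_scale)

# Paste the (jittered, rotated, zoomed) patch and mask into zero "dummy" images
# at a random location. circle_transform also places patch_init into a third
# dummy image; reusing the initial patch for it is just a placeholder here.
x, xm, xp, rx, ry, patch_shape = circle_transform(
    patch, mask, patch.copy(), batch_shape, patch_shape)

# Composite onto an image batch: keep the image where the mask is 0,
# the patch where the mask is 1.
images = np.random.rand(*batch_shape).astype(np.float32)
adv_images = images * (1 - xm) + x * xm
print(adv_images.shape, (rx, ry))
```

The returned x, xm, and xp are full-size dummy images holding the transformed patch, its mask, and the transformed initial patch at the same location, which is why the composite can be formed with a single mask blend as sketched above.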