├── .github └── workflows │ ├── ci.yml │ └── weekly.yml ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.rst ├── anncolvar └── __init__.py ├── bin └── anncolvar ├── data ├── reference.pdb ├── results_isomap └── traj_fit.xtc ├── docs └── html │ ├── installing.rst │ └── quickstart.rst ├── requirements.txt ├── setup.py └── tests ├── reference.pdb ├── results_isomap ├── test_it.py ├── test_it2.py ├── test_it3.py ├── test_it4.py ├── test_it5.py ├── test_it6.py ├── test_it7.py └── traj_fit.xtc /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Build 2 | 3 | on: [ push, pull_request ] 4 | 5 | env: 6 | CORE_CHANGED: false 7 | APP_CHANGED: false 8 | MKL_THREADING_LAYER: GNU 9 | PLUMED_CC: gcc 10 | PLUMED_CXX: g++ 11 | CUDA_VISIBLE_DEVICES: -1 12 | 13 | jobs: 14 | install-anncolvar: 15 | name: ${{ matrix.os }}, python ${{ matrix.python-version }}, ${{ matrix.keras-backend }} 16 | runs-on: ${{ matrix.os }} 17 | strategy: 18 | matrix: 19 | os: [ ubuntu-18.04, ubuntu-latest, macos-latest ] 20 | python-version: [ 3.7, 3.8 ] 21 | keras-backend: 22 | - KERAS_BACKEND=tensorflow CONFIG_FLAGS="--enable-debug --enable-debug-glibcxx" 23 | - KERAS_BACKEND=theano THEANO_FLAGS=optimizer=fast_compile CONFIG_FLAGS="--enable-debug" 24 | 25 | steps: 26 | - uses: actions/checkout@v2 27 | with: 28 | python-version: ${{ matrix.python-version }} 29 | env: ${{ matrix.keras-backend }} 30 | 31 | - name: Download Miniconda for ${{ matrix.os }} and python 3.7 32 | if: contains( matrix.python-version, '3.7' ) 33 | run: | 34 | wget https://repo.continuum.io/miniconda/Miniconda3-py37_4.9.2-${{ startsWith(matrix.os, 'ubuntu' ) && 'Linux' || 'MacOSX' }}-x86_64.sh -O miniconda.sh 35 | 36 | - name: Download Miniconda for ${{ matrix.os }} and python 3.8 37 | if: contains( matrix.python-version, '3.8' ) 38 | run: | 39 | wget https://repo.continuum.io/miniconda/Miniconda3-py38_4.9.2-${{ startsWith(matrix.os, 'ubuntu' ) && 'Linux' || 'MacOSX' }}-x86_64.sh -O miniconda.sh 40 | 41 | # TODO: There are problems with conda env activation. 
With activated env, pip installations can be omitted probably 42 | - name: Install Miniconda for Ubuntu 43 | shell: bash 44 | if: ${{ startsWith( matrix.os , 'ubuntu') }} 45 | run: | 46 | bash miniconda.sh -b -p $HOME/miniconda 47 | echo "$HOME/miniconda/bin:$PATH" >> $GITHUB_PATH 48 | echo "$HOME/miniconda/envs/test-environment/lib/:$LD_LIBRARY_PATH" >> $GITHUB_PATH 49 | conda config --set always_yes yes --set changeps1 no 50 | conda update -q conda 51 | conda info -a 52 | conda create -q -n test-environment python=${{ matrix.python-version }} pandas 53 | 54 | - name: Install Miniconda for MacOS 55 | shell: bash 56 | if: ${{ startsWith( matrix.os , 'macos') }} 57 | run: | 58 | /usr/bin/sudo chown -R runner:staff /usr/local/miniconda 59 | bash miniconda.sh -b -p $HOME/miniconda 60 | /usr/local/miniconda/condabin/conda init --all 61 | conda config --set always_yes yes --set changeps1 no 62 | conda install python=${{ matrix.python-version }} 63 | conda info -a 64 | conda create -q -n test-environment python=${{ matrix.python-version }} pandas 65 | 66 | - name: Install dependencies for Ubuntu 67 | if: ${{ startsWith( matrix.os , 'ubuntu') }} 68 | run: | 69 | python --version 70 | python -m pip install --upgrade pip 71 | pip install tensorflow 72 | conda install numpy nose cython h5py theano tqdm pytest pytest-cov 73 | conda install -c conda-forge mdtraj mkl mkl-service 74 | pip install -e .[tests] 75 | 76 | - name: Install dependencies for MacOS 77 | if: ${{ startsWith( matrix.os , 'macos') }} 78 | run: | 79 | python3 --version 80 | python3 -m pip install --upgrade pip 81 | pip3 install pytest pytest-cov tensorflow 82 | conda install numpy nose cython h5py theano tqdm 83 | conda install -c conda-forge mdtraj mkl mkl-service 84 | pip3 install -e .[tests] 85 | 86 | - name: Install Plumed 2 and run tests 87 | run: | 88 | git clone https://github.com/plumed/plumed2.git 89 | cd plumed2 90 | ./configure CXX=$(which $PLUMED_CXX) CC=$(which $PLUMED_CC) --prefix="$HOME/opt" --enable-modules=annfunc 91 | make -j 4 92 | make install 93 | source sourceme.sh 94 | plumed -h 95 | cd .. 
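          # Descriptive comments (added): the three commands below are expected to
          # (1) let Keras write a default ~/.keras/keras.json on first import,
          # (2) rewrite the "backend" field of that file to the backend selected in the
          #     build matrix ($KERAS_BACKEND), and
          # (3) run the test suite with coverage collected for the anncolvar package.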
96 | python3 -c "import keras.backend" 97 | sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json; 98 | echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)" 99 | KERAS_BACKEND=$KERAS_BACKEND pytest --cov=anncolvar tests/ 100 | 101 | -------------------------------------------------------------------------------- /.github/workflows/weekly.yml: -------------------------------------------------------------------------------- 1 | name: Weekly build 2 | on: 3 | schedule: 4 | - cron: "0 10 * * 1" 5 | 6 | env: 7 | CORE_CHANGED: false 8 | APP_CHANGED: false 9 | MKL_THREADING_LAYER: GNU 10 | PLUMED_CC: gcc 11 | PLUMED_CXX: g++ 12 | CUDA_VISIBLE_DEVICES: -1 13 | 14 | jobs: 15 | install-anncolvar: 16 | name: ${{ matrix.os }}, python ${{ matrix.python-version }}, ${{ matrix.keras-backend }} 17 | runs-on: ${{ matrix.os }} 18 | strategy: 19 | matrix: 20 | os: [ ubuntu-18.04, ubuntu-latest, macos-latest ] 21 | python-version: [ 3.7, 3.8 ] 22 | keras-backend: 23 | - KERAS_BACKEND=tensorflow CONFIG_FLAGS="--enable-debug --enable-debug-glibcxx" 24 | - KERAS_BACKEND=theano THEANO_FLAGS=optimizer=fast_compile CONFIG_FLAGS="--enable-debug" 25 | 26 | steps: 27 | - uses: actions/checkout@v2 28 | with: 29 | python-version: ${{ matrix.python-version }} 30 | env: ${{ matrix.keras-backend }} 31 | 32 | - name: Download Miniconda for ${{ matrix.os }} and python 3.7 33 | if: contains( matrix.python-version, '3.7' ) 34 | run: | 35 | wget https://repo.continuum.io/miniconda/Miniconda3-py37_4.9.2-${{ startsWith(matrix.os, 'ubuntu' ) && 'Linux' || 'MacOSX' }}-x86_64.sh -O miniconda.sh 36 | 37 | - name: Download Miniconda for ${{ matrix.os }} and python 3.8 38 | if: contains( matrix.python-version, '3.8' ) 39 | run: | 40 | wget https://repo.continuum.io/miniconda/Miniconda3-py38_4.9.2-${{ startsWith(matrix.os, 'ubuntu' ) && 'Linux' || 'MacOSX' }}-x86_64.sh -O miniconda.sh 41 | 42 | # TODO: There are problems with conda env activation. 
With activated env, pip installations can be omitted probably 43 | - name: Install Miniconda for Ubuntu 44 | shell: bash 45 | if: ${{ startsWith( matrix.os , 'ubuntu') }} 46 | run: | 47 | bash miniconda.sh -b -p $HOME/miniconda 48 | echo "$HOME/miniconda/bin:$PATH" >> $GITHUB_PATH 49 | echo "$HOME/miniconda/envs/test-environment/lib/:$LD_LIBRARY_PATH" >> $GITHUB_PATH 50 | conda config --set always_yes yes --set changeps1 no 51 | conda update -q conda 52 | conda info -a 53 | conda create -q -n test-environment python=${{ matrix.python-version }} pandas 54 | 55 | - name: Install Miniconda for MacOS 56 | shell: bash 57 | if: ${{ startsWith( matrix.os , 'macos') }} 58 | run: | 59 | /usr/bin/sudo chown -R runner:staff /usr/local/miniconda 60 | bash miniconda.sh -b -p $HOME/miniconda 61 | /usr/local/miniconda/condabin/conda init --all 62 | conda config --set always_yes yes --set changeps1 no 63 | conda install python=${{ matrix.python-version }} 64 | conda info -a 65 | conda create -q -n test-environment python=${{ matrix.python-version }} pandas 66 | 67 | - name: Install dependencies for Ubuntu 68 | if: ${{ startsWith( matrix.os , 'ubuntu') }} 69 | run: | 70 | python --version 71 | python -m pip install --upgrade pip 72 | pip install tensorflow 73 | conda install numpy nose cython h5py theano tqdm pytest pytest-cov 74 | conda install -c conda-forge mdtraj mkl mkl-service 75 | pip install -e .[tests] 76 | 77 | - name: Install dependencies for MacOS 78 | if: ${{ startsWith( matrix.os , 'macos') }} 79 | run: | 80 | python3 --version 81 | python3 -m pip install --upgrade pip 82 | pip3 install pytest pytest-cov tensorflow 83 | conda install numpy nose cython h5py theano tqdm 84 | conda install -c conda-forge mdtraj mkl mkl-service 85 | pip3 install -e .[tests] 86 | 87 | - name: Install Plumed 2 and run tests 88 | run: | 89 | git clone https://github.com/plumed/plumed2.git 90 | cd plumed2 91 | ./configure CXX=$(which $PLUMED_CXX) CC=$(which $PLUMED_CC) --prefix="$HOME/opt" --enable-modules=annfunc 92 | make -j 4 93 | make install 94 | source sourceme.sh 95 | plumed -h 96 | cd .. 97 | python3 -c "import keras.backend" 98 | sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json; 99 | echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)" 100 | KERAS_BACKEND=$KERAS_BACKEND pytest --cov=anncolvar tests/ 101 | 102 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # modified from https://github.com/keras-team/keras/blob/master/.travis.yml 2 | sudo: required 3 | #dist: trusty 4 | language: python 5 | cache: ccache 6 | matrix: 7 | include: 8 | - python: 3.6 9 | env: KERAS_BACKEND=tensorflow PLUMED_CC=gcc PLUMED_CXX=g++ CONFIG_FLAGS="--enable-debug --enable-debug-glibcxx" 10 | - python: 3.6 11 | env: KERAS_BACKEND=theano THEANO_FLAGS=optimizer=fast_compile PLUMED_CC=gcc PLUMED_CXX=g++ CONFIG_FLAGS="--enable-debug" 12 | - python: 3.6 13 | env: KERAS_BACKEND=cntk PYTHONWARNINGS=ignore PLUMED_CC=gcc PLUMED_CXX=g++ CONFIG_FLAGS="--enable-debug --enable-debug-glibcxx" 14 | install: 15 | - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; 16 | - bash miniconda.sh -b -p $HOME/miniconda 17 | - export PATH="$HOME/miniconda/bin:$PATH" 18 | - hash -r 19 | - conda config --set always_yes yes --set changeps1 no 20 | - conda update -q conda 21 | - conda info -a 22 | - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION pytest-cov pandas 23 | - source activate test-environment 24 | - pip install --only-binary=numpy numpy nose cython h5py theano 25 | - conda install mkl mkl-service 26 | - export LD_LIBRARY_PATH=$HOME/miniconda/envs/test-environment/lib/:$LD_LIBRARY_PATH 27 | - conda install Pillow; 28 | - pip install argparse 29 | - pip install -e .[tests] 30 | - pip install tensorflow-cpu 31 | - pip install https://cntk.ai/PythonWheel/CPU-Only/cntk-2.7.post1-cp36-cp36m-linux_x86_64.whl; 32 | - export CORE_CHANGED=False 33 | - for entry in `git diff --name-only HEAD~1`; do if [[ "$entry" == "keras/backend/"* ]] || [[ "$entry" == "keras/engine/"* ]] || [[ "$entry" == "keras/layers/"* ]]; then export CORE_CHANGED=True; fi; done 34 | - export APP_CHANGED=False 35 | - for entry in `git diff --name-only HEAD~1`; do if [[ "$entry" == "keras/applications/"* ]]; then export APP_CHANGED=True; fi; done 36 | - rm -rf ~/mpi 37 | - mkdir ~/mpi 38 | - pushd ~/mpi 39 | - wget http://cntk.ai/PythonWheel/ForKeras/depends/openmpi_1.10-3.zip 40 | - unzip ./openmpi_1.10-3.zip 41 | - sudo dpkg -i openmpi_1.10-3.deb 42 | - popd 43 | - pip install codecov 44 | - ccache -s 45 | - git clone https://github.com/plumed/plumed2.git 46 | - cd 
plumed2 47 | - ./configure CXX="ccache $(which $PLUMED_CXX)" CC=$(which $PLUMED_CC) CXXFLAGS="$PLUMED_CXXFLAGS" LDFLAGS="$PLUMED_LDFLAGS" CPPFLAGS="$PLUMED_CPPFLAGS" $CONFIG_FLAGS --prefix="$HOME/opt" --enable-modules=annfunc 48 | - make -j 4 49 | - make install 50 | - cd .. 51 | - export PATH=$PATH:/home/travis/opt/bin/ 52 | - export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/travis/opt/lib/ 53 | 54 | script: 55 | - export MKL_THREADING_LAYER="GNU" 56 | - python -c "import keras.backend" 57 | - sed -i -e 's/"backend":[[:space:]]*"[^"]*/"backend":\ "'$KERAS_BACKEND'/g' ~/.keras/keras.json; 58 | - echo -e "Running tests with the following config:\n$(cat ~/.keras/keras.json)" 59 | - KERAS_BACKEND=$KERAS_BACKEND PYTHONPATH=$PWD:$PYTHONPATH py.test --cov=anncolvar tests/; 60 | 61 | after_success: 62 | - bash <(curl -s https://codecov.io/bash) -t ${CODECOV_TOKEN} 63 | 64 | 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Vojtech Spiwok 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | |PyPI| |Anaconda| |BuildStatus| |WeeklyBuildStatus| |codecov| |lgtm| |lgtmpy| 2 | |DOI| |nest| 3 | 4 | Read more in 5 | D. Trapl, I. Horvaćanin, V. Mareška, F. Özçelik, G. Unal and V. Spiwok: anncolvar: Approximation of Complex Collective Variables by Artificial Neural Networks for Analysis and Biasing of Molecular Simulations *Front. Mol. Biosci.* 2019, **6**, 25 (doi: 10.3389/fmolb.2019.00025) 6 | 7 | ********* 8 | anncolvar 9 | ********* 10 | 11 | News 12 | ==== 13 | 14 | August 2020: Support for Python 2.7 terminated, use Python 3. 15 | 16 | The current master version makes it possible to use the ANN module of the recent master version of Plumed.
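For quick orientation, a typical invocation might look like the following (file names and option values are illustrative placeholders; all options are described in the Syntax and Usage sections below)::

  anncolvar -i traj_fit.xtc -p reference.pdb -c colvar.txt -col 2 -boxx 3 -boxy 3 -boxz 3 -epochs 2000 -o result.txt -plumed plumed.dat

The resulting plumed.dat can then be used with *plumed driver* or a Plumed-patched molecular dynamics engine, as described under Usage.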
17 | 18 | Syntax 19 | ====== 20 | 21 | Collective variables by artificial neural networks:: 22 | 23 | usage: anncolvar [-h] [-i INFILE] [-p INTOP] [-c COLVAR] [-col COL] 24 | [-boxx BOXX] [-boxy BOXY] [-boxz BOXZ] [-nofit NOFIT] 25 | [-testset TESTSET] [-shuffle SHUFFLE] [-layers LAYERS] 26 | [-layer1 LAYER1] [-layer2 LAYER2] [-layer3 LAYER3] 27 | [-actfun1 ACTFUN1] [-actfun2 ACTFUN2] [-actfun3 ACTFUN3] 28 | [-optim OPTIM] [-loss LOSS] [-epochs EPOCHS] [-batch BATCH] 29 | [-o OFILE] [-model MODELFILE] [-plumed PLUMEDFILE] 30 | [-plumed2 PLUMEDFILE2] 31 | 32 | Artificial neural network learning of collective variables of molecular 33 | systems, requires numpy, keras and mdtraj 34 | 35 | optional arguments: 36 | -h, --help show this help message and exit 37 | -i INFILE Input trajectory in pdb, xtc, trr, dcd, netcdf or mdcrd, 38 | WARNING: the trajectory must be 1. must contain only atoms 39 | to be analyzed, 2. must not contain any periodic boundary 40 | condition issues! 41 | -p INTOP Input topology in pdb, WARNING: the structure must be 1. 42 | centered in the PBC box and 2. must contain only atoms 43 | to be analyzed! 44 | -c COLVAR Input collective variable file in text format, must 45 | contain the same number of lines as frames in the 46 | trajectory 47 | -col COL The index of the column containing collective variables 48 | in the input collective variable file 49 | -boxx BOXX Size of x coordinate of PBC box (from 0 to set value in 50 | nm) 51 | -boxy BOXY Size of y coordinate of PBC box (from 0 to set value in 52 | nm) 53 | -boxz BOXZ Size of z coordinate of PBC box (from 0 to set value in 54 | nm) 55 | -nofit NOFIT Disable fitting, the trajectory must be properly fited 56 | (default False) 57 | -testset TESTSET Size of test set (fraction of the trajectory, default = 58 | 0.1) 59 | -shuffle SHUFFLE Shuffle trajectory frames to obtain training and test 60 | set (default True) 61 | -layers LAYERS Number of hidden layers (allowed values 1-3, default = 62 | 1) 63 | -layer1 LAYER1 Number of neurons in the first encoding layer (default = 64 | 256) 65 | -layer2 LAYER2 Number of neurons in the second encoding layer (default 66 | = 256) 67 | -layer3 LAYER3 Number of neurons in the third encoding layer (default = 68 | 256) 69 | -actfun1 ACTFUN1 Activation function of the first layer (default = 70 | sigmoid, for options see keras documentation) 71 | -actfun2 ACTFUN2 Activation function of the second layer (default = 72 | linear, for options see keras documentation) 73 | -actfun3 ACTFUN3 Activation function of the third layer (default = 74 | linear, for options see keras documentation) 75 | -optim OPTIM Optimizer (default = adam, for options see keras 76 | documentation) 77 | -loss LOSS Loss function (default = mean_squared_error, for options 78 | see keras documentation) 79 | -epochs EPOCHS Number of epochs (default = 100, >1000 may be necessary 80 | for real life applications) 81 | -batch BATCH Batch size (0 = no batches, default = 256) 82 | -o OFILE Output file with original and approximated collective 83 | variables (txt, default = no output) 84 | -model MODELFILE Prefix for output model files (experimental, default = 85 | no output) 86 | -plumed PLUMEDFILE Output file for Plumed (default = plumed.dat) 87 | -plumed2 PLUMEDFILE2 Output file for Plumed with ANN module (default = 88 | plumed2.dat) 89 | 90 | Introduction 91 | ============ 92 | 93 | Biased simulations, such as metadynamics, use a predefined set of parameters known 94 | as collective variables. 
An artificial bias force is applied to collective variables 95 | to enhance sampling. There are two conditions for a parameter to be applied as 96 | a collective variable. First, the value of the collective variable can be calculated 97 | solely from atomic coordinates. Second, the force acting on collective variables 98 | can be converted to the force acting on individual atoms. In other words, it 99 | is possible to calculate the first derivative of the collective variable with 100 | respect to atomic coordinates. Both calculations must be fast enough, because 101 | they must be evaluated in every step of the simulation. 102 | 103 | There are many potential collective variables that cannot be easily calculated. 104 | It is possible to calculate the collective variable for hundreds or thousands of 105 | structures, but not for millions of structures (which is necessary for nanosecond 106 | long simulations). *anncolvar* can approximate such collective variables using 107 | a neural network. 108 | 109 | Installation 110 | ============ 111 | 112 | You have to choose and install one of the Keras backends, such as TensorFlow, Theano or 113 | CNTK. To do so, follow one of these links: 114 | 115 | - TensorFlow_ 116 | - Theano_ 117 | - CNTK_ (CNTK 2.7, released in 2019, is the last release) 118 | 119 | Install numpy and cython using pip:: 120 | 121 | pip install numpy cython 122 | 123 | Next, install anncolvar using pip:: 124 | 125 | pip install anncolvar 126 | 127 | If you use Anaconda, type:: 128 | 129 | conda install -c spiwokv anncolvar 130 | 131 | Usage 132 | ===== 133 | 134 | A series of representative structures (hundreds or more) with pre-calculated values 135 | of the collective variable is used to train the neural network. The user can specify 136 | the input set of reference structures (*-i*) in the form of a trajectory in pdb, xtc, 137 | trr, dcd, netcdf or mdcrd. The trajectory must contain only the atoms to be analyzed 138 | (for example only non-hydrogen atoms). The trajectory must not contain any periodic 139 | boundary condition issues. Both modifications can be made by molecular dynamics 140 | simulation packages, for example by *gmx trjconv*. It is not necessary to fit 141 | frames to a reference structure. It is possible to switch fitting off with 142 | *-nofit True*. 143 | 144 | It is necessary to supply an input topology in PDB. This is a structure used 145 | as a template for fitting. It is also used to define a box. This box must be large 146 | enough to fit the molecule in all frames of the trajectory. It should not be too 147 | large because this suppresses non-linearity in the neural network. When the user 148 | decides to use a 3x3x3 nm box, the molecule must be centered 149 | at coordinates (1.5,1.5,1.5) nm. In Gromacs it is possible to use:: 150 | 151 | gmx editconf -f mol.pdb -o reference.pdb -c -box 3 3 3 152 | 153 | The reference structure must also contain only the atoms to be analyzed. The size of the box can be specified 154 | by the parameters *-boxx*, *-boxy* and *-boxz* (in nm). 155 | 156 | The last input file is the collective variable file. It is a space-separated text 157 | file with the same number of lines as the number of frames in the input trajectory. 158 | The index of the column can be specified by *-col* (e.g. *-col 2* for the second 159 | column of the file). 160 | 161 | The option *-testset* can control the fraction of the trajectory used as 162 | the test set. For example *-testset 0.1* means that 10 % of input data is used 163 | as the test set and 90 % as the training set.
The option *-shuffle False* causes 164 | the first 90 % to be used as the training set and the remaining 10 % as the test set. 165 | Otherwise (the default) frames are shuffled before separation into the training and test sets. 166 | 167 | The architecture of the neural network is controlled by multiple parameters. 168 | The input layer contains 3N neurons (where N is the number of atoms). The number 169 | of hidden layers is controlled by *-layers*. This can be 1, 2 or 3. For a higher 170 | number of layers contact the authors. The number of neurons in the first, second and 171 | third layer is controlled by *-layer1*, *-layer2* and *-layer3*. It is useful 172 | to set the number of neurons to a power of 2 (32, 64, 128 etc.). Huge numbers 173 | of neurons can make the program slow or cause it to run out of memory. Activation 174 | functions of neurons can be controlled by *-actfun1*, *-actfun2* and *-actfun3*. 175 | Any activation function supported by Keras can be used. 176 | 177 | The optimizer used in the training process can be controlled by *-optim*. The 178 | default ADAM optimizer (*-optim adam*) works well. The loss function can be 179 | controlled by *-loss*. The default *-loss mean_squared_error* works well. The 180 | number of epochs can be controlled by *-epochs*. The default value (100) is 181 | quite low; usually >1000 epochs are necessary for real-life applications. The batch 182 | size can be controlled by *-batch* (*-batch 0* for no batches, default is 256). 183 | 184 | Output is written into the text file *-o*. It contains the approximated and 185 | the original values of the collective variable. The model can be stored in a set 186 | of text files (try *-model*). The Plumed input file is written into the file controlled 187 | by *-plumed* (by default plumed.dat). This file can be directly used to calculate 188 | the evolution of the collective variable by *plumed driver* or by a Plumed-patched 189 | molecular dynamics engine. To use the collective variable in enhanced sampling 190 | (for example metadynamics) it is necessary to add a suitable keyword (for example 191 | METAD). 192 | 193 | .. |PyPI| image:: https://img.shields.io/pypi/v/anncolvar.svg 194 | :target: https://pypi.org/project/anncolvar/ 195 | :alt: Latest version released on PyPI 196 | 197 | .. |Anaconda| image:: https://anaconda.org/spiwokv/anncolvar/badges/version.svg 198 | :target: https://anaconda.org/spiwokv/anncolvar 199 | :alt: Latest version released on Anaconda Cloud 200 | 201 | .. |BuildStatus| image:: https://github.com/spiwokv/anncolvar/actions/workflows/ci.yml/badge.svg 202 | :target: https://github.com/spiwokv/anncolvar/actions 203 | :alt: Build status of the master branch on Mac/Linux at Github Actions 204 | 205 | .. |WeeklyBuildStatus| image:: https://github.com/spiwokv/anncolvar/actions/workflows/weekly.yml/badge.svg 206 | :target: https://github.com/spiwokv/anncolvar/actions 207 | :alt: Weekly Monday 10 AM build status of the master branch on Mac/Linux at Github Actions 208 | 209 | .. |codecov| image:: https://codecov.io/gh/spiwokv/anncolvar/branch/master/graph/badge.svg 210 | :target: https://codecov.io/gh/spiwokv/anncolvar/ 211 | :alt: Code coverage 212 | 213 | .. |lgtm| image:: https://img.shields.io/lgtm/alerts/g/spiwokv/anncolvar.svg?logo=lgtm&logoWidth=18 214 | :target: https://lgtm.com/projects/g/spiwokv/anncolvar/alerts/ 215 | :alt: LGTM code alerts 216 | 217 | ..
|lgtmpy| image:: https://img.shields.io/lgtm/grade/python/g/spiwokv/anncolvar.svg?logo=lgtm&logoWidth=18 218 | :target: https://lgtm.com/projects/g/spiwokv/anncolvar/context:python 219 | :alt: LGTM python quality 220 | 221 | .. |nest| image:: https://www.plumed-nest.org/eggs/19/008/badge.svg 222 | :target: https://www.plumed-nest.org/eggs/19/008/ 223 | :alt: Plumed Nest ID: 008 224 | 225 | .. |DOI| image:: https://zenodo.org/badge/DOI/10.3389/fmolb.2019.00025.svg 226 | :target: https://doi.org/10.3389/fmolb.2019.00025 227 | :alt: DOI: 10.3389/fmolb.2019.00025 228 | 229 | .. _TensorFlow: https://www.tensorflow.org/install/ 230 | 231 | .. _Theano: http://deeplearning.net/software/theano/install.html 232 | 233 | .. _CNTK: https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine 234 | 235 | 236 | -------------------------------------------------------------------------------- /anncolvar/__init__.py: -------------------------------------------------------------------------------- 1 | name = "anncolvar" 2 | 3 | # Loading necessary libraries 4 | libnames = [('mdtraj', 'md'), ('numpy', 'np'), ('keras', 'krs'), ('argparse', 'arg'), ('datetime', 'dt'), ('sys', 'sys')] 5 | 6 | for (name, short) in libnames: 7 | try: 8 | lib = __import__(name) 9 | except ImportError: 10 | print("Library %s cannot be loaded, exiting" % name) 11 | exit(0) 12 | else: 13 | globals()[short] = lib 14 | 15 | def anncollectivevariable(infilename='', intopname='', colvarname='', column=2, 16 | boxx=0.0, boxy=0.0, boxz=0.0, atestset=0.1, 17 | shuffle=1, nofit=0, layers=1, layer1=256, layer2=256, layer3=256, 18 | actfun1='sigmoid', actfun2='sigmoid', actfun3='sigmoid', 19 | optim='adam', loss='mean_squared_error', epochs=100, batch=0, 20 | ofilename='', modelfile='', plumedfile='', plumedfile2=''): 21 | try: 22 | print("Loading trajectory") 23 | refpdb = md.load_pdb(intopname) 24 | traj = md.load(infilename, top=intopname) 25 | print("Fitting trajectory") 26 | if nofit==0: 27 | traj.superpose(refpdb) 28 | except IOError: 29 | print("Cannot load %s or %s, exiting." % (infilename, intopname)) 30 | exit(0) 31 | else: 32 | print("%s succesfully loaded and fitted" % traj) 33 | print("") 34 | 35 | # Conversion of the trajectory from Nframes x Natoms x 3 to Nframes x (Natoms x 3) 36 | trajsize = traj.xyz.shape 37 | traj2 = np.zeros((trajsize[0], trajsize[1]*3)) 38 | for i in range(trajsize[1]): 39 | traj2[:,3*i] = traj.xyz[:,i,0] 40 | traj2[:,3*i+1] = traj.xyz[:,i,1] 41 | traj2[:,3*i+2] = traj.xyz[:,i,2] 42 | 43 | # Checking whether all atoms fit the box 44 | if (np.amin(traj2)) < 0.0: 45 | print("ERROR: Some of atom has negative coordinate (i.e. it is outside the box)") 46 | exit(0) 47 | 48 | if boxx == 0.0 or boxy == 0.0 or boxz == 0.0: 49 | print("WARNING: box size not set, it will be determined automatically") 50 | if boxx == 0.0: 51 | boxx = 1.2*np.amax(traj.xyz[:,:,0]) 52 | if boxy == 0.0: 53 | boxy = 1.2*np.amax(traj.xyz[:,:,1]) 54 | if boxz == 0.0: 55 | boxz = 1.2*np.amax(traj.xyz[:,:,2]) 56 | print("box size set to %6.3f x %6.3f x %6.3f nm" % (boxx, boxy, boxz)) 57 | print("") 58 | 59 | if np.amax(traj.xyz[:,:,0]) > boxx or np.amax(traj.xyz[:,:,1]) > boxy or np.amax(traj.xyz[:,:,2]) > boxz: 60 | print("ERROR: Some of atom has coordinate higher than box size (i.e. 
it is outside the box)") 61 | exit(0) 62 | 63 | if boxx > 2.0*np.amax(traj.xyz[:,:,0]) or boxy > 2.0*np.amax(traj.xyz[:,:,1]) or boxz > 2.0*np.amax(traj.xyz[:,:,2]): 64 | print("WARNING: Box size is bigger than 2x of highest coordinate,") 65 | print("maybe the box is too big or the molecule is not centered") 66 | 67 | maxbox = max([boxx, boxy, boxz]) 68 | 69 | # Checking colvar file 70 | try: 71 | cvfile = open(colvarname, 'r').readlines() 72 | except IOError: 73 | print("Cannot load %s, exiting." % colvarname) 74 | exit(0) 75 | cvs = [] 76 | for line in cvfile: 77 | sline = str.split(line) 78 | if len(sline) > 1: 79 | if sline[0][0]!="#": 80 | if len(sline) >= column: 81 | try: 82 | cvs.append(float(sline[column-1])) 83 | except ValueError: 84 | print("Cannot read %s." % colvarname) 85 | exit(0) 86 | if len(cvs) != trajsize[0]: 87 | print("File %s contains %i values, but %s contains %i frames, exiting." % (colvarname, len(cvs), infilename, trajsize[0])) 88 | exit(0) 89 | cvs = np.array(cvs) 90 | 91 | # Splitting the trajectory into training and testing sets 92 | testsize = int(atestset * trajsize[0]) 93 | if testsize < 1: 94 | print("ERROR: testset empty, increase testsize") 95 | exit(0) 96 | print("Training and test sets consist of %i and %i trajectory frames, respectively" % (trajsize[0]-testsize, testsize)) 97 | print("") 98 | 99 | # Shuffling the trajectory before splitting 100 | if shuffle == 1: 101 | print("Trajectory will be shuffled before splitting into training and test set") 102 | elif shuffle == 0: 103 | print("Trajectory will NOT be shuffled before splitting into training and test set") 104 | print("(first %i frames will be used for trainintg, next %i for testing)" % (trajsize[0]-testsize, testsize)) 105 | indexes = list(range(trajsize[0])) 106 | if shuffle == 1: 107 | np.random.shuffle(indexes) 108 | training_set, testing_set = traj2[indexes[:-testsize],:]/maxbox, traj2[indexes[-testsize:],:]/maxbox 109 | training_cvs, testing_cvs = cvs[indexes[:-testsize]], cvs[indexes[-testsize:]] 110 | 111 | # (Deep) learning 112 | input_coord = krs.layers.Input(shape=(trajsize[1]*3,)) 113 | encoded = krs.layers.Dense(layer1, activation=actfun1, use_bias=True)(input_coord) 114 | if layers == 3: 115 | encoded = krs.layers.Dense(layer2, activation=actfun2, use_bias=True)(encoded) 116 | encoded = krs.layers.Dense(layer3, activation=actfun3, use_bias=True)(encoded) 117 | if layers == 2: 118 | encoded = krs.layers.Dense(layer2, activation=actfun2, use_bias=True)(encoded) 119 | encoded = krs.layers.Dense(1, activation='linear', use_bias=True)(encoded) 120 | codecvs = krs.models.Model(input_coord, encoded) 121 | codecvs.compile(optimizer=optim, loss=loss) 122 | 123 | if batch>0: 124 | codecvs.fit(training_set, training_cvs, 125 | epochs=epochs, 126 | batch_size=batch, 127 | validation_data=(testing_set, testing_cvs)) 128 | else: 129 | codecvs.fit(training_set, training_cvs, 130 | epochs=epochs, 131 | validation_data=(testing_set, testing_cvs)) 132 | 133 | # Encoding and decoding the trajectory 134 | coded_cvs = codecvs.predict(traj2/maxbox) 135 | 136 | # Calculating Pearson correlation coefficient 137 | print("") 138 | print("Pearson correlation coefficient for original and coded cvs is %f" % np.corrcoef(cvs,coded_cvs[:,0])[0,1]) 139 | print("") 140 | 141 | print("Pearson correlation coefficient for original and coded cvs in training set is %f" % np.corrcoef(training_cvs,coded_cvs[indexes[:-testsize],0])[0,1]) 142 | print("") 143 | 144 | print("Pearson correlation coefficient for original and 
coded cvs in testing set is %f" % np.corrcoef(testing_cvs,coded_cvs[indexes[-testsize:],0])[0,1]) 145 | print("") 146 | 147 | # Generating low-dimensional output 148 | if len(ofilename) > 0: 149 | print("Writing collective variables into %s" % ofilename) 150 | print("") 151 | ofile = open(ofilename, "w") 152 | for i in range(trajsize[0]): 153 | ofile.write("%f %f " % (coded_cvs[i],cvs[i])) 154 | typeofset = 'TE' 155 | if i in indexes[:-testsize]: 156 | typeofset = 'TR' 157 | ofile.write("%s \n" % typeofset) 158 | ofile.close() 159 | 160 | # Saving the model 161 | if modelfile != '': 162 | print("Writing model into %s.txt" % modelfile) 163 | print("") 164 | ofile = open(modelfile+'.txt', "w") 165 | ofile.write("maxbox = %f\n" % maxbox) 166 | ofile.write("input_coord = krs.layers.Input(shape=(trajsize[1]*3,))\n") 167 | ofile.write("encoded = krs.layers.Dense(%i, activation='%s', use_bias=True)(input_coord)\n" % (layer1, actfun1)) 168 | if layers == 3: 169 | ofile.write("encoded = krs.layers.Dense(%i, activation='%s', use_bias=True)(encoded)\n" % (layer2, actfun2)) 170 | ofile.write("encoded = krs.layers.Dense(%i, activation='%s', use_bias=True)(encoded)\n" % (layer3, actfun3)) 171 | if layers == 2: 172 | ofile.write("encoded = krs.layers.Dense(%i, activation='%s', use_bias=True)(encoded)\n" % (layer2, actfun2)) 173 | ofile.write("encoded = krs.layers.Dense(1, activation='linear', use_bias=True)(encoded)\n") 174 | ofile.write("codecvs = krs.models.Model(input_coord, encoded)\n") 175 | ofile.close() 176 | print("Writing model weights and biases into %s_*.npy NumPy arrays" % modelfile) 177 | print("") 178 | if layers == 1: 179 | np.save(file=modelfile+"_1.npy", arr=codecvs.layers[1].get_weights()) 180 | np.save(file=modelfile+"_2.npy", arr=codecvs.layers[2].get_weights()) 181 | if layers == 2: 182 | np.save(file=modelfile+"_1.npy", arr=codecvs.layers[1].get_weights()) 183 | np.save(file=modelfile+"_2.npy", arr=codecvs.layers[2].get_weights()) 184 | np.save(file=modelfile+"_3.npy", arr=codecvs.layers[3].get_weights()) 185 | else: 186 | np.save(file=modelfile+"_1.npy", arr=codecvs.layers[1].get_weights()) 187 | np.save(file=modelfile+"_2.npy", arr=codecvs.layers[2].get_weights()) 188 | np.save(file=modelfile+"_3.npy", arr=codecvs.layers[3].get_weights()) 189 | np.save(file=modelfile+"_4.npy", arr=codecvs.layers[4].get_weights()) 190 | 191 | if plumedfile != '': 192 | print("Writing Plumed <=2.5.3 input into %s" % plumedfile) 193 | print("") 194 | traj = md.load(infilename, top=intopname) 195 | table, bonds = traj.topology.to_dataframe() 196 | atoms = table['serial'][:] 197 | ofile = open(plumedfile, "w") 198 | ofile.write("WHOLEMOLECULES ENTITY0=1-%i\n" % np.max(atoms)) 199 | ofile.write("FIT_TO_TEMPLATE STRIDE=1 REFERENCE=%s TYPE=OPTIMAL\n" % intopname) 200 | for i in range(trajsize[1]): 201 | ofile.write("p%i: POSITION ATOM=%i NOPBC\n" % (i+1,atoms[i])) 202 | for i in range(trajsize[1]): 203 | ofile.write("p%ix: COMBINE ARG=p%i.x COEFFICIENTS=%f PERIODIC=NO\n" % (i+1,i+1,1.0/maxbox)) 204 | ofile.write("p%iy: COMBINE ARG=p%i.y COEFFICIENTS=%f PERIODIC=NO\n" % (i+1,i+1,1.0/maxbox)) 205 | ofile.write("p%iz: COMBINE ARG=p%i.z COEFFICIENTS=%f PERIODIC=NO\n" % (i+1,i+1,1.0/maxbox)) 206 | if layers==1: 207 | for i in range(layer1): 208 | toprint = "l1_%i: COMBINE ARG=" % (i+1) 209 | for j in range(trajsize[1]): 210 | toprint = toprint + "p%ix,p%iy,p%iz," % (j+1,j+1,j+1) 211 | toprint = toprint[:-1] + " COEFFICIENTS=" 212 | for j in range(3*trajsize[1]): 213 | toprint = toprint + "%0.6f," % 
(codecvs.layers[1].get_weights()[0][j,i]) 214 | toprint = toprint[:-1] + " PERIODIC=NO\n" 215 | ofile.write(toprint) 216 | for i in range(layer1): 217 | onebias = codecvs.layers[1].get_weights()[1][i] 218 | if onebias>0.0: 219 | if actfun1 == 'elu': printfun = "(exp(x+%0.6f)-1.0)*step(-x-%0.6f)+(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 220 | elif actfun1 == 'selu': printfun = "1.0507*(1.67326*exp(x+%0.6f)-1.67326)*step(-x-%0.6f)+1.0507*(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 221 | elif actfun1 == 'softplus': printfun = "log(1.0+exp(x+%0.6f))" % (onebias) 222 | elif actfun1 == 'softsign': printfun = "(x+%0.6f)/(1.0+step(x+%0.6f)*(x+%0.6f)+step(-x-%0.6f)*(-x-%0.6f))" % (onebias,onebias,onebias,onebias,onebias) 223 | elif actfun1 == 'relu': printfun = "step(x+%0.6f)*(x+%0.6f)" % (onebias,onebias) 224 | elif actfun1 == 'tanh': printfun = "(exp(x+%0.6f)-exp(-x-%0.6f))/(exp(x+%0.6f)+exp(-x-%0.6f))" % (onebias,onebias,onebias,onebias) 225 | elif actfun1 == 'sigmoid': printfun = "1.0/(1.0+exp(-x-%0.6f))" % (onebias) 226 | elif actfun1 == 'hard_sigmoid': printfun = "step(x+2.5+%0.6f)*((0.2*(x+%0.6f)+0.5)-step(x-2.5+%0.6f)*(0.2*(x+%0.6f)-0.5))" % (onebias,onebias,onebias,onebias) 227 | elif actfun1 == 'linear': printfun = "(x+%0.6f)" % (onebias) 228 | else: 229 | if actfun1 == 'elu': printfun = "(exp(x-%0.6f)-1.0)*step(-x+%0.6f)+(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 230 | elif actfun1 == 'selu': printfun = "1.0507*(1.67326*exp(x-%0.6f)-1.67326)*step(-x+%0.6f)+1.0507*(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 231 | elif actfun1 == 'softplus': printfun = "log(1.0+exp(x-%0.6f))" % (-onebias) 232 | elif actfun1 == 'softsign': printfun = "(x-%0.6f)/(1.0+step(x-%0.6f)*(x-%0.6f)+step(-x+%0.6f)*(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias,-onebias) 233 | elif actfun1 == 'relu': printfun = "step(x-%0.6f)*(x-%0.6f)" % (-onebias,-onebias) 234 | elif actfun1 == 'tanh': printfun = "(exp(x-%0.6f)-exp(-x+%0.6f))/(exp(x-%0.6f)+exp(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias) 235 | elif actfun1 == 'sigmoid': printfun = "1.0/(1.0+exp(-x+%0.6f))" % (-onebias) 236 | elif actfun1 == 'hard_sigmoid': printfun = "step(x+2.5-%0.6f)*((0.2*(x-%0.6f)+0.5)-step(x-2.5-%0.6f)*(0.2*(x-%0.6f)-0.5))" % (-onebias,-onebias,-onebias,-onebias) 237 | elif actfun1 == 'linear': printfun = "(x-%0.6f)" % (-onebias) 238 | ofile.write("l1r_%i: MATHEVAL ARG=l1_%i FUNC=%s PERIODIC=NO\n" % (i+1,i+1,printfun)) 239 | toprint = "l2: COMBINE ARG=" 240 | for j in range(layer1): 241 | toprint = toprint + "l1r_%i," % (j+1) 242 | toprint = toprint[:-1] + " COEFFICIENTS=" 243 | for j in range(layer1): 244 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[0][j]) 245 | toprint = toprint[:-1] + " PERIODIC=NO\n" 246 | ofile.write(toprint) 247 | if codecvs.layers[2].get_weights()[1][0]>0.0: 248 | ofile.write("l2r: MATHEVAL ARG=l2 FUNC=(x+%0.6f) PERIODIC=NO\n" % (codecvs.layers[2].get_weights()[1][0])) 249 | else: 250 | ofile.write("l2r: MATHEVAL ARG=l2 FUNC=(x-%0.6f) PERIODIC=NO\n" % (-codecvs.layers[2].get_weights()[1][0])) 251 | toprint = "PRINT ARG=l2r STRIDE=100 FILE=COLVAR\n" 252 | ofile.write(toprint) 253 | if layers==2: 254 | for i in range(layer1): 255 | toprint = "l1_%i: COMBINE ARG=" % (i+1) 256 | for j in range(trajsize[1]): 257 | toprint = toprint + "p%ix,p%iy,p%iz," % (j+1,j+1,j+1) 258 | toprint = toprint[:-1] + " COEFFICIENTS=" 259 | for j in range(3*trajsize[1]): 260 | toprint = toprint + "%0.6f," % 
(codecvs.layers[1].get_weights()[0][j,i]) 261 | toprint = toprint[:-1] + " PERIODIC=NO\n" 262 | ofile.write(toprint) 263 | for i in range(layer1): 264 | onebias = codecvs.layers[1].get_weights()[1][i] 265 | if onebias>0.0: 266 | if actfun1 == 'elu': printfun = "(exp(x+%0.6f)-1.0)*step(-x-%0.6f)+(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 267 | elif actfun1 == 'selu': printfun = "1.0507*(1.67326*exp(x+%0.6f)-1.67326)*step(-x-%0.6f)+1.0507*(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 268 | elif actfun1 == 'softplus': printfun = "log(1.0+exp(x+%0.6f))" % (onebias) 269 | elif actfun1 == 'softsign': printfun = "(x+%0.6f)/(1.0+step(x+%0.6f)*(x+%0.6f)+step(-x-%0.6f)*(-x-%0.6f))" % (onebias,onebias,onebias,onebias,onebias) 270 | elif actfun1 == 'relu': printfun = "step(x+%0.6f)*(x+%0.6f)" % (onebias,onebias) 271 | elif actfun1 == 'tanh': printfun = "(exp(x+%0.6f)-exp(-x-%0.6f))/(exp(x+%0.6f)+exp(-x-%0.6f))" % (onebias,onebias,onebias,onebias) 272 | elif actfun1 == 'sigmoid': printfun = "1.0/(1.0+exp(-x-%0.6f))" % (onebias) 273 | elif actfun1 == 'hard_sigmoid': printfun = "step(x+2.5+%0.6f)*((0.2*(x+%0.6f)+0.5)-step(x-2.5+%0.6f)*(0.2*(x+%0.6f)-0.5))" % (onebias,onebias,onebias,onebias) 274 | elif actfun1 == 'linear': printfun = "(x+%0.6f)" % (onebias) 275 | else: 276 | if actfun1 == 'elu': printfun = "(exp(x-%0.6f)-1.0)*step(-x+%0.6f)+(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 277 | elif actfun1 == 'selu': printfun = "1.0507*(1.67326*exp(x-%0.6f)-1.67326)*step(-x+%0.6f)+1.0507*(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 278 | elif actfun1 == 'softplus': printfun = "log(1.0+exp(x-%0.6f))" % (-onebias) 279 | elif actfun1 == 'softsign': printfun = "(x-%0.6f)/(1.0+step(x-%0.6f)*(x-%0.6f)+step(-x+%0.6f)*(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias,-onebias) 280 | elif actfun1 == 'relu': printfun = "step(x-%0.6f)*(x-%0.6f)" % (-onebias,-onebias) 281 | elif actfun1 == 'tanh': printfun = "(exp(x-%0.6f)-exp(-x+%0.6f))/(exp(x-%0.6f)+exp(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias) 282 | elif actfun1 == 'sigmoid': printfun = "1.0/(1.0+exp(-x+%0.6f))" % (-onebias) 283 | elif actfun1 == 'hard_sigmoid': printfun = "step(x+2.5-%0.6f)*((0.2*(x-%0.6f)+0.5)-step(x-2.5-%0.6f)*(0.2*(x-%0.6f)-0.5))" % (-onebias,-onebias,-onebias,-onebias) 284 | elif actfun1 == 'linear': printfun = "(x-%0.6f)" % (-onebias) 285 | ofile.write("l1r_%i: MATHEVAL ARG=l1_%i FUNC=%s PERIODIC=NO\n" % (i+1,i+1,printfun)) 286 | for i in range(layer2): 287 | toprint = "l2_%i: COMBINE ARG=" % (i+1) 288 | for j in range(layer1): 289 | toprint = toprint + "l1r_%i," % (j+1) 290 | toprint = toprint[:-1] + " COEFFICIENTS=" 291 | for j in range(layer1): 292 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[0][j,i]) 293 | toprint = toprint[:-1] + " PERIODIC=NO\n" 294 | ofile.write(toprint) 295 | for i in range(layer2): 296 | onebias = codecvs.layers[2].get_weights()[1][i] 297 | if onebias>0.0: 298 | if actfun2 == 'elu': printfun = "(exp(x+%0.6f)-1.0)*step(-x-%0.6f)+(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 299 | elif actfun2 == 'selu': printfun = "1.0507*(1.67326*exp(x+%0.6f)-1.67326)*step(-x-%0.6f)+1.0507*(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 300 | elif actfun2 == 'softplus': printfun = "log(1.0+exp(x+%0.6f))" % (onebias) 301 | elif actfun2 == 'softsign': printfun = "(x+%0.6f)/(1.0+step(x+%0.6f)*(x+%0.6f)+step(-x-%0.6f)*(-x-%0.6f))" % (onebias,onebias,onebias,onebias,onebias) 302 | elif actfun2 == 'relu': 
printfun = "step(x+%0.6f)*(x+%0.6f)" % (onebias,onebias) 303 | elif actfun2 == 'tanh': printfun = "(exp(x+%0.6f)-exp(-x-%0.6f))/(exp(x+%0.6f)+exp(-x-%0.6f))" % (onebias,onebias,onebias,onebias) 304 | elif actfun2 == 'sigmoid': printfun = "1.0/(1.0+exp(-x-%0.6f))" % (onebias) 305 | elif actfun2 == 'hard_sigmoid': printfun = "step(x+2.5+%0.6f)*((0.2*(x+%0.6f)+0.5)-step(x-2.5+%0.6f)*(0.2*(x+%0.6f)-0.5))" % (onebias,onebias,onebias,onebias) 306 | elif actfun2 == 'linear': printfun = "(x+%0.6f)" % (onebias) 307 | else: 308 | if actfun2 == 'elu': printfun = "(exp(x-%0.6f)-1.0)*step(-x+%0.6f)+(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 309 | elif actfun2 == 'selu': printfun = "1.0507*(1.67326*exp(x-%0.6f)-1.67326)*step(-x+%0.6f)+1.0507*(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 310 | elif actfun2 == 'softplus': printfun = "log(1.0+exp(x-%0.6f))" % (-onebias) 311 | elif actfun2 == 'softsign': printfun = "(x-%0.6f)/(1.0+step(x-%0.6f)*(x-%0.6f)+step(-x+%0.6f)*(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias,-onebias) 312 | elif actfun2 == 'relu': printfun = "step(x-%0.6f)*(x-%0.6f)" % (-onebias,-onebias) 313 | elif actfun2 == 'tanh': printfun = "(exp(x-%0.6f)-exp(-x+%0.6f))/(exp(x-%0.6f)+exp(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias) 314 | elif actfun2 == 'sigmoid': printfun = "1.0/(1.0+exp(-x+%0.6f))" % (-onebias) 315 | elif actfun2 == 'hard_sigmoid': printfun = "step(x+2.5-%0.6f)*((0.2*(x-%0.6f)+0.5)-step(x-2.5-%0.6f)*(0.2*(x-%0.6f)-0.5))" % (-onebias,-onebias,-onebias,-onebias) 316 | elif actfun2 == 'linear': printfun = "(x-%0.6f)" % (-onebias) 317 | ofile.write("l2r_%i: MATHEVAL ARG=l2_%i FUNC=%s PERIODIC=NO\n" % (i+1,i+1,printfun)) 318 | toprint = "l3: COMBINE ARG=" 319 | for j in range(layer2): 320 | toprint = toprint + "l2r_%i," % (j+1) 321 | toprint = toprint[:-1] + " COEFFICIENTS=" 322 | for j in range(layer2): 323 | toprint = toprint + "%0.6f," % (codecvs.layers[3].get_weights()[0][j]) 324 | toprint = toprint[:-1] + " PERIODIC=NO\n" 325 | ofile.write(toprint) 326 | if codecvs.layers[3].get_weights()[1][0]>0.0: 327 | ofile.write("l3r: MATHEVAL ARG=l3 FUNC=(x+%0.6f) PERIODIC=NO\n" % (codecvs.layers[3].get_weights()[1][0])) 328 | else: 329 | ofile.write("l3r: MATHEVAL ARG=l3 FUNC=(x-%0.6f) PERIODIC=NO\n" % (-codecvs.layers[3].get_weights()[1][0])) 330 | toprint = "PRINT ARG=l3r STRIDE=100 FILE=COLVAR\n" 331 | ofile.write(toprint) 332 | if layers==3: 333 | for i in range(layer1): 334 | toprint = "l1_%i: COMBINE ARG=" % (i+1) 335 | for j in range(trajsize[1]): 336 | toprint = toprint + "p%ix,p%iy,p%iz," % (j+1,j+1,j+1) 337 | toprint = toprint[:-1] + " COEFFICIENTS=" 338 | for j in range(3*trajsize[1]): 339 | toprint = toprint + "%0.6f," % (codecvs.layers[1].get_weights()[0][j,i]) 340 | toprint = toprint[:-1] + " PERIODIC=NO\n" 341 | ofile.write(toprint) 342 | for i in range(layer1): 343 | onebias = codecvs.layers[1].get_weights()[1][i] 344 | if onebias>0.0: 345 | if actfun1 == 'elu': printfun = "(exp(x+%0.6f)-1.0)*step(-x-%0.6f)+(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 346 | elif actfun1 == 'selu': printfun = "1.0507*(1.67326*exp(x+%0.6f)-1.67326)*step(-x-%0.6f)+1.0507*(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 347 | elif actfun1 == 'softplus': printfun = "log(1.0+exp(x+%0.6f))" % (onebias) 348 | elif actfun1 == 'softsign': printfun = "(x+%0.6f)/(1.0+step(x+%0.6f)*(x+%0.6f)+step(-x-%0.6f)*(-x-%0.6f))" % (onebias,onebias,onebias,onebias,onebias) 349 | elif actfun1 == 'relu': printfun = 
"step(x+%0.6f)*(x+%0.6f)" % (onebias,onebias) 350 | elif actfun1 == 'tanh': printfun = "(exp(x+%0.6f)-exp(-x-%0.6f))/(exp(x+%0.6f)+exp(-x-%0.6f))" % (onebias,onebias,onebias,onebias) 351 | elif actfun1 == 'sigmoid': printfun = "1.0/(1.0+exp(-x-%0.6f))" % (onebias) 352 | elif actfun1 == 'hard_sigmoid': printfun = "step(x+2.5+%0.6f)*((0.2*(x+%0.6f)+0.5)-step(x-2.5+%0.6f)*(0.2*(x+%0.6f)-0.5))" % (onebias,onebias,onebias,onebias) 353 | elif actfun1 == 'linear': printfun = "(x+%0.6f)" % (onebias) 354 | else: 355 | if actfun1 == 'elu': printfun = "(exp(x-%0.6f)-1.0)*step(-x+%0.6f)+(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 356 | elif actfun1 == 'selu': printfun = "1.0507*(1.67326*exp(x-%0.6f)-1.67326)*step(-x+%0.6f)+1.0507*(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 357 | elif actfun1 == 'softplus': printfun = "log(1.0+exp(x-%0.6f))" % (-onebias) 358 | elif actfun1 == 'softsign': printfun = "(x-%0.6f)/(1.0+step(x-%0.6f)*(x-%0.6f)+step(-x+%0.6f)*(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias,-onebias) 359 | elif actfun1 == 'relu': printfun = "step(x-%0.6f)*(x-%0.6f)" % (-onebias,-onebias) 360 | elif actfun1 == 'tanh': printfun = "(exp(x-%0.6f)-exp(-x+%0.6f))/(exp(x-%0.6f)+exp(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias) 361 | elif actfun1 == 'sigmoid': printfun = "1.0/(1.0+exp(-x+%0.6f))" % (-onebias) 362 | elif actfun1 == 'hard_sigmoid': printfun = "step(x+2.5-%0.6f)*((0.2*(x-%0.6f)+0.5)-step(x-2.5-%0.6f)*(0.2*(x-%0.6f)-0.5))" % (-onebias,-onebias,-onebias,-onebias) 363 | elif actfun1 == 'linear': printfun = "(x-%0.6f)" % (-onebias) 364 | ofile.write("l1r_%i: MATHEVAL ARG=l1_%i FUNC=%s PERIODIC=NO\n" % (i+1,i+1,printfun)) 365 | for i in range(layer2): 366 | toprint = "l2_%i: COMBINE ARG=" % (i+1) 367 | for j in range(layer1): 368 | toprint = toprint + "l1r_%i," % (j+1) 369 | toprint = toprint[:-1] + " COEFFICIENTS=" 370 | for j in range(layer1): 371 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[0][j,i]) 372 | toprint = toprint[:-1] + " PERIODIC=NO\n" 373 | ofile.write(toprint) 374 | for i in range(layer2): 375 | onebias = codecvs.layers[2].get_weights()[1][i] 376 | if onebias>0.0: 377 | if actfun2 == 'elu': printfun = "(exp(x+%0.6f)-1.0)*step(-x-%0.6f)+(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 378 | elif actfun2 == 'selu': printfun = "1.0507*(1.67326*exp(x+%0.6f)-1.67326)*step(-x-%0.6f)+1.0507*(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 379 | elif actfun2 == 'softplus': printfun = "log(1.0+exp(x+%0.6f))" % (onebias) 380 | elif actfun2 == 'softsign': printfun = "(x+%0.6f)/(1.0+step(x+%0.6f)*(x+%0.6f)+step(-x-%0.6f)*(-x-%0.6f))" % (onebias,onebias,onebias,onebias,onebias) 381 | elif actfun2 == 'relu': printfun = "step(x+%0.6f)*(x+%0.6f)" % (onebias,onebias) 382 | elif actfun2 == 'tanh': printfun = "(exp(x+%0.6f)-exp(-x-%0.6f))/(exp(x+%0.6f)+exp(-x-%0.6f))" % (onebias,onebias,onebias,onebias) 383 | elif actfun2 == 'sigmoid': printfun = "1.0/(1.0+exp(-x-%0.6f))" % (onebias) 384 | elif actfun2 == 'hard_sigmoid': printfun = "step(x+2.5+%0.6f)*((0.2*(x+%0.6f)+0.5)-step(x-2.5+%0.6f)*(0.2*(x+%0.6f)-0.5))" % (onebias,onebias,onebias,onebias) 385 | elif actfun2 == 'linear': printfun = "(x+%0.6f)" % (onebias) 386 | else: 387 | if actfun2 == 'elu': printfun = "(exp(x-%0.6f)-1.0)*step(-x+%0.6f)+(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 388 | elif actfun2 == 'selu': printfun = "1.0507*(1.67326*exp(x-%0.6f)-1.67326)*step(-x+%0.6f)+1.0507*(x-%0.6f)*step(x-%0.6f)" % 
(-onebias,-onebias,-onebias,-onebias) 389 | elif actfun2 == 'softplus': printfun = "log(1.0+exp(x-%0.6f))" % (-onebias) 390 | elif actfun2 == 'softsign': printfun = "(x-%0.6f)/(1.0+step(x-%0.6f)*(x-%0.6f)+step(-x+%0.6f)*(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias,-onebias) 391 | elif actfun2 == 'relu': printfun = "step(x-%0.6f)*(x-%0.6f)" % (-onebias,-onebias) 392 | elif actfun2 == 'tanh': printfun = "(exp(x-%0.6f)-exp(-x+%0.6f))/(exp(x-%0.6f)+exp(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias) 393 | elif actfun2 == 'sigmoid': printfun = "1.0/(1.0+exp(-x+%0.6f))" % (-onebias) 394 | elif actfun2 == 'hard_sigmoid': printfun = "step(x+2.5-%0.6f)*((0.2*(x-%0.6f)+0.5)-step(x-2.5-%0.6f)*(0.2*(x-%0.6f)-0.5))" % (-onebias,-onebias,-onebias,-onebias) 395 | elif actfun2 == 'linear': printfun = "(x-%0.6f)" % (-onebias) 396 | ofile.write("l2r_%i: MATHEVAL ARG=l2_%i FUNC=%s PERIODIC=NO\n" % (i+1,i+1,printfun)) 397 | for i in range(layer3): 398 | toprint = "l3_%i: COMBINE ARG=" % (i+1) 399 | for j in range(layer2): 400 | toprint = toprint + "l2r_%i," % (j+1) 401 | toprint = toprint[:-1] + " COEFFICIENTS=" 402 | for j in range(layer2): 403 | toprint = toprint + "%0.6f," % (codecvs.layers[3].get_weights()[0][j,i]) 404 | toprint = toprint[:-1] + " PERIODIC=NO\n" 405 | ofile.write(toprint) 406 | for i in range(layer3): 407 | onebias = codecvs.layers[3].get_weights()[1][i] 408 | if onebias>0.0: 409 | if actfun3 == 'elu': printfun = "(exp(x+%0.6f)-1.0)*step(-x-%0.6f)+(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 410 | elif actfun3 == 'selu': printfun = "1.0507*(1.67326*exp(x+%0.6f)-1.67326)*step(-x-%0.6f)+1.0507*(x+%0.6f)*step(x+%0.6f)" % (onebias,onebias,onebias,onebias) 411 | elif actfun3 == 'softplus': printfun = "log(1.0+exp(x+%0.6f))" % (onebias) 412 | elif actfun3 == 'softsign': printfun = "(x+%0.6f)/(1.0+step(x+%0.6f)*(x+%0.6f)+step(-x-%0.6f)*(-x-%0.6f))" % (onebias,onebias,onebias,onebias,onebias) 413 | elif actfun3 == 'relu': printfun = "step(x+%0.6f)*(x+%0.6f)" % (onebias,onebias) 414 | elif actfun3 == 'tanh': printfun = "(exp(x+%0.6f)-exp(-x-%0.6f))/(exp(x+%0.6f)+exp(-x-%0.6f))" % (onebias,onebias,onebias,onebias) 415 | elif actfun3 == 'sigmoid': printfun = "1.0/(1.0+exp(-x-%0.6f))" % (onebias) 416 | elif actfun3 == 'hard_sigmoid': printfun = "step(x+2.5+%0.6f)*((0.2*(x+%0.6f)+0.5)-step(x-2.5+%0.6f)*(0.2*(x+%0.6f)-0.5))" % (onebias,onebias,onebias,onebias) 417 | elif actfun3 == 'linear': printfun = "(x+%0.6f)" % (onebias) 418 | else: 419 | if actfun3 == 'elu': printfun = "(exp(x-%0.6f)-1.0)*step(-x+%0.6f)+(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 420 | elif actfun3 == 'selu': printfun = "1.0507*(1.67326*exp(x-%0.6f)-1.67326)*step(-x+%0.6f)+1.0507*(x-%0.6f)*step(x-%0.6f)" % (-onebias,-onebias,-onebias,-onebias) 421 | elif actfun3 == 'softplus': printfun = "log(1.0+exp(x-%0.6f))" % (-onebias) 422 | elif actfun3 == 'softsign': printfun = "(x-%0.6f)/(1.0+step(x-%0.6f)*(x-%0.6f)+step(-x+%0.6f)*(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias,-onebias) 423 | elif actfun3 == 'relu': printfun = "step(x-%0.6f)*(x-%0.6f)" % (-onebias,-onebias) 424 | elif actfun3 == 'tanh': printfun = "(exp(x-%0.6f)-exp(-x+%0.6f))/(exp(x-%0.6f)+exp(-x+%0.6f))" % (-onebias,-onebias,-onebias,-onebias) 425 | elif actfun3 == 'sigmoid': printfun = "1.0/(1.0+exp(-x+%0.6f))" % (-onebias) 426 | elif actfun3 == 'hard_sigmoid': printfun = "step(x+2.5-%0.6f)*((0.2*(x-%0.6f)+0.5)-step(x-2.5-%0.6f)*(0.2*(x-%0.6f)-0.5))" % (-onebias,-onebias,-onebias,-onebias) 427 | elif actfun3 == 
'linear': printfun = "(x-%0.6f)" % (-onebias) 428 | ofile.write("l3r_%i: MATHEVAL ARG=l3_%i FUNC=%s PERIODIC=NO\n" % (i+1,i+1,printfun)) 429 | #for i in range(encdim): 430 | toprint = "l4: COMBINE ARG=" 431 | for j in range(layer3): 432 | toprint = toprint + "l3r_%i," % (j+1) 433 | toprint = toprint[:-1] + " COEFFICIENTS=" 434 | for j in range(layer3): 435 | toprint = toprint + "%0.6f," % (codecvs.layers[4].get_weights()[0][j]) 436 | toprint = toprint[:-1] + " PERIODIC=NO\n" 437 | ofile.write(toprint) 438 | #for i in range(encdim): 439 | if codecvs.layers[4].get_weights()[1][0]>0.0: 440 | ofile.write("l4r: MATHEVAL ARG=l4 FUNC=(x+%0.6f) PERIODIC=NO\n" % (codecvs.layers[4].get_weights()[1][0])) 441 | else: 442 | ofile.write("l4r: MATHEVAL ARG=l4 FUNC=(x-%0.6f) PERIODIC=NO\n" % (-codecvs.layers[4].get_weights()[1][0])) 443 | toprint = "PRINT ARG=l4r STRIDE=100 FILE=COLVAR\n" 444 | ofile.write(toprint) 445 | ofile.close() 446 | 447 | if plumedfile2 != '': 448 | print("Writing Plumed >= 2.6 input into %s" % plumedfile2) 449 | print("") 450 | traj = md.load(infilename, top=intopname) 451 | table, bonds = traj.topology.to_dataframe() 452 | atoms = table['serial'][:] 453 | ofile = open(plumedfile2, "w") 454 | ofile.write("WHOLEMOLECULES ENTITY0=1-%i\n" % np.max(atoms)) 455 | ofile.write("FIT_TO_TEMPLATE STRIDE=1 REFERENCE=%s TYPE=OPTIMAL\n" % intopname) 456 | for i in range(trajsize[1]): 457 | ofile.write("p%i: POSITION ATOM=%i NOPBC\n" % (i+1,atoms[i])) 458 | for i in range(trajsize[1]): 459 | ofile.write("p%ix: COMBINE ARG=p%i.x COEFFICIENTS=%f PERIODIC=NO\n" % (i+1,i+1,1.0/maxbox)) 460 | ofile.write("p%iy: COMBINE ARG=p%i.y COEFFICIENTS=%f PERIODIC=NO\n" % (i+1,i+1,1.0/maxbox)) 461 | ofile.write("p%iz: COMBINE ARG=p%i.z COEFFICIENTS=%f PERIODIC=NO\n" % (i+1,i+1,1.0/maxbox)) 462 | if layers==1: 463 | ofile.write("ANN ...\n") 464 | ofile.write("LABEL=ann\n") 465 | toprint = "ARG=" 466 | for j in range(trajsize[1]): 467 | toprint = toprint + "p%ix,p%iy,p%iz," % (j+1,j+1,j+1) 468 | toprint = toprint[:-1] + "\n" 469 | ofile.write(toprint) 470 | ofile.write("NUM_LAYERS=3\n") 471 | ofile.write("NUM_NODES=%i,%i,1\n" % (3*trajsize[1],layer1)) 472 | if actfun1 == 'tanh': 473 | ofile.write("ACTIVATIONS=Tanh,Linear\n") 474 | else: 475 | print("ERROR: Only tanh activation function supported in ANN module") 476 | exit(0) 477 | toprint = "WEIGHTS0=" 478 | for i in range(layer1): 479 | for j in range(3*trajsize[1]): 480 | toprint = toprint + "%0.6f," % (codecvs.layers[1].get_weights()[0][j,i]) 481 | toprint = toprint[:-1] + "\n" 482 | ofile.write(toprint) 483 | toprint = "WEIGHTS1=" 484 | for j in range(layer1): 485 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[0][j]) 486 | toprint = toprint[:-1] + "\n" 487 | ofile.write(toprint) 488 | toprint = "BIASES0=" 489 | for i in range(layer1): 490 | toprint = toprint + "%0.6f," % (codecvs.layers[1].get_weights()[1][i]) 491 | toprint = toprint[:-1] + "\n" 492 | ofile.write(toprint) 493 | toprint = "BIASES1=%0.6f\n" % (codecvs.layers[2].get_weights()[1][0]) 494 | ofile.write(toprint) 495 | ofile.write("... 
ANN\n") 496 | toprint = "PRINT ARG=ann.node-0 STRIDE=100 FILE=COLVAR\n" 497 | ofile.write(toprint) 498 | if layers==2: 499 | ofile.write("ANN ...\n") 500 | ofile.write("LABEL=ann\n") 501 | toprint = "ARG=" 502 | for j in range(trajsize[1]): 503 | toprint = toprint + "p%ix,p%iy,p%iz," % (j+1,j+1,j+1) 504 | toprint = toprint[:-1] + "\n" 505 | ofile.write(toprint) 506 | ofile.write("NUM_LAYERS=4\n") 507 | ofile.write("NUM_NODES=%i,%i,%i,1\n" % (3*trajsize[1],layer1,layer2)) 508 | if actfun1 == 'tanh' and actfun2 == 'tanh': 509 | ofile.write("ACTIVATIONS=Tanh,Tanh,Linear\n") 510 | else: 511 | print("ERROR: Only tanh activation function supported in ANN module") 512 | exit(0) 513 | toprint = "WEIGHTS0=" 514 | for i in range(layer1): 515 | for j in range(3*trajsize[1]): 516 | toprint = toprint + "%0.6f," % (codecvs.layers[1].get_weights()[0][j,i]) 517 | toprint = toprint[:-1] + "\n" 518 | ofile.write(toprint) 519 | toprint = "WEIGHTS1=" 520 | for i in range(layer2): 521 | for j in range(layer1): 522 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[0][j,i]) 523 | toprint = toprint[:-1] + "\n" 524 | ofile.write(toprint) 525 | toprint = "WEIGHTS2=" 526 | #for i in range(layer3): 527 | for j in range(layer2): 528 | toprint = toprint + "%0.6f," % (codecvs.layers[3].get_weights()[0][j]) 529 | toprint = toprint[:-1] + "\n" 530 | ofile.write(toprint) 531 | toprint = "BIASES0=" 532 | for i in range(layer1): 533 | toprint = toprint + "%0.6f," % (codecvs.layers[1].get_weights()[1][i]) 534 | toprint = toprint[:-1] + "\n" 535 | ofile.write(toprint) 536 | toprint = "BIASES1=" 537 | for i in range(layer2): 538 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[1][i]) 539 | toprint = toprint[:-1] + "\n" 540 | ofile.write(toprint) 541 | toprint = "BIASES2=%0.6f\n" % (codecvs.layers[3].get_weights()[1][0]) 542 | ofile.write(toprint) 543 | ofile.write("... 
ANN\n") 544 | toprint = "PRINT ARG=ann.node-0 STRIDE=100 FILE=COLVAR\n" 545 | ofile.write(toprint) 546 | if layers==3: 547 | ofile.write("ANN ...\n") 548 | ofile.write("LABEL=ann\n") 549 | toprint = "ARG=" 550 | for j in range(trajsize[1]): 551 | toprint = toprint + "p%ix,p%iy,p%iz," % (j+1,j+1,j+1) 552 | toprint = toprint[:-1] + "\n" 553 | ofile.write(toprint) 554 | ofile.write("NUM_LAYERS=5\n") 555 | ofile.write("NUM_NODES=%i,%i,%i,%i,1\n" % (3*trajsize[1],layer1,layer2,layer3)) 556 | if actfun1 == 'tanh' and actfun2 == 'tanh' and actfun3 == 'tanh': 557 | ofile.write("ACTIVATIONS=Tanh,Tanh,Tanh,Linear\n") 558 | else: 559 | print("ERROR: Only tanh activation function supported in ANN module") 560 | exit(0) 561 | toprint = "WEIGHTS0=" 562 | for i in range(layer1): 563 | for j in range(3*trajsize[1]): 564 | toprint = toprint + "%0.6f," % (codecvs.layers[1].get_weights()[0][j,i]) 565 | toprint = toprint[:-1] + "\n" 566 | ofile.write(toprint) 567 | toprint = "WEIGHTS1=" 568 | for i in range(layer2): 569 | for j in range(layer1): 570 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[0][j,i]) 571 | toprint = toprint[:-1] + "\n" 572 | ofile.write(toprint) 573 | toprint = "WEIGHTS2=" 574 | for i in range(layer3): 575 | for j in range(layer2): 576 | toprint = toprint + "%0.6f," % (codecvs.layers[3].get_weights()[0][j,i]) 577 | toprint = toprint[:-1] + "\n" 578 | ofile.write(toprint) 579 | toprint = "WEIGHTS3=" 580 | #for i in range(layer4): 581 | for j in range(layer3): 582 | toprint = toprint + "%0.6f," % (codecvs.layers[4].get_weights()[0][j]) 583 | toprint = toprint[:-1] + "\n" 584 | ofile.write(toprint) 585 | toprint = "BIASES0=" 586 | for i in range(layer1): 587 | toprint = toprint + "%0.6f," % (codecvs.layers[1].get_weights()[1][i]) 588 | toprint = toprint[:-1] + "\n" 589 | ofile.write(toprint) 590 | toprint = "BIASES1=" 591 | for i in range(layer2): 592 | toprint = toprint + "%0.6f," % (codecvs.layers[2].get_weights()[1][i]) 593 | toprint = toprint[:-1] + "\n" 594 | ofile.write(toprint) 595 | toprint = "BIASES2=" 596 | for i in range(layer3): 597 | toprint = toprint + "%0.6f," % (codecvs.layers[3].get_weights()[1][i]) 598 | toprint = toprint[:-1] + "\n" 599 | ofile.write(toprint) 600 | toprint = "BIASES3=%0.6f\n" % (codecvs.layers[4].get_weights()[1][0]) 601 | ofile.write(toprint) 602 | ofile.write("... ANN\n") 603 | toprint = "PRINT ARG=ann.node-0 STRIDE=100 FILE=COLVAR\n" 604 | ofile.write(toprint) 605 | ofile.close() 606 | return codecvs, np.corrcoef(cvs,coded_cvs[:,0])[0,1] 607 | 608 | -------------------------------------------------------------------------------- /bin/anncolvar: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | libnames = [('mdtraj', 'md'), ('numpy', 'np'), ('keras', 'krs'), ('tensorflow', 'tf'), ('argparse', 'arg'), ('datetime', 'dt'), ('sys', 'sys')] 4 | 5 | for (name, short) in libnames: 6 | try: 7 | lib = __import__(name) 8 | except ImportError: 9 | print("Library %s cannot be loaded, exiting" % name) 10 | exit(0) 11 | else: 12 | globals()[short] = lib 13 | 14 | import anncolvar 15 | 16 | # Parsing command line arguments 17 | parser = arg.ArgumentParser(description='Artificial neural network learning of collective variables of molecular systems, requires numpy, keras and mdtraj') 18 | 19 | parser.add_argument('-i', dest='infile', default='traj.xtc', 20 | help='Input trajectory in pdb, xtc, trr, dcd, netcdf or mdcrd, WARNING: the trajectory must be 1. centered in the PBC box, 2. 
fitted to a reference structure and 3. must contain only atoms to be analysed!')
21 | 
22 | parser.add_argument('-p', dest='intop', default='top.pdb',
23 |                     help='Input topology in pdb, WARNING: the structure must be 1. centered in the PBC box and 2. must contain only atoms to be analysed!')
24 | 
25 | parser.add_argument('-c', dest='colvar', default='colvar.txt',
26 |                     help='Input collective variable file in text format, must contain the same number of lines as frames in the trajectory')
27 | 
28 | parser.add_argument('-col', dest='col', default=2, type=int,
29 |                     help='The index of the column containing collective variables in the input collective variable file')
30 | 
31 | parser.add_argument('-boxx', dest='boxx', default=0.0, type=float,
32 |                     help='Size of the PBC box in the x dimension (from 0 to the set value, in nm)')
33 | 
34 | parser.add_argument('-boxy', dest='boxy', default=0.0, type=float,
35 |                     help='Size of the PBC box in the y dimension (from 0 to the set value, in nm)')
36 | 
37 | parser.add_argument('-boxz', dest='boxz', default=0.0, type=float,
38 |                     help='Size of the PBC box in the z dimension (from 0 to the set value, in nm)')
39 | 
40 | parser.add_argument('-nofit', dest='nofit', default='False',
41 |                     help='Disable fitting; the trajectory must already be properly fitted (default False)')
42 | 
43 | parser.add_argument('-testset', dest='testset', default=0.10, type=float,
44 |                     help='Size of the test set (fraction of the trajectory, default = 0.1)')
45 | 
46 | parser.add_argument('-shuffle', dest='shuffle', default='True',
47 |                     help='Shuffle trajectory frames to obtain training and test sets (default True)')
48 | 
49 | parser.add_argument('-layers', dest='layers', default=1, type=int,
50 |                     help='Number of hidden layers (allowed values 1-3, default = 1)')
51 | 
52 | parser.add_argument('-layer1', dest='layer1', default=256, type=int,
53 |                     help='Number of neurons in the first encoding layer (default = 256)')
54 | 
55 | parser.add_argument('-layer2', dest='layer2', default=256, type=int,
56 |                     help='Number of neurons in the second encoding layer (default = 256)')
57 | 
58 | parser.add_argument('-layer3', dest='layer3', default=256, type=int,
59 |                     help='Number of neurons in the third encoding layer (default = 256)')
60 | 
61 | parser.add_argument('-actfun1', dest='actfun1', default='sigmoid',
62 |                     help='Activation function of the first layer (default = sigmoid, for options see keras documentation)')
63 | 
64 | parser.add_argument('-actfun2', dest='actfun2', default='linear',
65 |                     help='Activation function of the second layer (default = linear, for options see keras documentation)')
66 | 
67 | parser.add_argument('-actfun3', dest='actfun3', default='linear',
68 |                     help='Activation function of the third layer (default = linear, for options see keras documentation)')
69 | 
70 | parser.add_argument('-optim', dest='optim', default='adam',
71 |                     help='Optimizer (default = adam, for options see keras documentation)')
72 | 
73 | parser.add_argument('-loss', dest='loss', default='mean_squared_error',
74 |                     help='Loss function (default = mean_squared_error, for options see keras documentation)')
75 | 
76 | parser.add_argument('-epochs', dest='epochs', default=100, type=int,
77 |                     help='Number of epochs (default = 100, >1000 may be necessary for real-life applications)')
78 | 
79 | parser.add_argument('-batch', dest='batch', default=256, type=int,
80 |                     help='Batch size (0 = no batches, default = 256)')
81 | 
82 | parser.add_argument('-o', dest='ofile', default='',
83 |                     help='Output file with original and approximated collective variables (txt, default = no 
output)')
84 | 
85 | parser.add_argument('-model', dest='modelfile', default='',
86 |                     help='Prefix for output model files (experimental, default = no output)')
87 | 
88 | parser.add_argument('-plumed', dest='plumedfile', default='plumed.dat',
89 |                     help='Output file for Plumed (default = plumed.dat)')
90 | 
91 | parser.add_argument('-plumed2', dest='plumedfile2', default='plumed2.dat',
92 |                     help='Output file for Plumed with ANN module (default = plumed2.dat)')
93 | 
94 | args = parser.parse_args()
95 | 
96 | infilename = args.infile
97 | intopname = args.intop
98 | colvarname = args.colvar
99 | column = args.col
100 | boxx = args.boxx
101 | boxy = args.boxy
102 | boxz = args.boxz
103 | if args.testset < 0.0 or args.testset > 0.5:
104 |     print("ERROR: -testset must be 0.0 - 0.5")
105 |     exit(0)
106 | atestset = float(args.testset)
107 | 
108 | # Shuffling the trajectory before splitting
109 | if args.shuffle == "True":
110 |     shuffle = 1
111 | elif args.shuffle == "False":
112 |     shuffle = 0
113 | else:
114 |     print("ERROR: -shuffle %s not understood" % args.shuffle)
115 |     exit(0)
116 | if args.nofit == "True":
117 |     nofit = 1
118 | elif args.nofit == "False":
119 |     nofit = 0
120 | else:
121 |     print("ERROR: -nofit %s not understood" % args.nofit)
122 |     exit(0)
123 | if args.layers < 1 or args.layers > 3:
124 |     print("ERROR: -layers must be 1-3, for deeper learning contact authors")
125 |     exit(0)
126 | if args.layer1 > 1024:
127 |     print("WARNING: You plan to use %i neurons in the first layer, could be slow" % args.layer1)
128 | if args.layers >= 2:
129 |     if args.layer2 > 1024:
130 |         print("WARNING: You plan to use %i neurons in the second layer, could be slow" % args.layer2)
131 | if args.layers == 3:
132 |     if args.layer3 > 1024:
133 |         print("WARNING: You plan to use %i neurons in the third layer, could be slow" % args.layer3)
134 | if args.actfun1 not in ['softmax','elu','selu','softplus','softsign','relu','tanh','sigmoid','hard_sigmoid','linear']:
135 |     print("ERROR: cannot understand -actfun1 %s" % args.actfun1)
136 |     exit(0)
137 | if args.layers >= 2:
138 |     if args.actfun2 not in ['softmax','elu','selu','softplus','softsign','relu','tanh','sigmoid','hard_sigmoid','linear']:
139 |         print("ERROR: cannot understand -actfun2 %s" % args.actfun2)
140 |         exit(0)
141 | if args.layers == 3:
142 |     if args.actfun3 not in ['softmax','elu','selu','softplus','softsign','relu','tanh','sigmoid','hard_sigmoid','linear']:
143 |         print("ERROR: cannot understand -actfun3 %s" % args.actfun3)
144 |         exit(0)
145 | if args.layers == 1 and args.actfun2!='linear':
146 |     print("ERROR: actfun2 must be linear for -layers 1")
147 |     exit(0)
148 | if args.layers == 2 and args.actfun3!='linear':
149 |     print("ERROR: actfun3 must be linear for -layers 2")
150 |     exit(0)
151 | layers = args.layers
152 | layer1 = args.layer1
153 | layer2 = args.layer2
154 | layer3 = args.layer3
155 | actfun1 = args.actfun1
156 | actfun2 = args.actfun2
157 | actfun3 = args.actfun3
158 | epochs = args.epochs
159 | optim = args.optim
160 | batch = args.batch
161 | loss = args.loss
162 | if args.ofile[-4:] == '.txt':
163 |     ofilename = args.ofile
164 | elif len(args.ofile)>0:
165 |     ofilename = args.ofile + '.txt'
166 | else:
167 |     ofilename = ''
168 | modelfile = args.modelfile
169 | plumedfile = args.plumedfile
170 | plumedfile2 = args.plumedfile2
171 | if plumedfile[-4:] != '.dat':
172 |     plumedfile = plumedfile + '.dat'
173 | if plumedfile2[-4:] != '.dat':
174 |     plumedfile2 = plumedfile2 + '.dat'
175 | if plumedfile2 !="":
176 |     if layers == 1:
177 |         if actfun1 !="tanh":
178 |             print("ERROR: only tanh 
and linear functions are currently supported in ANN module of Plumed") 179 | exit(0) 180 | elif layers == 2: 181 | if actfun1 !="tanh" or actfun2 !="tanh": 182 | print("ERROR: only tanh and linear functions are currently supported in ANN module of Plumed") 183 | exit(0) 184 | elif layers == 3: 185 | if actfun1 !="tanh" or actfun2 !="tanh" or actfun3 !="tanh": 186 | print("ERROR: only tanh and linear functions are currently supported in ANN module of Plumed") 187 | exit(0) 188 | anncolvar.anncollectivevariable(infilename, intopname, colvarname, column, 189 | boxx, boxy, boxz, atestset, shuffle, nofit, 190 | layers, layer1, layer2, layer3, 191 | actfun1, actfun2, actfun3, 192 | optim, loss, epochs, batch, 193 | ofilename, modelfile, plumedfile, plumedfile2) 194 | 195 | 196 | -------------------------------------------------------------------------------- /data/reference.pdb: -------------------------------------------------------------------------------- 1 | TITLE Great Red Owns Many ACres of Sand 2 | REMARK THIS IS A SIMULATION BOX 3 | CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P 1 1 4 | MODEL 1 5 | ATOM 1 C1 MOL 1 6.464 5.717 4.333 1.00 1.00 C 6 | ATOM 2 C2 MOL 1 6.654 4.285 4.806 1.00 1.00 C 7 | ATOM 3 C3 MOL 1 5.529 3.740 5.671 1.00 1.00 C 8 | ATOM 4 C4 MOL 1 4.301 3.271 4.908 1.00 1.00 C 9 | ATOM 5 C5 MOL 1 3.178 4.290 4.808 1.00 1.00 C 10 | ATOM 6 C6 MOL 1 3.621 5.702 4.462 1.00 1.00 C 11 | ATOM 7 C7 MOL 1 4.343 6.440 5.577 1.00 1.00 C 12 | ATOM 8 C8 MOL 1 5.830 6.652 5.351 1.00 1.00 C 13 | ATOM 9 F1 MOL 1 7.737 6.221 4.054 1.00 1.00 F 14 | ATOM 10 H1 MOL 1 5.935 5.733 3.367 1.00 1.00 H 15 | ATOM 11 H2 MOL 1 6.839 3.618 3.942 1.00 1.00 H 16 | ATOM 12 H3 MOL 1 7.591 4.243 5.400 1.00 1.00 H 17 | ATOM 13 F2 MOL 1 6.074 2.616 6.277 1.00 1.00 F 18 | ATOM 14 H4 MOL 1 5.272 4.405 6.503 1.00 1.00 H 19 | ATOM 15 H5 MOL 1 4.603 2.953 3.893 1.00 1.00 H 20 | ATOM 16 F4 MOL 1 3.756 2.149 5.537 1.00 1.00 F 21 | ATOM 17 H6 MOL 1 2.445 3.935 4.050 1.00 1.00 H 22 | ATOM 18 H7 MOL 1 2.618 4.311 5.768 1.00 1.00 H 23 | ATOM 19 H8 MOL 1 4.136 5.727 3.485 1.00 1.00 H 24 | ATOM 20 H9 MOL 1 2.681 6.274 4.264 1.00 1.00 H 25 | ATOM 21 H10 MOL 1 3.880 7.452 5.678 1.00 1.00 H 26 | ATOM 22 H11 MOL 1 4.140 5.967 6.562 1.00 1.00 H 27 | ATOM 23 H12 MOL 1 6.382 6.597 6.314 1.00 1.00 H 28 | ATOM 24 H13 MOL 1 5.983 7.694 4.990 1.00 1.00 H 29 | TER 30 | ENDMDL 31 | -------------------------------------------------------------------------------- /data/traj_fit.xtc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spiwokv/anncolvar/353aac87c27b66bbdec69c5077332b9a9400b010/data/traj_fit.xtc -------------------------------------------------------------------------------- /docs/html/installing.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | 5 | Prerequisite 6 | ------------- 7 | 8 | Anncolvar requires one of three machine learning beckends, either 9 | Tensorflow, Theano or CNTK (all tested by continuous integration 10 | services). 
Choose one of the backends and install it by following these
11 | sites:
12 | 
13 | `TensorFlow`_
14 | 
15 | `Theano`_
16 | 
17 | `CNTK`_
18 | 
19 | 
20 | Installing with pip
21 | -------------------
22 | 
23 | To install with pip, run the following::
24 | 
25 |   pip install anncolvar
26 | 
27 | 
28 | Installing with pip from GitHub
29 | -------------------------------
30 | 
31 | To install the master version from GitHub with pip, run the following::
32 | 
33 |   git clone https://github.com/spiwokv/anncolvar.git
34 | 
35 |   cd anncolvar
36 | 
37 |   pip install .
38 | 
39 | 
40 | Upgrading
41 | ---------
42 | 
43 | To upgrade anncolvar, type::
44 | 
45 |   pip install -U anncolvar
46 | 
47 | 
48 | Compatibility
49 | -------------
50 | 
51 | Anncolvar requires the Python libraries sys, datetime, argparse, numpy, mdtraj and keras.
52 | Keras must run on one of three backends: Tensorflow, Theano or CNTK.
53 | 
54 | .. _TensorFlow: https://www.tensorflow.org/install/
55 | 
56 | .. _Theano: http://deeplearning.net/software/theano/install.html
57 | 
58 | .. _CNTK: https://docs.microsoft.com/en-us/cognitive-toolkit/setup-cntk-on-your-machine
59 | 
60 | 
--------------------------------------------------------------------------------
/docs/html/quickstart.rst:
--------------------------------------------------------------------------------
 1 | Quickstart
 2 | ==========
 3 | 
 4 | For help type::
 5 | 
 6 |   anncolvar -h
 7 | 
 8 | 
 9 | In a local directory place an mdtraj-compatible trajectory (without periodic boundary issues,
10 | analysed atoms only) into the file traj_fit.xtc, its structure in PDB format into reference.pdb
11 | (same atoms as in traj_fit.xtc), and the values of collective variables into the file results_isomap
12 | (a space-separated file with the structure number in the first column and collective variables in
13 | the second, third, fourth and fifth columns). 
Next type:: 14 | 15 | anncolvar -i traj_fit.xtc -p reference.pdb -c results_isomap -col 2 -boxx 1 -boxy 1 -boxz 1 \ 16 | -layers 3 -layer1 16 -layer2 8 -layer3 4 -actfun1 sigmoid -actfun2 sigmoid -actfun3 sigmoid \ 17 | -optim adam -loss mean_squared_error -epochs 1000 -batch 256 \ 18 | -o low.txt -model model -plumed plumed.dat 19 | 20 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | certifi>=2021.10.8 2 | Cython>=0.29.24 3 | DateTime>=4.3 4 | h5py>=3.1.0 5 | idna>=3.3 6 | keras>=2.6.0 7 | Keras-Applications>=1.0.8 8 | Keras-Preprocessing>=1.1.2 9 | mdtraj>=1.9.6 10 | numpy>=1.19.5 11 | pandas>=0.22 12 | pytz>=2021.3 13 | PyYAML>=5.3.1 14 | scipy>=1.5.4 15 | six>=1.15.0 16 | tensorflow>=2.6.0 17 | urllib3>=1.26.7 18 | zipp>=3.6.0 19 | zope.interface>=5.4.0 20 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | from os import path 4 | this_directory = path.abspath(path.dirname(__file__)) 5 | with open(path.join(this_directory, 'README.rst')) as f: 6 | long_description = f.read() 7 | 8 | setup(name='anncolvar', 9 | version='0.9', 10 | description='Coding collective variables by artificial neural networks', 11 | long_description=long_description, 12 | long_description_content_type='text/x-rst', 13 | classifiers=[ 14 | 'Development Status :: 2 - Pre-Alpha', 15 | 'License :: OSI Approved :: MIT License', 16 | 'Programming Language :: Python', 17 | 'Topic :: Scientific/Engineering :: Artificial Intelligence', 18 | 'Topic :: Scientific/Engineering :: Chemistry', 19 | ], 20 | keywords='artificial neural networks molecular dynamics simulation', 21 | url='https://github.com/spiwokv/anncolvar', 22 | author='Vojtech Spiwok, ', 23 | author_email='spiwokv@vscht.cz', 24 | license='MIT', 25 | packages=['anncolvar'], 26 | scripts=['bin/anncolvar'], 27 | install_requires=[ 28 | 'numpy', 29 | 'cython', 30 | 'mdtraj', 31 | 'keras', 32 | 'argparse', 33 | 'datetime', 34 | 'codecov', 35 | 'pandas', 36 | 'tensorflow' 37 | ], 38 | include_package_data=True, 39 | zip_safe=False) 40 | 41 | -------------------------------------------------------------------------------- /tests/reference.pdb: -------------------------------------------------------------------------------- 1 | TITLE Great Red Owns Many ACres of Sand 2 | REMARK THIS IS A SIMULATION BOX 3 | CRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P 1 1 4 | MODEL 1 5 | ATOM 1 C1 MOL 1 6.464 5.717 4.333 1.00 1.00 C 6 | ATOM 2 C2 MOL 1 6.654 4.285 4.806 1.00 1.00 C 7 | ATOM 3 C3 MOL 1 5.529 3.740 5.671 1.00 1.00 C 8 | ATOM 4 C4 MOL 1 4.301 3.271 4.908 1.00 1.00 C 9 | ATOM 5 C5 MOL 1 3.178 4.290 4.808 1.00 1.00 C 10 | ATOM 6 C6 MOL 1 3.621 5.702 4.462 1.00 1.00 C 11 | ATOM 7 C7 MOL 1 4.343 6.440 5.577 1.00 1.00 C 12 | ATOM 8 C8 MOL 1 5.830 6.652 5.351 1.00 1.00 C 13 | ATOM 9 F1 MOL 1 7.737 6.221 4.054 1.00 1.00 F 14 | ATOM 10 H1 MOL 1 5.935 5.733 3.367 1.00 1.00 H 15 | ATOM 11 H2 MOL 1 6.839 3.618 3.942 1.00 1.00 H 16 | ATOM 12 H3 MOL 1 7.591 4.243 5.400 1.00 1.00 H 17 | ATOM 13 F2 MOL 1 6.074 2.616 6.277 1.00 1.00 F 18 | ATOM 14 H4 MOL 1 5.272 4.405 6.503 1.00 1.00 H 19 | ATOM 15 H5 MOL 1 4.603 2.953 3.893 1.00 1.00 H 20 | ATOM 16 F4 MOL 1 3.756 2.149 5.537 1.00 1.00 F 21 | ATOM 17 H6 MOL 1 2.445 3.935 4.050 1.00 1.00 H 22 | ATOM 18 H7 MOL 1 2.618 4.311 5.768 1.00 1.00 H 
23 | ATOM 19 H8 MOL 1 4.136 5.727 3.485 1.00 1.00 H 24 | ATOM 20 H9 MOL 1 2.681 6.274 4.264 1.00 1.00 H 25 | ATOM 21 H10 MOL 1 3.880 7.452 5.678 1.00 1.00 H 26 | ATOM 22 H11 MOL 1 4.140 5.967 6.562 1.00 1.00 H 27 | ATOM 23 H12 MOL 1 6.382 6.597 6.314 1.00 1.00 H 28 | ATOM 24 H13 MOL 1 5.983 7.694 4.990 1.00 1.00 H 29 | TER 30 | ENDMDL 31 | -------------------------------------------------------------------------------- /tests/test_it.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mdtraj as md 3 | import numpy as np 4 | import keras as krs 5 | import argparse as arg 6 | import datetime as dt 7 | import sys 8 | import os 9 | 10 | import anncolvar 11 | 12 | def test_it(): 13 | myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc') 14 | myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb') 15 | mycolvarname = os.path.join(os.path.dirname(__file__), 'results_isomap') 16 | ae, cor = anncolvar.anncollectivevariable(infilename=myinfilename, 17 | intopname=myintopname, 18 | colvarname=mycolvarname, 19 | column=2, boxx=1.0, boxy=1.0, boxz=1.0, 20 | atestset=0.1, shuffle=1, nofit=0, layers=3, layer1=16, layer2=8, layer3=4, 21 | actfun1='sigmoid', actfun2='sigmoid', actfun3='sigmoid', 22 | optim='adam', loss='mean_squared_error', epochs=1000, batch=256, 23 | ofilename='', modelfile='', plumedfile='', plumedfile2='') 24 | assert(cor > 0.99) 25 | 26 | if __name__ == '__main__': 27 | pytest.main([__file__]) 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /tests/test_it2.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mdtraj as md 3 | import numpy as np 4 | import keras as krs 5 | import argparse as arg 6 | import datetime as dt 7 | import sys 8 | import os 9 | 10 | import anncolvar 11 | 12 | def test_it(): 13 | myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc') 14 | myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb') 15 | mycolvarname = os.path.join(os.path.dirname(__file__), 'results_isomap') 16 | myplumedname = os.path.join(os.path.dirname(__file__), 'test.dat') 17 | myplumedname2 = os.path.join(os.path.dirname(__file__), 'test2.dat') 18 | ae, cor = anncolvar.anncollectivevariable(infilename=myinfilename, 19 | intopname=myintopname, 20 | colvarname=mycolvarname, 21 | column=2, boxx=1.0, boxy=1.0, boxz=1.0, 22 | atestset=0.1, shuffle=1, nofit=0, layers=3, layer1=16, layer2=8, layer3=4, 23 | actfun1='tanh', actfun2='tanh', actfun3='tanh', 24 | optim='adam', loss='mean_squared_error', epochs=1000, batch=256, 25 | ofilename='', modelfile='', plumedfile=myplumedname, plumedfile2=myplumedname2) 26 | 27 | command = "plumed driver --mf_pdb "+myintopname+" --plumed "+myplumedname 28 | now = dt.datetime.now() 29 | os.system(command) 30 | print("time %i s\n" % (dt.datetime.now()-now).seconds) 31 | ifile = open("COLVAR", "r").readlines() 32 | sline = str.split(ifile[1]) 33 | x = float(sline[1]) 34 | assert((x > 0.29) and (x < 0.33)) 35 | 36 | if __name__ == '__main__': 37 | pytest.main([__file__]) 38 | 39 | -------------------------------------------------------------------------------- /tests/test_it3.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mdtraj as md 3 | import numpy as np 4 | import keras as krs 5 | import argparse as arg 6 | import datetime as dt 7 | import sys 8 | import os 9 | 10 
| import anncolvar 11 | 12 | def test_it(): 13 | myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc') 14 | myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb') 15 | mycolvarname = os.path.join(os.path.dirname(__file__), 'results_isomap') 16 | myplumedname = os.path.join(os.path.dirname(__file__), 'test.dat') 17 | myplumedname2 = os.path.join(os.path.dirname(__file__), 'test2.dat') 18 | ae, cor = anncolvar.anncollectivevariable(infilename=myinfilename, 19 | intopname=myintopname, 20 | colvarname=mycolvarname, 21 | column=2, boxx=1.0, boxy=1.0, boxz=1.0, 22 | atestset=0.1, shuffle=1, nofit=0, layers=2, layer1=16, layer2=8, layer3=4, 23 | actfun1='tanh', actfun2='tanh', actfun3='linear', 24 | optim='adam', loss='mean_squared_error', epochs=1000, batch=256, 25 | ofilename='', modelfile='', plumedfile=myplumedname, plumedfile2=myplumedname2) 26 | 27 | command = "plumed driver --mf_pdb "+myintopname+" --plumed "+myplumedname 28 | now = dt.datetime.now() 29 | os.system(command) 30 | print("time %i s\n" % (dt.datetime.now()-now).seconds) 31 | ifile = open("COLVAR", "r").readlines() 32 | sline = str.split(ifile[1]) 33 | x = float(sline[1]) 34 | assert((x > 0.29) and (x < 0.33)) 35 | 36 | if __name__ == '__main__': 37 | pytest.main([__file__]) 38 | 39 | -------------------------------------------------------------------------------- /tests/test_it4.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mdtraj as md 3 | import numpy as np 4 | import keras as krs 5 | import argparse as arg 6 | import datetime as dt 7 | import sys 8 | import os 9 | 10 | import anncolvar 11 | 12 | def test_it(): 13 | myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc') 14 | myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb') 15 | mycolvarname = os.path.join(os.path.dirname(__file__), 'results_isomap') 16 | myplumedname = os.path.join(os.path.dirname(__file__), 'test.dat') 17 | myplumedname2 = os.path.join(os.path.dirname(__file__), 'test2.dat') 18 | ae, cor = anncolvar.anncollectivevariable(infilename=myinfilename, 19 | intopname=myintopname, 20 | colvarname=mycolvarname, 21 | column=2, boxx=1.0, boxy=1.0, boxz=1.0, 22 | atestset=0.1, shuffle=1, nofit=0, layers=1, layer1=16, layer2=8, layer3=4, 23 | actfun1='tanh', actfun2='linear', actfun3='linear', 24 | optim='adam', loss='mean_squared_error', epochs=1000, batch=256, 25 | ofilename='', modelfile='', plumedfile=myplumedname, plumedfile2=myplumedname2) 26 | 27 | command = "plumed driver --mf_pdb "+myintopname+" --plumed "+myplumedname 28 | now = dt.datetime.now() 29 | os.system(command) 30 | print("time %i s\n" % (dt.datetime.now()-now).seconds) 31 | ifile = open("COLVAR", "r").readlines() 32 | sline = str.split(ifile[1]) 33 | x = float(sline[1]) 34 | assert((x > 0.29) and (x < 0.33)) 35 | 36 | if __name__ == '__main__': 37 | pytest.main([__file__]) 38 | -------------------------------------------------------------------------------- /tests/test_it5.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mdtraj as md 3 | import numpy as np 4 | import keras as krs 5 | import argparse as arg 6 | import datetime as dt 7 | import sys 8 | import os 9 | 10 | import anncolvar 11 | 12 | def test_it(): 13 | myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc') 14 | myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb') 15 | mycolvarname = 
os.path.join(os.path.dirname(__file__), 'results_isomap') 16 | myplumedname = os.path.join(os.path.dirname(__file__), 'test.dat') 17 | myplumedname2 = os.path.join(os.path.dirname(__file__), 'test2.dat') 18 | ae, cor = anncolvar.anncollectivevariable(infilename=myinfilename, 19 | intopname=myintopname, 20 | colvarname=mycolvarname, 21 | column=2, boxx=1.0, boxy=1.0, boxz=1.0, 22 | atestset=0.1, shuffle=1, nofit=0, layers=3, layer1=16, layer2=8, layer3=4, 23 | actfun1='tanh', actfun2='tanh', actfun3='tanh', 24 | optim='adam', loss='mean_squared_error', epochs=1000, batch=256, 25 | ofilename='', modelfile='', plumedfile=myplumedname, plumedfile2=myplumedname2) 26 | 27 | command = "plumed driver --mf_pdb "+myintopname+" --plumed "+myplumedname2 28 | now = dt.datetime.now() 29 | os.system(command) 30 | print("time %i s\n" % (dt.datetime.now()-now).seconds) 31 | ifile = open("COLVAR", "r").readlines() 32 | sline = str.split(ifile[1]) 33 | x = float(sline[1]) 34 | assert((x > 0.29) and (x < 0.33)) 35 | 36 | if __name__ == '__main__': 37 | pytest.main([__file__]) 38 | 39 | 40 | -------------------------------------------------------------------------------- /tests/test_it6.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mdtraj as md 3 | import numpy as np 4 | import keras as krs 5 | import argparse as arg 6 | import datetime as dt 7 | import sys 8 | import os 9 | 10 | import anncolvar 11 | 12 | def test_it(): 13 | myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc') 14 | myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb') 15 | mycolvarname = os.path.join(os.path.dirname(__file__), 'results_isomap') 16 | myplumedname = os.path.join(os.path.dirname(__file__), 'test.dat') 17 | myplumedname2 = os.path.join(os.path.dirname(__file__), 'test2.dat') 18 | ae, cor = anncolvar.anncollectivevariable(infilename=myinfilename, 19 | intopname=myintopname, 20 | colvarname=mycolvarname, 21 | column=2, boxx=1.0, boxy=1.0, boxz=1.0, 22 | atestset=0.1, shuffle=1, nofit=0, layers=2, layer1=16, layer2=8, layer3=4, 23 | actfun1='tanh', actfun2='tanh', actfun3='linear', 24 | optim='adam', loss='mean_squared_error', epochs=1000, batch=256, 25 | ofilename='', modelfile='', plumedfile=myplumedname, plumedfile2=myplumedname2) 26 | 27 | command = "plumed driver --mf_pdb "+myintopname+" --plumed "+myplumedname2 28 | now = dt.datetime.now() 29 | os.system(command) 30 | print("time %i s\n" % (dt.datetime.now()-now).seconds) 31 | ifile = open("COLVAR", "r").readlines() 32 | sline = str.split(ifile[1]) 33 | x = float(sline[1]) 34 | assert((x > 0.29) and (x < 0.33)) 35 | 36 | if __name__ == '__main__': 37 | pytest.main([__file__]) 38 | 39 | -------------------------------------------------------------------------------- /tests/test_it7.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mdtraj as md 3 | import numpy as np 4 | import keras as krs 5 | import argparse as arg 6 | import datetime as dt 7 | import sys 8 | import os 9 | 10 | import anncolvar 11 | 12 | def test_it(): 13 | myinfilename = os.path.join(os.path.dirname(__file__), 'traj_fit.xtc') 14 | myintopname = os.path.join(os.path.dirname(__file__), 'reference.pdb') 15 | mycolvarname = os.path.join(os.path.dirname(__file__), 'results_isomap') 16 | myplumedname = os.path.join(os.path.dirname(__file__), 'test.dat') 17 | myplumedname2 = os.path.join(os.path.dirname(__file__), 'test2.dat') 18 | ae, cor = 
anncolvar.anncollectivevariable(infilename=myinfilename, 19 | intopname=myintopname, 20 | colvarname=mycolvarname, 21 | column=2, boxx=1.0, boxy=1.0, boxz=1.0, 22 | atestset=0.1, shuffle=1, nofit=0, layers=1, layer1=16, layer2=8, layer3=4, 23 | actfun1='tanh', actfun2='linear', actfun3='linear', 24 | optim='adam', loss='mean_squared_error', epochs=1000, batch=256, 25 | ofilename='', modelfile='', plumedfile=myplumedname, plumedfile2=myplumedname2) 26 | 27 | command = "plumed driver --mf_pdb "+myintopname+" --plumed "+myplumedname2 28 | now = dt.datetime.now() 29 | os.system(command) 30 | print("time %i s\n" % (dt.datetime.now()-now).seconds) 31 | ifile = open("COLVAR", "r").readlines() 32 | sline = str.split(ifile[1]) 33 | x = float(sline[1]) 34 | assert((x > 0.29) and (x < 0.33)) 35 | 36 | if __name__ == '__main__': 37 | pytest.main([__file__]) 38 | 39 | -------------------------------------------------------------------------------- /tests/traj_fit.xtc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spiwokv/anncolvar/353aac87c27b66bbdec69c5077332b9a9400b010/tests/traj_fit.xtc --------------------------------------------------------------------------------
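
The test scripts above call anncolvar.anncollectivevariable directly, which is also the simplest way to use anncolvar from Python on one's own data. The sketch below mirrors the call signature used in the tests; the input file names (my_traj.xtc, my_ref.pdb, my_cv.txt) and the output name low.txt are placeholders, not files shipped with the repository::

    # Minimal programmatic use of anncolvar (a sketch; file names are hypothetical).
    import anncolvar

    # The trajectory must be centred in the PBC box and contain only the analysed
    # atoms; my_cv.txt must hold one line per frame, with the reference CV in column 2.
    model, correlation = anncolvar.anncollectivevariable(
        infilename='my_traj.xtc', intopname='my_ref.pdb', colvarname='my_cv.txt',
        column=2, boxx=1.0, boxy=1.0, boxz=1.0,
        atestset=0.1, shuffle=1, nofit=0,
        layers=1, layer1=16, layer2=8, layer3=4,
        actfun1='tanh', actfun2='linear', actfun3='linear',
        optim='adam', loss='mean_squared_error', epochs=1000, batch=256,
        ofilename='low.txt', modelfile='',
        plumedfile='plumed.dat', plumedfile2='plumed2.dat')

    # The second return value is the Pearson correlation between the reference CV
    # and the network output over the trajectory; values near 1 indicate a good fit.
    print("correlation: %f" % correlation)

The generated plumed.dat (MATHEVAL-based) and plumed2.dat (ANN-module-based) inputs can then be checked with the plumed driver in the same way as in the tests above.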